xmit.c 71.6 KB
Newer Older
1
/*
2
 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 4 5 6 7 8 9 10 11 12 13 14 15 16
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

17
#include <linux/dma-mapping.h>
S
Sujith 已提交
18
#include "ath9k.h"
19
#include "ar9003_mac.h"
20 21 22 23 24 25 26 27 28 29 30 31

/*
 * 802.11n PHY timing constants used for rate/duration math.
 * L_* are legacy (OFDM) preamble fields, HT_* the HT-mixed-mode additions;
 * values are in microseconds unless noted otherwise.
 */
#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */

/* inverse of the above: airtime (usec) -> symbol count */
#define TIME_SYMBOLS(t)         ((t) >> 2)
#define TIME_SYMBOLS_HALFGI(t)  (((t) * 5 - 4) / 18)
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)


38
/* Data bits carried per OFDM symbol for each HT MCS (single stream),
 * indexed by [mcs % 8][0 = 20MHz, 1 = 40MHz]. */
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

F
Felix Fietkau 已提交
50
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
51 52 53
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
S
Sujith 已提交
54
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
55
				struct ath_txq *txq, struct list_head *bf_q,
56
				struct ath_tx_status *ts, int txok);
57
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
58
			     struct list_head *head, bool internal);
F
Felix Fietkau 已提交
59 60
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
61
			     int txok);
62 63
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
64 65 66
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
F
Felix Fietkau 已提交
67
					   struct sk_buff *skb);
68

69
/* Index into sc->tx.max_aggr_framelen[queue][mode]: HT channel width
 * combined with guard-interval length. */
enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

S
Sujith 已提交
76 77 78
/*********************/
/* Aggregation logic */
/*********************/
79

80
/* Acquire the per-queue lock (BH-safe); paired with ath_txq_unlock*(). */
void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
	__acquires(&txq->axq_lock)
{
	spin_lock_bh(&txq->axq_lock);
}

86
/* Release the per-queue lock without flushing txq->complete_q. */
void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
	__releases(&txq->axq_lock)
{
	spin_unlock_bh(&txq->axq_lock);
}

92
void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
93
	__releases(&txq->axq_lock)
F
Felix Fietkau 已提交
94 95 96 97 98 99 100 101 102 103 104 105
{
	struct sk_buff_head q;
	struct sk_buff *skb;

	__skb_queue_head_init(&q);
	skb_queue_splice_init(&txq->complete_q, &q);
	spin_unlock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&q)))
		ieee80211_tx_status(sc->hw, skb);
}

106 107
static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq,
			     struct ath_atx_tid *tid)
S
Sujith 已提交
108
{
S
Sujith 已提交
109
	struct ath_atx_ac *ac = tid->ac;
110 111 112 113 114 115
	struct list_head *list;
	struct ath_vif *avp = (struct ath_vif *) tid->an->vif->drv_priv;
	struct ath_chanctx *ctx = avp->chanctx;

	if (!ctx)
		return;
S
Sujith 已提交
116

S
Sujith 已提交
117 118
	if (tid->sched)
		return;
S
Sujith 已提交
119

S
Sujith 已提交
120 121
	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);
S
Sujith 已提交
122

S
Sujith 已提交
123 124
	if (ac->sched)
		return;
125

S
Sujith 已提交
126
	ac->sched = true;
127 128 129

	list = &ctx->acq[TID_TO_WME_AC(tid->tidno)];
	list_add_tail(&ac->list, list);
S
Sujith 已提交
130
}
131

132
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
133 134
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
135 136 137
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
138 139
}

140 141
/* Send a BlockAck Request for this TID, moving the receiver's BA window
 * start to @seqno. No-op if there is no associated station entry. */
static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
{
	struct ath_node *an = tid->an;

	if (!an->sta)
		return;

	ieee80211_send_bar(an->vif, an->sta->addr, tid->tidno,
			   seqno << IEEE80211_SEQ_SEQ_SHIFT);
}

149 150 151 152 153 154 155
/* Fill bf->rates with the rate series chosen by mac80211's rate control. */
static void ath_set_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta,
			  struct ath_buf *bf)
{
	ieee80211_get_tx_rates(vif, sta, bf->bf_mpdu, bf->rates,
			       ARRAY_SIZE(bf->rates));
}

156 157 158
static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
159
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
160 161 162
	struct ath_frame_info *fi = get_frame_info(skb);
	int hw_queue;
	int q = fi->txq;
163

164
	if (q < 0)
165 166
		return;

167
	txq = sc->tx.txq_map[q];
168 169 170
	if (WARN_ON(--txq->pending_frames < 0))
		txq->pending_frames = 0;

171
	hw_queue = (info->hw_queue >= sc->hw->queues - 2) ? q : info->hw_queue;
172 173
	if (txq->stopped &&
	    txq->pending_frames < sc->tx.txq_max_pending[q]) {
174
		ieee80211_wake_queue(sc->hw, hw_queue);
175 176 177 178
		txq->stopped = false;
	}
}

179 180 181
static struct ath_atx_tid *
ath_get_skb_tid(struct ath_softc *sc, struct ath_node *an, struct sk_buff *skb)
{
182
	u8 tidno = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
183 184 185
	return ATH_AN_2_TID(an, tidno);
}

186 187
static bool ath_tid_has_buffered(struct ath_atx_tid *tid)
{
188
	return !skb_queue_empty(&tid->buf_q) || !skb_queue_empty(&tid->retry_q);
189 190 191 192
}

/* Pop the next frame for this TID; software retries go out before
 * fresh traffic. Returns NULL if both queues are empty. */
static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid)
{
	struct sk_buff *skb = __skb_dequeue(&tid->retry_q);

	return skb ? skb : __skb_dequeue(&tid->buf_q);
}

202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236
/*
 * ath_tx_tid_change_state:
 * - clears a-mpdu flag of previous session
 * - force sequence number allocation to fix next BlockAck Window
 */
static void
ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct ieee80211_tx_info *tx_info;
	struct sk_buff *skb, *tskb;
	struct ath_buf *bf;
	struct ath_frame_info *fi;

	skb_queue_walk_safe(&tid->buf_q, skb, tskb) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		tx_info = IEEE80211_SKB_CB(skb);
		tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;

		/* Frame already has a descriptor; nothing more to set up. */
		if (bf)
			continue;

		bf = ath_tx_setup_buffer(sc, txq, tid, skb);
		if (!bf) {
			/* No buffer available: drop the frame. */
			__skb_unlink(skb, &tid->buf_q);
			ath_txq_skb_done(sc, txq, skb);
			ieee80211_free_txskb(sc->hw, skb);
			continue;
		}
	}

}

237
/*
 * Fail out every frame on the TID's software retry queue and, if any of
 * them was inside the BlockAck window, advance the window and tell the
 * peer via a BAR. Called with the txq lock held; the lock is dropped
 * around ath_send_bar() since that calls back into mac80211.
 */
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;
	bool sendbar = false;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));

	while ((skb = __skb_dequeue(&tid->retry_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!bf) {
			/* Frame never got a descriptor: just account & free. */
			ath_txq_skb_done(sc, txq, skb);
			ieee80211_free_txskb(sc->hw, skb);
			continue;
		}

		if (fi->baw_tracked) {
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			sendbar = true;
		}

		list_add_tail(&bf->list, &bf_head);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
	}

	if (sendbar) {
		/* ath_send_bar() may call into mac80211 — drop the lock. */
		ath_txq_unlock(sc, txq);
		ath_send_bar(tid, tid->seq_start);
		ath_txq_lock(sc, txq);
	}
}
275

S
Sujith 已提交
276 277
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
S
Sujith 已提交
278
{
S
Sujith 已提交
279
	int index, cindex;
280

S
Sujith 已提交
281 282
	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
283

284
	__clear_bit(cindex, tid->tx_buf);
S
Sujith 已提交
285

286
	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
S
Sujith 已提交
287 288
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
289 290
		if (tid->bar_index >= 0)
			tid->bar_index--;
S
Sujith 已提交
291
	}
S
Sujith 已提交
292
}
293

S
Sujith 已提交
294
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
295
			     struct ath_buf *bf)
S
Sujith 已提交
296
{
297 298
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
	u16 seqno = bf->bf_state.seqno;
S
Sujith 已提交
299
	int index, cindex;
S
Sujith 已提交
300

301
	index  = ATH_BA_INDEX(tid->seq_start, seqno);
S
Sujith 已提交
302
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
303
	__set_bit(cindex, tid->tx_buf);
304
	fi->baw_tracked = 1;
305

S
Sujith 已提交
306 307 308 309
	if (index >= ((tid->baw_tail - tid->baw_head) &
		(ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
310 311 312
	}
}

S
Sujith 已提交
313 314
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
315 316

{
317
	struct sk_buff *skb;
S
Sujith 已提交
318 319
	struct ath_buf *bf;
	struct list_head bf_head;
320
	struct ath_tx_status ts;
321
	struct ath_frame_info *fi;
322 323

	memset(&ts, 0, sizeof(ts));
S
Sujith 已提交
324
	INIT_LIST_HEAD(&bf_head);
325

326
	while ((skb = ath_tid_dequeue(tid))) {
327 328
		fi = get_frame_info(skb);
		bf = fi->bf;
329

330 331 332 333 334
		if (!bf) {
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			continue;
		}

335
		list_add_tail(&bf->list, &bf_head);
336
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
S
Sujith 已提交
337
	}
338 339
}

S
Sujith 已提交
340
static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
341
			     struct sk_buff *skb, int count)
342
{
343
	struct ath_frame_info *fi = get_frame_info(skb);
344
	struct ath_buf *bf = fi->bf;
S
Sujith 已提交
345
	struct ieee80211_hdr *hdr;
346
	int prev = fi->retries;
347

S
Sujith 已提交
348
	TX_STAT_INC(txq->axq_qnum, a_retries);
349 350 351
	fi->retries += count;

	if (prev > 0)
352
		return;
353

S
Sujith 已提交
354 355
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
356 357
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
		sizeof(*hdr), DMA_TO_DEVICE);
358 359
}

360
static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
S
Sujith 已提交
361
{
362
	struct ath_buf *bf = NULL;
S
Sujith 已提交
363 364

	spin_lock_bh(&sc->tx.txbuflock);
365 366

	if (unlikely(list_empty(&sc->tx.txbuf))) {
367 368 369
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}
370 371 372 373

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

S
Sujith 已提交
374 375
	spin_unlock_bh(&sc->tx.txbuflock);

376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393
	return bf;
}

/* Give a descriptor buffer back to the free pool. */
static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

/*
 * Duplicate a tx buffer so an aggregate's holding descriptor can be
 * re-submitted: the clone shares the mpdu and DMA mapping of @bf but
 * gets its own descriptor memory and a fresh (non-stale) state.
 * Returns NULL (with a WARN) if the free buffer pool is exhausted.
 */
static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;
	tbf->bf_state.stale = false;

	return tbf;
}

405 406 407 408
static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
			        struct ath_tx_status *ts, int txok,
			        int *nframes, int *nbad)
{
409
	struct ath_frame_info *fi;
410 411 412 413 414 415 416 417 418 419 420 421 422 423 424
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
425
		fi = get_frame_info(bf->bf_mpdu);
426
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);
427 428 429 430 431 432 433 434 435 436

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}


S
Sujith 已提交
437 438
/*
 * Completion handler for an A-MPDU (or single frame sent under an
 * aggregation session). Classifies every subframe as acked, failed or
 * pending (software retry), updates the BlockAck window, feeds rate
 * control once per status, requeues retryable frames on the TID and
 * sends a BAR when frames were dropped from the window.
 *
 * Called with the txq lock held; it is dropped temporarily around
 * ath_send_bar(). Station lookup is done under rcu_read_lock().
 */
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true, isba;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
	int i, retries;
	int bar_index = -1;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, bf->rates, sizeof(rates));

	/* total retry count: all tries on earlier rate slots plus the
	 * long retries on the final one */
	retries = ts->ts_longretry + 1;
	for (i = 0; i < ts->ts_rateindex; i++)
		retries += rates[i].count;

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		/* Station is gone: fail every subframe in the chain. */
		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			if (!bf->bf_state.stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tid = ath_get_skb_tid(sc, an, skb);
	seq_first = tid->seq_start;
	isba = ts->ts_flags & ATH9K_TX_BA;

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 *
	 * Only BlockAcks have a TID and therefore normal Acks cannot be
	 * checked
	 */
	if (isba && tid->tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when BA
			 * issue happens. Chip needs to be reset.
			 * But AP code may have sychronization issues
			 * when perform internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno) ||
		    !tid->active) {
			/*
			 * Outside of the current BlockAck window,
			 * maybe part of a previous session
			 */
			txfail = 1;
		} else if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else if (flush) {
			txpending = 1;
		} else if (fi->retries < ATH_MAX_SW_RETRIES) {
			if (txok || !an->sleeping)
				ath_tx_set_retry(sc, txq, bf->bf_mpdu,
						 retries);

			txpending = 1;
		} else {
			/* retry budget exhausted — give up and remember the
			 * highest failed index for the BAR below */
			txfail = 1;
			txfail_cnt++;
			bar_index = max_t(int, bar_index,
				ATH_BA_INDEX(seq_first, seqno));
		}

		/*
		 * Make sure the last desc is reclaimed if it
		 * not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if (bf_next != NULL || !bf_last->bf_state.stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			ath_tx_update_baw(sc, tid, seqno);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
				rc_update = false;
				if (bf == bf->bf_lastbf)
					ath_dynack_sample_tx_ts(sc->sc_ah,
								bf->bf_mpdu,
								ts);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				!txfail);
		} else {
			if (tx_info->flags & IEEE80211_TX_STATUS_EOSP) {
				tx_info->flags &= ~IEEE80211_TX_STATUS_EOSP;
				ieee80211_sta_eosp(sta);
			}
			/* retry the un-acked ones */
			if (bf->bf_next == NULL && bf_last->bf_state.stale) {
				struct ath_buf *tbf;

				tbf = ath_clone_txbuf(sc, bf_last);
				/*
				 * Update tx baw and complete the
				 * frame with failed status if we
				 * run out of tx buf.
				 */
				if (!tbf) {
					ath_tx_update_baw(sc, tid, seqno);

					ath_tx_complete_buf(sc, bf, txq,
							    &bf_head, ts, 0);
					bar_index = max_t(int, bar_index,
						ATH_BA_INDEX(seq_first, seqno));
					break;
				}

				fi->bf = tbf;
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_buffered(sta, tid->tidno, true);

		skb_queue_splice_tail(&bf_pending, &tid->retry_q);
		if (!an->sleeping) {
			ath_tx_queue_tid(sc, txq, tid);

			if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
				tid->ac->clear_ps_filter = true;
		}
	}

	if (bar_index >= 0) {
		u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);

		if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
			tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);

		/* drop the txq lock: ath_send_bar() calls into mac80211 */
		ath_txq_unlock(sc, txq);
		ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
		ath_txq_lock(sc, txq);
	}

	rcu_read_unlock();

	if (needreset)
		ath9k_queue_reset(sc, RESET_TYPE_TX_ERROR);
}
666

667 668 669 670 671 672 673 674 675 676
/* True for A-MPDU frames that are not rate-control probes; such frames
 * count towards the queue's aggregate depth. (Reformatted from 4-space
 * indentation to the file's tab convention.) */
static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);

	return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

/*
 * Dispatch a completed tx buffer: update queue depth accounting, then
 * route single frames and aggregates to their respective completion
 * paths, and kick the scheduler unless this is a flush.
 */
static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_tx_status *ts, struct ath_buf *bf,
				  struct list_head *bf_head)
{
	struct ieee80211_tx_info *info;
	bool txok, flush;

	txok = !(ts->ts_status & ATH9K_TXERR_MASK);
	flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
	txq->axq_tx_inprogress = false;

	txq->axq_depth--;
	if (bf_is_ampdu_not_probing(bf))
		txq->axq_ampdu_depth--;

	if (!bf_isampdu(bf)) {
		if (!flush) {
			/* feed rate control / dynamic ACK timeout once */
			info = IEEE80211_SKB_CB(bf->bf_mpdu);
			memcpy(info->control.rates, bf->rates,
			       sizeof(info->control.rates));
			ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
			ath_dynack_sample_tx_ts(sc->sc_ah, bf->bf_mpdu, ts);
		}
		ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
	} else
		ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok);

	if (!flush)
		ath_txq_schedule(sc, txq);
}

704 705 706 707 708 709 710 711 712 713 714
static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

715 716 717 718
	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

719 720 721 722 723 724 725
		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

S
Sujith 已提交
726 727
/*
 * Compute the maximum aggregate size (in bytes) for a frame, derived
 * from the slowest rate in its series so the A-MPDU stays within the
 * 4ms/TXOP airtime budget, then capped by BTCOEX and the peer's
 * advertised max A-MPDU length. Returns 0 if aggregation must be
 * avoided (probe frame or legacy rate present).
 */
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, bt_aggr_limit, legacy = 0;
	int q = tid->ac->txq->mac80211_qnum;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = bf->rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms (or TXOP limited) transmit duration.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		int modeidx;

		if (!rates[i].count)
			continue;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
			legacy = 1;
			break;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			modeidx = MCS_HT40;
		else
			modeidx = MCS_HT20;

		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			modeidx++;

		frmlen = sc->tx.max_aggr_framelen[q][modeidx][rates[i].idx];
		max_4ms_framelen = min(max_4ms_framelen, frmlen);
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate, if rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	aggr_limit = min(max_4ms_framelen, (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * Override the default aggregation limit for BTCOEX.
	 */
	bt_aggr_limit = ath9k_btcoex_aggr_limit(sc, max_4ms_framelen);
	if (bt_aggr_limit)
		aggr_limit = bt_aggr_limit;

	/* respect the peer's advertised maximum A-MPDU length */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}
792

S
Sujith 已提交
793
/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption enabled, hardware requires some more padding between
	 * subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *      The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add delimiter when using RTS/CTS with aggregation
	 * and non enterprise AR9003 card
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = bf->rates[0].idx;
	flags = bf->rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	/* pad short subframes up to the density-derived minimum length */
	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

867 868
/*
 * Fetch the next transmittable frame for a TID (retry queue first).
 * Sets *q to the queue the returned frame still sits on — the caller
 * is responsible for unlinking it. Frames that cannot get a buffer
 * are dropped; frames already completed past bar_index are failed out.
 * Returns NULL when nothing can be sent (empty or blocked by the BAW).
 */
static struct ath_buf *
ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
			struct ath_atx_tid *tid, struct sk_buff_head **q)
{
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	struct ath_buf *bf;
	u16 seqno;

	while (1) {
		*q = &tid->retry_q;
		if (skb_queue_empty(*q))
			*q = &tid->buf_q;

		skb = skb_peek(*q);
		if (!skb)
			break;

		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);
		else
			bf->bf_state.stale = false;

		if (!bf) {
			/* no descriptor available: drop the frame */
			__skb_unlink(skb, *q);
			ath_txq_skb_done(sc, txq, skb);
			ieee80211_free_txskb(sc->hw, skb);
			continue;
		}

		bf->bf_next = NULL;
		bf->bf_lastbf = bf;

		tx_info = IEEE80211_SKB_CB(skb);
		tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;

		/*
		 * No aggregation session is running, but there may be frames
		 * from a previous session or a failed attempt in the queue.
		 * Send them out as normal data frames
		 */
		if (!tid->active)
			tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;

		if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
			bf->bf_state.bf_type = 0;
			return bf;
		}

		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
		seqno = bf->bf_state.seqno;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno))
			break;

		if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
			/* already covered by an outstanding BAR: fail out */
			struct ath_tx_status ts = {};
			struct list_head bf_head;

			INIT_LIST_HEAD(&bf_head);
			list_add(&bf->list, &bf_head);
			__skb_unlink(skb, *q);
			ath_tx_update_baw(sc, tid, seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
			continue;
		}

		return bf;
	}

	return NULL;
}

944 945 946 947 948
/*
 * Build an A-MPDU starting at @bf_first: pull subframes off the TID
 * queue while they fit the airtime-derived size limit and half the BAW,
 * inserting the delimiters required by the peer's MPDU density. Chained
 * buffers are moved onto @bf_q; *aggr_len receives the total aggregate
 * length. Returns true if the TID ran out of eligible frames (closed).
 */
static bool
ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq,
		 struct ath_atx_tid *tid, struct list_head *bf_q,
		 struct ath_buf *bf_first, struct sk_buff_head *tid_q,
		 int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf = bf_first, *bf_prev = NULL;
	int nframes = 0, ndelim;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	bool closed = false;

	bf = bf_first;
	aggr_limit = ath_lookup_rate(sc, bf, tid);

	do {
		skb = bf->bf_mpdu;
		fi = get_frame_info(skb);

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;
		if (nframes) {
			if (aggr_limit < al + bpad + al_delta ||
			    ath_lookup_legacy(bf) || nframes >= h_baw)
				break;

			tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
			if ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
			    !(tx_info->flags & IEEE80211_TX_CTL_AMPDU))
				break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;

		/* link buffers of this frame to the aggregate */
		if (!fi->baw_tracked)
			ath_tx_addto_baw(sc, tid, bf);
		bf->bf_state.ndelim = ndelim;

		__skb_unlink(skb, tid_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;

		bf_prev = bf;

		bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
		if (!bf) {
			closed = true;
			break;
		}
	} while (ath_tid_has_buffered(tid));

	bf = bf_first;
	bf->bf_lastbf = bf_prev;

	/* single-frame "aggregate": send as plain A-MPDU without AGGR bit */
	if (bf == bf_prev) {
		al = get_frame_info(bf->bf_mpdu)->framelen;
		bf->bf_state.bf_type = BUF_AMPDU;
	} else {
		TX_STAT_INC(txq->axq_qnum, a_aggr);
	}

	*aggr_len = al;

	return closed;
#undef PADBYTES
}
1028

1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057
/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width  - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4us v/s 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	int nstreams = HT_RC_2_STREAMS(rix);
	u32 total_bits, sym_bits, nsym;

	/* number of OFDM symbols needed for PLCP + payload */
	total_bits = (pktlen << 3) + OFDM_PLCP_BITS;
	sym_bits = bits_per_symbol[rix % 8][width] * nstreams;
	nsym = (total_bits + sym_bits - 1) / sym_bits;

	/* payload airtime, then add legacy/HT training + signal fields */
	return (half_gi ? SYMBOL_TIME_HALFGI(nsym) : SYMBOL_TIME(nsym)) +
	       L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(nstreams);
}

1058 1059 1060 1061 1062 1063
/*
 * Maximum frame length (in bytes) that fits into a TXOP of "usec"
 * microseconds at the given MCS/bandwidth/guard-interval, capped at
 * the hardware A-MPDU limit of 65532 bytes.
 */
static int ath_max_framelen(int usec, int mcs, bool ht40, bool sgi)
{
	int nstreams = HT_RC_2_STREAMS(mcs);
	int nsym, nbits;

	/* subtract preamble/training overhead before converting to symbols */
	usec -= L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(nstreams);

	if (sgi)
		nsym = TIME_SYMBOLS_HALFGI(usec);
	else
		nsym = TIME_SYMBOLS(usec);

	nbits = nsym * bits_per_symbol[mcs % 8][ht40] * nstreams -
		OFDM_PLCP_BITS;

	return min(nbits / 8, 65532);
}

/*
 * Recompute the per-queue maximum aggregate frame length tables for all
 * MCS indices and all four mode variants (HT20/HT40 x LGI/SGI), based on
 * the queue's TXOP limit.
 */
void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop)
{
	int mcs;

	/* 4ms is the default (and maximum) duration */
	if (!txop || txop > 4096)
		txop = 4096;

	for (mcs = 0; mcs < 32; mcs++) {
		sc->tx.max_aggr_framelen[queue][MCS_HT20][mcs] =
			ath_max_framelen(txop, mcs, false, false);
		sc->tx.max_aggr_framelen[queue][MCS_HT20_SGI][mcs] =
			ath_max_framelen(txop, mcs, false, true);
		sc->tx.max_aggr_framelen[queue][MCS_HT40][mcs] =
			ath_max_framelen(txop, mcs, true, false);
		sc->tx.max_aggr_framelen[queue][MCS_HT40_SGI][mcs] =
			ath_max_framelen(txop, mcs, true, true);
	}
}

1096
/*
 * Fill the hardware rate series (rates, flags, chainmask, duration) in
 * "info" from the mac80211 rate table attached to the buffer.
 *
 * @len: frame (or whole aggregate) length used for duration/RTS decisions
 * @rts: force RTS protection on every series when true
 */
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_info *info, int len, bool rts)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
	u32 rts_thresh = sc->hw->wiphy->rts_threshold;
	int i;
	u8 rix = 0;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = bf->rates;
	hdr = (struct ieee80211_hdr *)skb->data;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);
	info->rtscts_rate = fi->rtscts_rate;

	for (i = 0; i < ARRAY_SIZE(bf->rates); i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		/* skip unused rate series entries */
		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		info->rates[i].Tries = rates[i].count;

		/*
		 * Handle RTS threshold for unaggregated HT frames.
		 */
		if (bf_isampdu(bf) && !bf_isaggr(bf) &&
		    (rates[i].flags & IEEE80211_TX_RC_MCS) &&
		    unlikely(rts_thresh != (u32) -1)) {
			if (!rts_thresh || (len > rts_thresh))
				rts = true;
		}

		if (rts || rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates: hw rate code is the MCS index with bit 7 set */
			info->rates[i].Rate = rix | 0x80;
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);
			info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
				 is_40, is_sgi, is_sp);
			/* STBC is only valid for single-stream MCS 0-7 */
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		rate = &common->sbands[tx_info->band].bitrates[rates[i].idx];
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		info->rates[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				info->rates[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		/* PAPRD calibration frames must use the full chainmask */
		if (bf->bf_state.bfs_paprd)
			info->rates[i].ChSel = ah->txchainmask;
		else
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);

		info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		info->flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (info->flags & ATH9K_TXDESC_RTSENA)
		info->flags &= ~ATH9K_TXDESC_CTSENA;
}
1203

1204 1205 1206 1207 1208 1209 1210 1211
static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
1212

1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224
	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
1225 1226
}

1227 1228
/*
 * Walk the bf_next chain starting at "bf" and program a hardware tx
 * descriptor for every subframe.  Rate series and per-frame flags are
 * taken from the first subframe of each aggregate.
 *
 * @len: total aggregate length; replaced by the frame length for
 *       non-aggregated buffers
 */
static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_txq *txq, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf_first = NULL;
	struct ath_tx_info info;
	u32 rts_thresh = sc->hw->wiphy->rts_threshold;
	bool rts = false;

	memset(&info, 0, sizeof(info));
	info.is_first = true;
	info.is_last = true;
	info.txpower = MAX_RATE_POWER;
	info.qcu = txq->axq_qnum;

	while (bf) {
		struct sk_buff *skb = bf->bf_mpdu;
		struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
		struct ath_frame_info *fi = get_frame_info(skb);
		bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

		info.type = get_hw_packet_type(skb);
		/* in tx99 mode the last descriptor links back to itself */
		if (bf->bf_next)
			info.link = bf->bf_next->bf_daddr;
		else
			info.link = (sc->tx99_state) ? bf->bf_daddr : 0;

		if (!bf_first) {
			bf_first = bf;

			if (!sc->tx99_state)
				info.flags = ATH9K_TXDESC_INTREQ;
			if ((tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) ||
			    txq == sc->tx.uapsdq)
				info.flags |= ATH9K_TXDESC_CLRDMASK;

			if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
				info.flags |= ATH9K_TXDESC_NOACK;
			if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
				info.flags |= ATH9K_TXDESC_LDPC;

			if (bf->bf_state.bfs_paprd)
				info.flags |= (u32) bf->bf_state.bfs_paprd <<
					      ATH9K_TXDESC_PAPRD_S;

			/*
			 * mac80211 doesn't handle RTS threshold for HT because
			 * the decision has to be taken based on AMPDU length
			 * and aggregation is done entirely inside ath9k.
			 * Set the RTS/CTS flag for the first subframe based
			 * on the threshold.
			 */
			if (aggr && (bf == bf_first) &&
			    unlikely(rts_thresh != (u32) -1)) {
				/*
				 * "len" is the size of the entire AMPDU.
				 */
				if (!rts_thresh || (len > rts_thresh))
					rts = true;
			}

			if (!aggr)
				len = fi->framelen;

			ath_buf_set_rate(sc, bf, &info, len, rts);
		}

		info.buf_addr[0] = bf->bf_buf_addr;
		info.buf_len[0] = skb->len;
		info.pkt_len = fi->framelen;
		info.keyix = fi->keyix;
		info.keytype = fi->keytype;

		if (aggr) {
			/* mark the subframe's position within the aggregate */
			if (bf == bf_first)
				info.aggr = AGGR_BUF_FIRST;
			else if (bf == bf_first->bf_lastbf)
				info.aggr = AGGR_BUF_LAST;
			else
				info.aggr = AGGR_BUF_MIDDLE;

			info.ndelim = bf->bf_state.ndelim;
			info.aggr_len = len;
		}

		/* end of this aggregate - the next buffer starts a new one */
		if (bf == bf_first->bf_lastbf)
			bf_first = NULL;

		ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
		bf = bf->bf_next;
	}
}

1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354
/*
 * Form a short burst of up to two non-aggregated frames from the TID
 * queue, moving them from "tid_q" onto "bf_q" and linking them via
 * bf_next.  Stops early when the queue runs dry or the next frame
 * belongs to an A-MPDU session.
 */
static void
ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq,
		  struct ath_atx_tid *tid, struct list_head *bf_q,
		  struct ath_buf *bf_first, struct sk_buff_head *tid_q)
{
	struct ath_buf *bf = bf_first, *bf_prev = NULL;
	struct sk_buff *skb;
	int nframes = 0;

	do {
		struct ieee80211_tx_info *tx_info;
		skb = bf->bf_mpdu;

		nframes++;
		__skb_unlink(skb, tid_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;
		bf_prev = bf;

		/* burst size is limited to two frames */
		if (nframes >= 2)
			break;

		bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
		if (!bf)
			break;

		/* A-MPDU frames must not be mixed into the burst */
		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
			break;

		ath_set_rates(tid->an->vif, tid->an->sta, bf);
	} while (1);
}

1355 1356
/*
 * Try to transmit buffered frames for one TID, either as an A-MPDU
 * aggregate or as a short burst of unaggregated frames.
 *
 * Returns true if anything was queued to the hardware.  Sets *stop when
 * the hardware queue is already deep enough and scheduling should end.
 */
static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid, bool *stop)
{
	struct ath_buf *bf;
	struct ieee80211_tx_info *tx_info;
	struct sk_buff_head *tid_q;
	struct list_head bf_q;
	int aggr_len = 0;
	bool aggr, last = true;

	if (!ath_tid_has_buffered(tid))
		return false;

	INIT_LIST_HEAD(&bf_q);

	bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
	if (!bf)
		return false;

	/* back off when the hardware queue has enough pending work */
	tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
	aggr = !!(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
	if ((aggr && txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) ||
		(!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) {
		*stop = true;
		return false;
	}

	ath_set_rates(tid->an->vif, tid->an->sta, bf);
	if (aggr)
		last = ath_tx_form_aggr(sc, txq, tid, &bf_q, bf,
					tid_q, &aggr_len);
	else
		ath_tx_form_burst(sc, txq, tid, &bf_q, bf, tid_q);

	if (list_empty(&bf_q))
		return false;

	/* ask the hardware to clear its PS filter for this station */
	if (tid->ac->clear_ps_filter || tid->an->no_ps_filter) {
		tid->ac->clear_ps_filter = false;
		tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
	}

	ath_tx_fill_desc(sc, bf, txq, aggr_len);
	ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	return true;
}

1402 1403
/*
 * mac80211 callback: start a TX BA (aggregation) session for one TID.
 * Initializes the block-ack window and reports the starting sequence
 * number via *ssn.  Always returns 0.
 */
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_txq *txq;
	struct ath_node *an;
	u8 density;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);
	txq = txtid->ac->txq;

	ath_txq_lock(sc, txq);

	/* update ampdu factor/density, they may have changed. This may happen
	 * in HT IBSS when a beacon with HT-info is received after the station
	 * has already been added.
	 */
	if (sta->ht_cap.ht_supported) {
		an->maxampdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
				      sta->ht_cap.ampdu_factor)) - 1;
		density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
		an->mpdudensity = density;
	}

	/* force sequence number allocation for pending frames */
	ath_tx_tid_change_state(sc, txtid);

	txtid->active = true;
	*ssn = txtid->seq_start = txtid->seq_next;
	txtid->bar_index = -1;

	/* reset the block-ack window tracking state */
	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	ath_txq_unlock_complete(sc, txq);

	return 0;
}
1441

1442
/*
 * mac80211 callback: tear down the TX BA session for one TID.  Flushes
 * frames still tracked in the block-ack window under the queue lock.
 */
void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	ath_txq_lock(sc, txq);
	txtid->active = false;
	ath_tx_flush_tid(sc, txtid);
	ath_tx_tid_change_state(sc, txtid);
	ath_txq_unlock_complete(sc, txq);
}
1454

1455 1456
/*
 * Station entered power-save: remove all of its scheduled TIDs (and, if
 * empty, their access categories) from the scheduler lists and tell
 * mac80211 which TIDs still have frames buffered in the driver.
 */
void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
		       struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	bool buffered;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		ath_txq_lock(sc, txq);

		/* nothing to dequeue for an unscheduled tid */
		if (!tid->sched) {
			ath_txq_unlock(sc, txq);
			continue;
		}

		buffered = ath_tid_has_buffered(tid);

		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

		ath_txq_unlock(sc, txq);

		/* must be called without the queue lock held */
		ieee80211_sta_set_buffered(sta, tidno, buffered);
	}
}

/*
 * Station woke up from power-save: re-queue every TID that still has
 * buffered frames and kick the scheduler.  Also request clearing of the
 * hardware PS filter for each access category.
 */
void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		ath_txq_lock(sc, txq);
		ac->clear_ps_filter = true;

		if (ath_tid_has_buffered(tid)) {
			ath_tx_queue_tid(sc, txq, tid);
			ath_txq_schedule(sc, txq);
		}

		ath_txq_unlock_complete(sc, txq);
	}
}

1518 1519
/*
 * Resume transmission for a TID after a BA session became operational:
 * set the negotiated block-ack window size and reschedule the TID if it
 * has buffered frames.
 */
void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
			u16 tidno)
{
	struct ath_atx_tid *tid;
	struct ath_node *an;
	struct ath_txq *txq;

	an = (struct ath_node *)sta->drv_priv;
	tid = ATH_AN_2_TID(an, tidno);
	txq = tid->ac->txq;

	ath_txq_lock(sc, txq);

	/* window size comes from the peer's advertised A-MPDU factor */
	tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;

	if (ath_tid_has_buffered(tid)) {
		ath_tx_queue_tid(sc, txq, tid);
		ath_txq_schedule(sc, txq);
	}

	ath_txq_unlock_complete(sc, txq);
}

1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552
/*
 * mac80211 callback for U-APSD: release up to "nframes" buffered frames
 * for the TIDs given in the "tids" bitmap onto the UAPSD queue.  The
 * last released frame is tagged with EOSP to end the service period.
 */
void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
				   struct ieee80211_sta *sta,
				   u16 tids, int nframes,
				   enum ieee80211_frame_release_type reason,
				   bool more_data)
{
	struct ath_softc *sc = hw->priv;
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_txq *txq = sc->tx.uapsdq;
	struct ieee80211_tx_info *info;
	struct list_head bf_q;
	struct ath_buf *bf_tail = NULL, *bf;
	struct sk_buff_head *tid_q;
	int sent = 0;
	int i;

	INIT_LIST_HEAD(&bf_q);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ath_atx_tid *tid;

		if (!(tids & 1))
			continue;

		tid = ATH_AN_2_TID(an, i);

		ath_txq_lock(sc, tid->ac->txq);
		while (nframes > 0) {
			bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid, &tid_q);
			if (!bf)
				break;

			__skb_unlink(bf->bf_mpdu, tid_q);
			list_add_tail(&bf->list, &bf_q);
			ath_set_rates(tid->an->vif, tid->an->sta, bf);
			/* released frames go out as singles, not aggregates */
			if (bf_isampdu(bf)) {
				ath_tx_addto_baw(sc, tid, bf);
				bf->bf_state.bf_type &= ~BUF_AGGR;
			}
			if (bf_tail)
				bf_tail->bf_next = bf;

			bf_tail = bf;
			nframes--;
			sent++;
			TX_STAT_INC(txq->axq_qnum, a_queued_hw);

			if (an->sta && !ath_tid_has_buffered(tid))
				ieee80211_sta_set_buffered(an->sta, i, false);
		}
		ath_txq_unlock_complete(sc, tid->ac->txq);
	}

	if (list_empty(&bf_q))
		return;

	/* the last frame of the service period carries EOSP */
	info = IEEE80211_SKB_CB(bf_tail->bf_mpdu);
	info->flags |= IEEE80211_TX_STATUS_EOSP;

	bf = list_first_entry(&bf_q, struct ath_buf, list);
	ath_txq_lock(sc, txq);
	ath_tx_fill_desc(sc, bf, txq, 0);
	ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	ath_txq_unlock(sc, txq);
}

S
Sujith 已提交
1606 1607 1608
/********************/
/* Queue Management */
/********************/
1609

S
Sujith 已提交
1610
/*
 * Allocate and initialize a hardware transmit queue of the given type
 * and access-category subtype.  Returns the software queue object, or
 * NULL when the hardware has no free queue.
 */
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[IEEE80211_AC_BE] = ATH_TXQ_AC_BE,
		[IEEE80211_AC_BK] = ATH_TXQ_AC_BK,
		[IEEE80211_AC_VI] = ATH_TXQ_AC_VI,
		[IEEE80211_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		__skb_queue_head_init(&txq->complete_q);
		INIT_LIST_HEAD(&txq->axq_q);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
	}
	return &sc->tx.txq[axq_qnum];
}

S
Sujith 已提交
1682 1683 1684
/*
 * Update the contention parameters (AIFS, CWmin/max, burst/ready time)
 * of an already-initialized hardware queue.  Returns 0 on success or
 * -EIO when the hardware rejects the new properties.
 */
int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	/* read-modify-write: keep the fields not covered by qinfo */
	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

/*
 * Recompute the CAB (content-after-beacon) queue ready time as a
 * percentage of the beacon interval.  Always returns 0.
 */
int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	struct ath_beacon_config *cur_conf = &sc->cur_chan->beacon;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);

	qi.tqi_readyTime = (TU_TO_USEC(cur_conf->beacon_interval) *
			    ATH_CABQ_READY_TIME) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

1724
/*
 * Complete (with flush status) every buffer chain on "list".  Stale
 * buffers — already reaped by the completion path — are simply returned
 * to the free pool.
 */
static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *list)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	/* report frames as flushed, not as real tx failures */
	ts.ts_status = ATH9K_TX_FLUSH;
	INIT_LIST_HEAD(&bf_head);

	while (!list_empty(list)) {
		bf = list_first_entry(list, struct ath_buf, list);

		if (bf->bf_state.stale) {
			list_del(&bf->list);

			ath_tx_return_buffer(sc, bf);
			continue;
		}

		/* cut one complete frame/aggregate off the list */
		lastbf = bf->bf_lastbf;
		list_cut_position(&bf_head, list, &lastbf->list);
		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
	}
}
1750

1751 1752 1753 1754 1755 1756
/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath_txq_lock(sc, txq);

	/* EDMA chips also keep per-queue FIFO lists that must be drained */
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		int idx = txq->txq_tailidx;

		while (!list_empty(&txq->txq_fifo[idx])) {
			ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx]);

			INCR(idx, ATH_TXFIFO_DEPTH);
		}
		txq->txq_tailidx = idx;
	}

	txq->axq_link = NULL;
	txq->axq_tx_inprogress = false;
	ath_drain_txq_list(sc, txq, &txq->axq_q);

	ath_txq_unlock_complete(sc, txq);
}

1779
/*
 * Abort DMA and drain every configured TX queue.  Returns true when all
 * hardware queues stopped cleanly, false if any still had pending DMA.
 */
bool ath_drain_all_txq(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i;
	u32 npend = 0;

	/* nothing to do while the device is torn down */
	if (test_bit(ATH_OP_INVALID, &common->op_flags))
		return true;

	ath9k_hw_abort_tx_dma(ah);

	/* Check if any queue remains active */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		if (!sc->tx.txq[i].axq_depth)
			continue;

		if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
			npend |= BIT(i);
	}

	if (npend)
		ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		/*
		 * The caller will resume queues with ieee80211_wake_queues.
		 * Mark the queue as not stopped to prevent ath_tx_complete
		 * from waking the queue too early.
		 */
		txq = &sc->tx.txq[i];
		txq->stopped = false;
		ath_draintxq(sc, txq);
	}

	return !npend;
}
1823

S
Sujith 已提交
1824
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
S
Sujith 已提交
1825
{
S
Sujith 已提交
1826 1827
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
S
Sujith 已提交
1828
}
1829

1830
/* For each acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_atx_ac *ac, *last_ac;
	struct ath_atx_tid *tid, *last_tid;
	struct list_head *ac_list;
	bool sent = false;

	if (txq->mac80211_qnum < 0)
		return;

	/* the per-channel AC list is protected by chan_lock */
	spin_lock_bh(&sc->chan_lock);
	ac_list = &sc->cur_chan->acq[txq->mac80211_qnum];
	spin_unlock_bh(&sc->chan_lock);

	if (test_bit(ATH_OP_HW_RESET, &common->op_flags) ||
	    list_empty(ac_list))
		return;

	spin_lock_bh(&sc->chan_lock);
	rcu_read_lock();

	/* remember the tail so we make at most one full round-robin pass */
	last_ac = list_entry(ac_list->prev, struct ath_atx_ac, list);
	while (!list_empty(ac_list)) {
		bool stop = false;

		if (sc->cur_chan->stopped)
			break;

		ac = list_first_entry(ac_list, struct ath_atx_ac, list);
		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
		list_del(&ac->list);
		ac->sched = false;

		while (!list_empty(&ac->tid_q)) {

			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
					       list);
			list_del(&tid->list);
			tid->sched = false;

			if (ath_tx_sched_aggr(sc, txq, tid, &stop))
				sent = true;

			/*
			 * add tid to round-robin queue if more frames
			 * are pending for the tid
			 */
			if (ath_tid_has_buffered(tid))
				ath_tx_queue_tid(sc, txq, tid);

			if (stop || tid == last_tid)
				break;
		}

		/* re-queue the AC if it still has pending TIDs */
		if (!list_empty(&ac->tid_q) && !ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, ac_list);
		}

		if (stop)
			break;

		/* completed one pass; keep going only if progress was made */
		if (ac == last_ac) {
			if (!sent)
				break;

			sent = false;
			last_ac = list_entry(ac_list->prev,
					     struct ath_atx_ac, list);
		}
	}

	rcu_read_unlock();
	spin_unlock_bh(&sc->chan_lock);
}
1909

1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923
/* Run the TX scheduler once on every access-category queue. */
void ath_txq_schedule_all(struct ath_softc *sc)
{
	int ac;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		struct ath_txq *txq = sc->tx.txq_map[ac];

		spin_lock_bh(&txq->axq_lock);
		ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}

S
Sujith 已提交
1924 1925 1926 1927
/***********/
/* TX, DMA */
/***********/

1928
/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *bf_last;
	bool puttxbuf = false;
	bool edma;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	bf = list_first_entry(head, struct ath_buf, list);
	bf_last = list_entry(head->prev, struct ath_buf, list);

	ath_dbg(common, QUEUE, "qnum: %d, txq depth: %d\n",
		txq->axq_qnum, txq->axq_depth);

	if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
		/* EDMA path: push the chain into a free FIFO slot */
		list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		puttxbuf = true;
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		/* legacy path: either link onto the DMA chain or start fresh */
		if (txq->axq_link) {
			ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
			ath_dbg(common, XMIT, "link[%u] (%p)=%llx (%p)\n",
				txq->axq_qnum, txq->axq_link,
				ito64(bf->bf_daddr), bf->bf_desc);
		} else if (!edma)
			puttxbuf = true;

		txq->axq_link = bf_last->bf_desc;
	}

	if (puttxbuf) {
		TX_STAT_INC(txq->axq_qnum, puttxbuf);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n",
			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	}

	if (!edma || sc->tx99_state) {
		TX_STAT_INC(txq->axq_qnum, txstart);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}

	/* account queue depth per logical frame, and break aggregate links */
	if (!internal) {
		while (bf) {
			txq->axq_depth++;
			if (bf_is_ampdu_not_probing(bf))
				txq->axq_ampdu_depth++;

			bf_last = bf->bf_lastbf;
			bf = bf_last->bf_next;
			bf_last->bf_next = NULL;
		}
	}
}
1998

F
Felix Fietkau 已提交
1999
/*
 * Send a single (non-aggregated) frame: build a one-buffer chain, add
 * it to the block-ack window when it belongs to an A-MPDU session, fill
 * the descriptor and hand it to the hardware queue.
 */
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_frame_info *fi = get_frame_info(skb);
	struct list_head bf_head;
	struct ath_buf *bf = fi->bf;

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);
	bf->bf_state.bf_type = 0;
	/* BA-tracked frames must be recorded in the window even when sent alone */
	if (tid && (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
		bf->bf_state.bf_type = BUF_AMPDU;
		ath_tx_addto_baw(sc, tid, bf);
	}

	bf->bf_next = NULL;
	bf->bf_lastbf = bf;
	ath_tx_fill_desc(sc, bf, txq, fi->framelen);
	ath_tx_txqaddbuf(sc, txq, &bf_head, false);
	TX_STAT_INC(txq->axq_qnum, queued);
}

2022 2023 2024
static void setup_frame_info(struct ieee80211_hw *hw,
			     struct ieee80211_sta *sta,
			     struct sk_buff *skb,
2025
			     int framelen)
S
Sujith 已提交
2026 2027
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
2028
	struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
2029
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2030
	const struct ieee80211_rate *rate;
2031
	struct ath_frame_info *fi = get_frame_info(skb);
2032
	struct ath_node *an = NULL;
2033
	enum ath9k_key_type keytype;
2034 2035 2036 2037 2038 2039 2040 2041 2042 2043
	bool short_preamble = false;

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	if (tx_info->control.vif &&
	    tx_info->control.vif->bss_conf.use_short_preamble)
		short_preamble = true;
S
Sujith 已提交
2044

2045
	rate = ieee80211_get_rts_cts_rate(hw, tx_info);
2046
	keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
S
Sujith 已提交
2047

2048 2049 2050
	if (sta)
		an = (struct ath_node *) sta->drv_priv;

2051
	memset(fi, 0, sizeof(*fi));
2052
	fi->txq = -1;
2053 2054
	if (hw_key)
		fi->keyix = hw_key->hw_key_idx;
2055 2056
	else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
		fi->keyix = an->ps_key;
2057 2058 2059 2060
	else
		fi->keyix = ATH9K_TXKEYIX_INVALID;
	fi->keytype = keytype;
	fi->framelen = framelen;
2061 2062 2063

	if (!rate)
		return;
2064 2065 2066
	fi->rtscts_rate = rate->hw_value;
	if (short_preamble)
		fi->rtscts_rate |= rate->hw_value_short;
S
Sujith 已提交
2067 2068
}

2069 2070 2071 2072
/*
 * Possibly reduce the TX chainmask for the given rate, to satisfy
 * regulatory (APM) or BT-coexistence constraints.  Returns the
 * (possibly reduced) chainmask to use.
 */
u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_channel *curchan = ah->curchan;

	/* APM hardware on 5 GHz with all three chains and a low rate:
	 * drop to chainmask 0x3 */
	if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && IS_CHAN_5GHZ(curchan) &&
	    (chainmask == 0x7) && (rate < 0x90))
		return 0x3;

	/* AR9462 with BT coex enabled transmits CCK rates on chainmask 0x2 */
	if (AR_SREV_9462(ah) && ath9k_hw_btcoex_is_enabled(ah) &&
	    IS_CCK_RATE(rate))
		return 0x2;

	return chainmask;
}

2084 2085 2086 2087
/*
 * Assign a descriptor (and sequence number if necessary,
 * and map buffer for DMA. Frees skb on error
 */
2088
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
F
Felix Fietkau 已提交
2089
					   struct ath_txq *txq,
2090
					   struct ath_atx_tid *tid,
F
Felix Fietkau 已提交
2091
					   struct sk_buff *skb)
2092
{
F
Felix Fietkau 已提交
2093
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2094
	struct ath_frame_info *fi = get_frame_info(skb);
2095
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
F
Felix Fietkau 已提交
2096
	struct ath_buf *bf;
S
Sujith Manoharan 已提交
2097
	int fragno;
2098
	u16 seqno;
F
Felix Fietkau 已提交
2099 2100 2101

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
2102
		ath_dbg(common, XMIT, "TX buffers are full\n");
F
Felix Fietkau 已提交
2103
		return NULL;
F
Felix Fietkau 已提交
2104
	}
2105

S
Sujith 已提交
2106
	ATH_TXBUF_RESET(bf);
2107

2108
	if (tid && ieee80211_is_data_present(hdr->frame_control)) {
S
Sujith Manoharan 已提交
2109
		fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
2110 2111
		seqno = tid->seq_next;
		hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
S
Sujith Manoharan 已提交
2112 2113 2114 2115 2116 2117 2118

		if (fragno)
			hdr->seq_ctrl |= cpu_to_le16(fragno);

		if (!ieee80211_has_morefrags(hdr->frame_control))
			INCR(tid->seq_next, IEEE80211_SEQ_MAX);

2119 2120 2121
		bf->bf_state.seqno = seqno;
	}

2122
	bf->bf_mpdu = skb;
2123

B
Ben Greear 已提交
2124 2125 2126
	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
2127
		bf->bf_mpdu = NULL;
2128
		bf->bf_buf_addr = 0;
2129 2130
		ath_err(ath9k_hw_common(sc->sc_ah),
			"dma_mapping_error() on TX\n");
F
Felix Fietkau 已提交
2131
		ath_tx_return_buffer(sc, bf);
F
Felix Fietkau 已提交
2132
		return NULL;
2133 2134
	}

2135
	fi->bf = bf;
F
Felix Fietkau 已提交
2136 2137 2138 2139

	return bf;
}

2140 2141
static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
			  struct ath_tx_control *txctl)
2142
{
2143 2144
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2145
	struct ieee80211_sta *sta = txctl->sta;
2146
	struct ieee80211_vif *vif = info->control.vif;
2147
	struct ath_vif *avp;
2148
	struct ath_softc *sc = hw->priv;
F
Felix Fietkau 已提交
2149
	int frmlen = skb->len + FCS_LEN;
2150
	int padpos, padsize;
2151

2152 2153 2154
	/* NOTE:  sta can be NULL according to net/mac80211.h */
	if (sta)
		txctl->an = (struct ath_node *)sta->drv_priv;
2155 2156 2157 2158
	else if (vif && ieee80211_is_data(hdr->frame_control)) {
		avp = (void *)vif->drv_priv;
		txctl->an = &avp->mcast_node;
	}
2159

F
Felix Fietkau 已提交
2160 2161 2162
	if (info->control.hw_key)
		frmlen += info->control.hw_key->icv_len;

2163
	/*
S
Sujith 已提交
2164 2165 2166
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
2167
	 */
S
Sujith 已提交
2168 2169 2170 2171 2172
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
2173 2174
	}

2175 2176 2177 2178 2179
	if ((vif && vif->type != NL80211_IFTYPE_AP &&
	            vif->type != NL80211_IFTYPE_AP_VLAN) ||
	    !ieee80211_is_data(hdr->frame_control))
		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;

2180
	/* Add the padding after the header if this is not already done */
2181
	padpos = ieee80211_hdrlen(hdr->frame_control);
2182 2183 2184 2185
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize)
			return -ENOMEM;
2186

2187 2188
		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
2189 2190
	}

2191
	setup_frame_info(hw, sta, skb, frmlen);
2192 2193 2194
	return 0;
}

2195

2196 2197 2198 2199 2200 2201 2202 2203
/* Upon failure caller should free skb */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = txctl->sta;
	struct ieee80211_vif *vif = info->control.vif;
2204
	struct ath_frame_info *fi = get_frame_info(skb);
2205
	struct ath_vif *avp = NULL;
2206 2207 2208 2209
	struct ath_softc *sc = hw->priv;
	struct ath_txq *txq = txctl->txq;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf;
2210
	bool queue;
2211
	int q, hw_queue;
2212 2213
	int ret;

2214 2215 2216
	if (vif)
		avp = (void *)vif->drv_priv;

2217 2218 2219
	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
		txctl->force_channel = true;

2220 2221 2222 2223 2224
	ret = ath_tx_prepare(hw, skb, txctl);
	if (ret)
	    return ret;

	hdr = (struct ieee80211_hdr *) skb->data;
2225 2226 2227 2228 2229
	/*
	 * At this point, the vif, hw_key and sta pointers in the tx control
	 * info are no longer valid (overwritten by the ath_frame_info data.
	 */

2230
	q = skb_get_queue_mapping(skb);
2231
	hw_queue = (info->hw_queue >= sc->hw->queues - 2) ? q : info->hw_queue;
F
Felix Fietkau 已提交
2232 2233

	ath_txq_lock(sc, txq);
2234 2235 2236 2237 2238 2239 2240
	if (txq == sc->tx.txq_map[q]) {
		fi->txq = q;
		if (++txq->pending_frames > sc->tx.txq_max_pending[q] &&
		    !txq->stopped) {
			ieee80211_stop_queue(sc->hw, hw_queue);
			txq->stopped = true;
		}
2241 2242
	}

2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257
	queue = ieee80211_is_data_present(hdr->frame_control);

	/* Force queueing of all frames that belong to a virtual interface on
	 * a different channel context, to ensure that they are sent on the
	 * correct channel.
	 */
	if (((avp && avp->chanctx != sc->cur_chan) ||
	     sc->cur_chan->stopped) && !txctl->force_channel) {
		if (!txctl->an)
			txctl->an = &avp->mcast_node;
		info->flags &= ~IEEE80211_TX_CTL_PS_RESPONSE;
		queue = true;
	}

	if (txctl->an && queue)
2258 2259
		tid = ath_get_skb_tid(sc, txctl->an, skb);

2260 2261
	if (info->flags & (IEEE80211_TX_CTL_PS_RESPONSE |
			   IEEE80211_TX_CTL_TX_OFFCHAN)) {
2262 2263 2264
		ath_txq_unlock(sc, txq);
		txq = sc->tx.uapsdq;
		ath_txq_lock(sc, txq);
2265
	} else if (txctl->an && queue) {
2266 2267
		WARN_ON(tid->ac->txq != txctl->txq);

2268 2269 2270
		if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
			tid->ac->clear_ps_filter = true;

2271
		/*
2272 2273
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
2274
		 */
2275 2276 2277
		TX_STAT_INC(txq->axq_qnum, a_queued_sw);
		__skb_queue_tail(&tid->buf_q, skb);
		if (!txctl->an->sleeping)
2278
			ath_tx_queue_tid(sc, txq, tid);
2279 2280

		ath_txq_schedule(sc, txq);
2281 2282 2283
		goto out;
	}

2284
	bf = ath_tx_setup_buffer(sc, txq, tid, skb);
2285
	if (!bf) {
2286
		ath_txq_skb_done(sc, txq, skb);
2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298
		if (txctl->paprd)
			dev_kfree_skb_any(skb);
		else
			ieee80211_free_txskb(sc->hw, skb);
		goto out;
	}

	bf->bf_state.bfs_paprd = txctl->paprd;

	if (txctl->paprd)
		bf->bf_state.bfs_paprd_timestamp = jiffies;

2299
	ath_set_rates(vif, sta, bf);
2300
	ath_tx_send_normal(sc, txq, tid, skb);
F
Felix Fietkau 已提交
2301

2302
out:
F
Felix Fietkau 已提交
2303
	ath_txq_unlock(sc, txq);
F
Felix Fietkau 已提交
2304

2305
	return 0;
2306 2307
}

2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323
void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		 struct sk_buff *skb)
{
	struct ath_softc *sc = hw->priv;
	struct ath_tx_control txctl = {
		.txq = sc->beacon.cabq
	};
	struct ath_tx_info info = {};
	struct ieee80211_hdr *hdr;
	struct ath_buf *bf_tail = NULL;
	struct ath_buf *bf;
	LIST_HEAD(bf_q);
	int duration = 0;
	int max_duration;

	max_duration =
2324 2325
		sc->cur_chan->beacon.beacon_interval * 1000 *
		sc->cur_chan->beacon.dtim_period / ATH_BCBUF;
2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338

	do {
		struct ath_frame_info *fi = get_frame_info(skb);

		if (ath_tx_prepare(hw, skb, &txctl))
			break;

		bf = ath_tx_setup_buffer(sc, txctl.txq, NULL, skb);
		if (!bf)
			break;

		bf->bf_lastbf = bf;
		ath_set_rates(vif, NULL, bf);
S
Sujith Manoharan 已提交
2339
		ath_buf_set_rate(sc, bf, &info, fi->framelen, false);
2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375
		duration += info.rates[0].PktDuration;
		if (bf_tail)
			bf_tail->bf_next = bf;

		list_add_tail(&bf->list, &bf_q);
		bf_tail = bf;
		skb = NULL;

		if (duration > max_duration)
			break;

		skb = ieee80211_get_buffered_bc(hw, vif);
	} while(skb);

	if (skb)
		ieee80211_free_txskb(hw, skb);

	if (list_empty(&bf_q))
		return;

	bf = list_first_entry(&bf_q, struct ath_buf, list);
	hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;

	if (hdr->frame_control & IEEE80211_FCTL_MOREDATA) {
		hdr->frame_control &= ~IEEE80211_FCTL_MOREDATA;
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
			sizeof(*hdr), DMA_TO_DEVICE);
	}

	ath_txq_lock(sc, txctl.txq);
	ath_tx_fill_desc(sc, bf, txctl.txq, 0);
	ath_tx_txqaddbuf(sc, txctl.txq, &bf_q, false);
	TX_STAT_INC(txctl.txq->axq_qnum, queued);
	ath_txq_unlock(sc, txctl.txq);
}

S
Sujith 已提交
2376 2377 2378
/*****************/
/* TX Completion */
/*****************/
S
Sujith 已提交
2379

S
Sujith 已提交
2380
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
2381
			    int tx_flags, struct ath_txq *txq)
S
Sujith 已提交
2382
{
S
Sujith 已提交
2383
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
2384
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2385
	struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
2386
	int padpos, padsize;
S
Sujith Manoharan 已提交
2387
	unsigned long flags;
S
Sujith 已提交
2388

2389
	ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
S
Sujith 已提交
2390

2391
	if (sc->sc_ah->caldata)
2392
		set_bit(PAPRD_PACKET_SENT, &sc->sc_ah->caldata->cal_flags);
2393

2394
	if (!(tx_flags & ATH_TX_ERROR))
S
Sujith 已提交
2395 2396
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;
S
Sujith 已提交
2397

2398
	padpos = ieee80211_hdrlen(hdr->frame_control);
2399 2400 2401 2402 2403 2404 2405 2406
	padsize = padpos & 3;
	if (padsize && skb->len>padpos+padsize) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
S
Sujith 已提交
2407
	}
S
Sujith 已提交
2408

S
Sujith Manoharan 已提交
2409
	spin_lock_irqsave(&sc->sc_pm_lock, flags);
2410
	if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
S
Sujith 已提交
2411
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
2412
		ath_dbg(common, PS,
J
Joe Perches 已提交
2413
			"Going back to sleep after having received TX status (0x%lx)\n",
S
Sujith 已提交
2414 2415 2416 2417
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
2418
	}
S
Sujith Manoharan 已提交
2419
	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
2420

2421
	__skb_queue_tail(&txq->complete_q, skb);
2422
	ath_txq_skb_done(sc, txq, skb);
S
Sujith 已提交
2423
}
2424

S
Sujith 已提交
2425
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
2426
				struct ath_txq *txq, struct list_head *bf_q,
2427
				struct ath_tx_status *ts, int txok)
2428
{
S
Sujith 已提交
2429
	struct sk_buff *skb = bf->bf_mpdu;
2430
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
S
Sujith 已提交
2431
	unsigned long flags;
2432
	int tx_flags = 0;
2433

2434
	if (!txok)
2435
		tx_flags |= ATH_TX_ERROR;
2436

2437 2438 2439
	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;

B
Ben Greear 已提交
2440
	dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
2441
	bf->bf_buf_addr = 0;
L
Luis R. Rodriguez 已提交
2442 2443
	if (sc->tx99_state)
		goto skip_tx_complete;
2444 2445

	if (bf->bf_state.bfs_paprd) {
2446 2447 2448
		if (time_after(jiffies,
				bf->bf_state.bfs_paprd_timestamp +
				msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
2449
			dev_kfree_skb_any(skb);
2450
		else
2451
			complete(&sc->paprd_complete);
2452
	} else {
2453
		ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
2454
		ath_tx_complete(sc, skb, tx_flags, txq);
2455
	}
L
Luis R. Rodriguez 已提交
2456
skip_tx_complete:
2457 2458 2459 2460
	/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
	 * accidentally reference it later.
	 */
	bf->bf_mpdu = NULL;
S
Sujith 已提交
2461 2462 2463 2464 2465 2466 2467

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
2468 2469
}

F
Felix Fietkau 已提交
2470 2471
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
2472
			     int txok)
2473
{
S
Sujith 已提交
2474
	struct sk_buff *skb = bf->bf_mpdu;
2475
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
S
Sujith 已提交
2476
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
F
Felix Fietkau 已提交
2477
	struct ieee80211_hw *hw = sc->hw;
2478
	struct ath_hw *ah = sc->sc_ah;
2479
	u8 i, tx_rateindex;
2480

S
Sujith 已提交
2481
	if (txok)
2482
		tx_info->status.ack_signal = ts->ts_rssi;
S
Sujith 已提交
2483

2484
	tx_rateindex = ts->ts_rateindex;
2485 2486
	WARN_ON(tx_rateindex >= hw->max_rates);

2487
	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
2488
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
2489

2490
		BUG_ON(nbad > nframes);
2491
	}
2492 2493
	tx_info->status.ampdu_len = nframes;
	tx_info->status.ampdu_ack_len = nframes - nbad;
2494

2495
	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
2496
	    (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508
		/*
		 * If an underrun error is seen assume it as an excessive
		 * retry only if max frame trigger level has been reached
		 * (2 KB for single stream, and 4 KB for dual stream).
		 * Adjust the long retry as if the frame was tried
		 * hw->max_rate_tries times to affect how rate control updates
		 * PER for the failed rate.
		 * In case of congestion on the bus penalizing this type of
		 * underruns should help hardware actually transmit new frames
		 * successfully by eventually preferring slower rates.
		 * This itself should also alleviate congestion on the bus.
		 */
2509 2510 2511
		if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
		                             ATH9K_TX_DELIM_UNDERRUN)) &&
		    ieee80211_is_data(hdr->frame_control) &&
2512
		    ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
2513 2514
			tx_info->status.rates[tx_rateindex].count =
				hw->max_rate_tries;
2515
	}
2516

2517
	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
2518
		tx_info->status.rates[i].count = 0;
2519 2520
		tx_info->status.rates[i].idx = -1;
	}
2521

2522
	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
2523 2524
}

S
Sujith 已提交
2525
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2526
{
2527
	struct ath_hw *ah = sc->sc_ah;
2528
	struct ath_common *common = ath9k_hw_common(ah);
S
Sujith 已提交
2529
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
2530
	struct list_head bf_head;
S
Sujith 已提交
2531
	struct ath_desc *ds;
2532
	struct ath_tx_status ts;
S
Sujith 已提交
2533
	int status;
2534

2535
	ath_dbg(common, QUEUE, "tx queue %d (%x), link %p\n",
J
Joe Perches 已提交
2536 2537
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);
2538

F
Felix Fietkau 已提交
2539
	ath_txq_lock(sc, txq);
2540
	for (;;) {
2541
		if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
2542 2543
			break;

2544 2545
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
2546
			ath_txq_schedule(sc, txq);
2547 2548 2549 2550
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

S
Sujith 已提交
2551 2552 2553 2554 2555 2556 2557 2558 2559
		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-load the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
2560
		if (bf->bf_state.stale) {
S
Sujith 已提交
2561
			bf_held = bf;
2562
			if (list_is_last(&bf_held->list, &txq->axq_q))
S
Sujith 已提交
2563
				break;
2564 2565 2566

			bf = list_entry(bf_held->list.next, struct ath_buf,
					list);
2567 2568 2569
		}

		lastbf = bf->bf_lastbf;
S
Sujith 已提交
2570
		ds = lastbf->bf_desc;
2571

2572 2573
		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
2574
		if (status == -EINPROGRESS)
S
Sujith 已提交
2575
			break;
2576

2577
		TX_STAT_INC(txq->axq_qnum, txprocdesc);
2578

S
Sujith 已提交
2579 2580 2581 2582 2583
		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
2584
		lastbf->bf_state.stale = true;
S
Sujith 已提交
2585 2586 2587 2588
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
				&txq->axq_q, lastbf->list.prev);
2589

2590
		if (bf_held) {
2591 2592
			list_del(&bf_held->list);
			ath_tx_return_buffer(sc, bf_held);
S
Sujith 已提交
2593
		}
2594

2595
		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
2596
	}
F
Felix Fietkau 已提交
2597
	ath_txq_unlock_complete(sc, txq);
2598 2599
}

S
Sujith 已提交
2600
void ath_tx_tasklet(struct ath_softc *sc)
2601
{
2602 2603
	struct ath_hw *ah = sc->sc_ah;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1) & ah->intr_txqs;
S
Sujith 已提交
2604
	int i;
2605

S
Sujith 已提交
2606 2607 2608
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
2609 2610 2611
	}
}

2612 2613
void ath_tx_edma_tasklet(struct ath_softc *sc)
{
2614
	struct ath_tx_status ts;
2615 2616 2617 2618 2619
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
2620
	struct list_head *fifo_list;
2621 2622 2623
	int status;

	for (;;) {
2624
		if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
2625 2626
			break;

2627
		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
2628 2629 2630
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
2631
			ath_dbg(common, XMIT, "Error processing tx status\n");
2632 2633 2634
			break;
		}

2635 2636 2637 2638
		/* Process beacon completions separately */
		if (ts.qid == sc->beacon.beaconq) {
			sc->beacon.tx_processed = true;
			sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
2639

2640 2641 2642 2643 2644
			if (ath9k_is_chanctx_enabled()) {
				ath_chanctx_event(sc, NULL,
						  ATH_CHANCTX_EVENT_BEACON_SENT);
			}

2645
			ath9k_csa_update(sc);
2646
			continue;
2647
		}
2648

2649
		txq = &sc->tx.txq[ts.qid];
2650

F
Felix Fietkau 已提交
2651
		ath_txq_lock(sc, txq);
2652

2653 2654
		TX_STAT_INC(txq->axq_qnum, txprocdesc);

2655 2656
		fifo_list = &txq->txq_fifo[txq->txq_tailidx];
		if (list_empty(fifo_list)) {
F
Felix Fietkau 已提交
2657
			ath_txq_unlock(sc, txq);
2658 2659 2660
			return;
		}

2661
		bf = list_first_entry(fifo_list, struct ath_buf, list);
2662
		if (bf->bf_state.stale) {
2663 2664 2665 2666 2667
			list_del(&bf->list);
			ath_tx_return_buffer(sc, bf);
			bf = list_first_entry(fifo_list, struct ath_buf, list);
		}

2668 2669 2670
		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
2671 2672
		if (list_is_last(&lastbf->list, fifo_list)) {
			list_splice_tail_init(fifo_list, &bf_head);
2673
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2674

2675 2676
			if (!list_empty(&txq->axq_q)) {
				struct list_head bf_q;
2677

2678 2679 2680 2681 2682
				INIT_LIST_HEAD(&bf_q);
				txq->axq_link = NULL;
				list_splice_tail_init(&txq->axq_q, &bf_q);
				ath_tx_txqaddbuf(sc, txq, &bf_q, true);
			}
2683
		} else {
2684
			lastbf->bf_state.stale = true;
2685 2686 2687
			if (bf != lastbf)
				list_cut_position(&bf_head, fifo_list,
						  lastbf->list.prev);
2688
		}
2689

2690
		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
F
Felix Fietkau 已提交
2691
		ath_txq_unlock_complete(sc, txq);
2692 2693 2694
	}
}

S
Sujith 已提交
2695 2696 2697
/*****************/
/* Init, Cleanup */
/*****************/
2698

2699 2700 2701 2702 2703 2704
static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
	struct ath_descdma *dd = &sc->txsdma;
	u8 txs_len = sc->sc_ah->caps.txs_len;

	dd->dd_desc_len = size * txs_len;
2705 2706
	dd->dd_desc = dmam_alloc_coherent(sc->dev, dd->dd_desc_len,
					  &dd->dd_desc_paddr, GFP_KERNEL);
2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725
	if (!dd->dd_desc)
		return -ENOMEM;

	return 0;
}

/* Set up the EDMA TX status ring and point the hardware at it. */
static int ath_tx_edma_init(struct ath_softc *sc)
{
	int err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);

	if (err)
		return err;

	ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
				  sc->txsdma.dd_desc_paddr,
				  ATH_TXSTATUS_RING_SIZE);
	return 0;
}

S
Sujith 已提交
2726
int ath_tx_init(struct ath_softc *sc, int nbufs)
2727
{
2728
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
S
Sujith 已提交
2729
	int error = 0;
2730

2731
	spin_lock_init(&sc->tx.txbuflock);
2732

2733
	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
2734
				  "tx", nbufs, 1, 1);
2735
	if (error != 0) {
2736 2737
		ath_err(common,
			"Failed to allocate tx descriptors: %d\n", error);
2738
		return error;
2739
	}
2740

2741
	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
2742
				  "beacon", ATH_BCBUF, 1, 1);
2743
	if (error != 0) {
2744 2745
		ath_err(common,
			"Failed to allocate beacon descriptors: %d\n", error);
2746
		return error;
2747
	}
2748

2749 2750
	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

2751
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2752
		error = ath_tx_edma_init(sc);
2753

S
Sujith 已提交
2754
	return error;
2755 2756 2757 2758
}

void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
2759 2760 2761
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;
2762

2763
	for (tidno = 0, tid = &an->tid[tidno];
2764
	     tidno < IEEE80211_NUM_TIDS;
2765 2766 2767 2768 2769 2770 2771
	     tidno++, tid++) {
		tid->an        = an;
		tid->tidno     = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size  = WME_MAX_BA;
		tid->baw_head  = tid->baw_tail = 0;
		tid->sched     = false;
2772
		tid->active	   = false;
2773
		__skb_queue_head_init(&tid->buf_q);
2774
		__skb_queue_head_init(&tid->retry_q);
2775
		acno = TID_TO_WME_AC(tidno);
2776
		tid->ac = &an->ac[acno];
2777
	}
2778

2779
	for (acno = 0, ac = &an->ac[acno];
2780
	     acno < IEEE80211_NUM_ACS; acno++, ac++) {
2781
		ac->sched    = false;
2782
		ac->clear_ps_filter = true;
2783
		ac->txq = sc->tx.txq_map[acno];
2784
		INIT_LIST_HEAD(&ac->tid_q);
2785 2786 2787
	}
}

S
Sujith 已提交
2788
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
2789
{
2790 2791
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
2792
	struct ath_txq *txq;
2793
	int tidno;
S
Sujith 已提交
2794

2795
	for (tidno = 0, tid = &an->tid[tidno];
2796
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
2797

2798
		ac = tid->ac;
2799
		txq = ac->txq;
2800

F
Felix Fietkau 已提交
2801
		ath_txq_lock(sc, txq);
2802 2803 2804 2805 2806 2807 2808 2809 2810

		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		if (ac->sched) {
			list_del(&ac->list);
			tid->ac->sched = false;
2811
		}
2812 2813

		ath_tid_drain(sc, txq, tid);
2814
		tid->active = false;
2815

F
Felix Fietkau 已提交
2816
		ath_txq_unlock(sc, txq);
2817 2818
	}
}
L
Luis R. Rodriguez 已提交
2819

2820 2821
#ifdef CONFIG_ATH9K_TX99

/*
 * Queue a frame for tx99 (continuous transmit test) mode: pad the
 * header, force a cleartext key setup, self-link the descriptor and
 * start the hardware.  Returns 0 on success or -EINVAL.
 */
int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
		    struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_buf *bf;
	int padpos, padsize;

	/* 4-byte-align the payload after the header, as the normal TX
	 * path does in ath_tx_prepare(). */
	padpos = ieee80211_hdrlen(hdr->frame_control);
	padsize = padpos & 3;

	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize) {
			/* Fixed misleading indentation: this return belongs
			 * to the headroom-failure branch. */
			ath_dbg(common, XMIT,
				"tx99 padding failed\n");
			return -EINVAL;
		}

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

	/* tx99 frames are sent in the clear */
	fi->keyix = ATH9K_TXKEYIX_INVALID;
	fi->framelen = skb->len + FCS_LEN;
	fi->keytype = ATH9K_KEY_TYPE_CLEAR;

	bf = ath_tx_setup_buffer(sc, txctl->txq, NULL, skb);
	if (!bf) {
		ath_dbg(common, XMIT, "tx99 buffer setup failed\n");
		return -EINVAL;
	}

	ath_set_rates(sc->tx99_vif, NULL, bf);

	/* Link the descriptor back to itself for continuous transmission */
	ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, bf->bf_daddr);
	ath9k_hw_tx99_start(sc->sc_ah, txctl->txq->axq_qnum);

	ath_tx_send_normal(sc, txctl->txq, NULL, skb);

	return 0;
}

#endif /* CONFIG_ATH9K_TX99 */