xmit.c 69.3 KB
Newer Older
1
/*
2
 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 4 5 6 7 8 9 10 11 12 13 14 15 16
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

17
#include <linux/dma-mapping.h>
S
Sujith 已提交
18
#include "ath9k.h"
19
#include "ar9003_mac.h"
20 21 22 23 24 25 26 27 28 29 30 31

/*
 * 802.11n PHY timing constants, used to convert between payload length,
 * OFDM symbol counts and airtime (microseconds).
 */
#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22	/* PLCP service + tail bits */
/* Number of spatial streams encoded in an HT MCS rate code (bits 3-6) */
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
/* Preamble / training / signal field durations in usec */
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))	/* 4 usec per stream */
/* symbols -> usec: full GI is 4 us/symbol, short GI is 3.6 us/symbol */
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */

/* usec -> whole symbols that fit in the given time budget */
#define TIME_SYMBOLS(t)         ((t) >> 2)
#define TIME_SYMBOLS_HALFGI(t)  (((t) * 5 - 4) / 18)
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)


38
/*
 * Data bits carried by one OFDM symbol for a single spatial stream,
 * indexed by [mcs % 8][is_40mhz]. Callers multiply by
 * HT_RC_2_STREAMS() to account for multi-stream MCS rates.
 */
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

/* HT (MCS) rate codes have bit 7 set */
#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

F
Felix Fietkau 已提交
52
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
53 54 55
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
S
Sujith 已提交
56
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
57
				struct ath_txq *txq, struct list_head *bf_q,
58
				struct ath_tx_status *ts, int txok);
59
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
60
			     struct list_head *head, bool internal);
F
Felix Fietkau 已提交
61 62
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
63
			     int txok);
64 65
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
66 67 68
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
F
Felix Fietkau 已提交
69
					   struct sk_buff *skb);
70

71
/*
 * Index of the HT mode / guard-interval combination, used as the
 * second index into sc->tx.max_aggr_framelen[][][].
 */
enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

S
Sujith 已提交
78 79 80
/*********************/
/* Aggregation logic */
/*********************/
81

82
/* Take the per-queue lock; annotated for sparse lock checking. */
void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
	__acquires(&txq->axq_lock)
{
	spin_lock_bh(&txq->axq_lock);
}

88
/* Drop the per-queue lock without delivering deferred tx status. */
void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
	__releases(&txq->axq_lock)
{
	spin_unlock_bh(&txq->axq_lock);
}

94
/*
 * Drop the per-queue lock, then report tx status for every frame that
 * was queued on txq->complete_q while the lock was held. The frames are
 * moved to a local list first so ieee80211_tx_status() is called without
 * holding axq_lock.
 */
void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
	__releases(&txq->axq_lock)
{
	struct sk_buff_head q;
	struct sk_buff *skb;

	__skb_queue_head_init(&q);
	skb_queue_splice_init(&txq->complete_q, &q);
	spin_unlock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&q)))
		ieee80211_tx_status(sc->hw, skb);
}

S
Sujith 已提交
108
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
S
Sujith 已提交
109
{
S
Sujith 已提交
110
	struct ath_atx_ac *ac = tid->ac;
S
Sujith 已提交
111

S
Sujith 已提交
112 113
	if (tid->paused)
		return;
S
Sujith 已提交
114

S
Sujith 已提交
115 116
	if (tid->sched)
		return;
S
Sujith 已提交
117

S
Sujith 已提交
118 119
	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);
S
Sujith 已提交
120

S
Sujith 已提交
121 122
	if (ac->sched)
		return;
123

S
Sujith 已提交
124 125 126
	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}
127

128
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
129 130
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
131 132 133
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
134 135
}

136 137
/*
 * Send a BlockAckReq for this TID advertising the given start sequence
 * number; a no-op when the node has no associated station.
 */
static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
{
	struct ath_node *an = tid->an;

	if (an->sta)
		ieee80211_send_bar(an->vif, an->sta->addr, tid->tidno,
				   seqno << IEEE80211_SEQ_SEQ_SHIFT);
}

145 146 147 148 149 150 151
/* Fill bf->rates with the rate table mac80211 selected for this frame. */
static void ath_set_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta,
			  struct ath_buf *bf)
{
	ieee80211_get_tx_rates(vif, sta, bf->bf_mpdu, bf->rates,
			       ARRAY_SIZE(bf->rates));
}

152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173
/*
 * Per-skb completion accounting: decrement the pending-frame counter of
 * the mac80211 queue the frame was mapped to and wake the queue if it
 * was stopped and has drained below the threshold.
 */
static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	int q;

	q = skb_get_queue_mapping(skb);
	/* frames sent via the UAPSD queue are accounted on their real queue */
	if (txq == sc->tx.uapsdq)
		txq = sc->tx.txq_map[q];

	/* ignore frames completing on a queue they were not counted on */
	if (txq != sc->tx.txq_map[q])
		return;

	if (WARN_ON(--txq->pending_frames < 0))
		txq->pending_frames = 0;

	if (txq->stopped &&
	    txq->pending_frames < sc->tx.txq_max_pending[q]) {
		ieee80211_wake_queue(sc->hw, q);
		txq->stopped = false;
	}
}

174 175 176 177 178 179 180 181 182 183 184 185 186 187
/*
 * Map a frame to its software TID state: QoS-data frames use the TID
 * from the QoS control field, everything else falls back to TID 0.
 */
static struct ath_atx_tid *
ath_get_skb_tid(struct ath_softc *sc, struct ath_node *an, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	u8 tidno = 0;

	if (ieee80211_is_data_qos(hdr->frame_control))
		tidno = ieee80211_get_qos_ctl(hdr)[0] &
			IEEE80211_QOS_CTL_TID_MASK;

	return ATH_AN_2_TID(an, tidno);
}

188 189
static bool ath_tid_has_buffered(struct ath_atx_tid *tid)
{
190
	return !skb_queue_empty(&tid->buf_q) || !skb_queue_empty(&tid->retry_q);
191 192 193 194
}

/* Pop the next frame for this TID; retries take priority over new frames. */
static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid)
{
	struct sk_buff *skb = __skb_dequeue(&tid->retry_q);

	return skb ? skb : __skb_dequeue(&tid->buf_q);
}

204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238
/*
 * ath_tx_tid_change_state:
 * - clears a-mpdu flag of previous session
 * - force sequence number allocation to fix next BlockAck Window
 */
static void
ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct ieee80211_tx_info *tx_info;
	struct sk_buff *skb, *tskb;
	struct ath_buf *bf;
	struct ath_frame_info *fi;

	skb_queue_walk_safe(&tid->buf_q, skb, tskb) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		tx_info = IEEE80211_SKB_CB(skb);
		tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;

		/* frames that already have a descriptor keep it */
		if (bf)
			continue;

		/* allocate a buffer now; on failure drop the frame */
		bf = ath_tx_setup_buffer(sc, txq, tid, skb);
		if (!bf) {
			__skb_unlink(skb, &tid->buf_q);
			ath_txq_skb_done(sc, txq, skb);
			ieee80211_free_txskb(sc->hw, skb);
			continue;
		}
	}
}

239
/*
 * Drop every frame on the TID's retry queue, completing them with a
 * failed (txok = 0) status. Frames that were inside the BlockAck window
 * are removed from it, and a BAR is sent afterwards so the peer can move
 * its window forward. Called with txq->axq_lock held; the lock is
 * dropped temporarily around ath_send_bar().
 */
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;
	bool sendbar = false;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));

	while ((skb = __skb_dequeue(&tid->retry_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;
		/* frames without a descriptor are simply freed */
		if (!bf) {
			ath_txq_skb_done(sc, txq, skb);
			ieee80211_free_txskb(sc->hw, skb);
			continue;
		}

		if (fi->baw_tracked) {
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			sendbar = true;
		}

		list_add_tail(&bf->list, &bf_head);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
	}

	if (sendbar) {
		/* ath_send_bar() must run without axq_lock held */
		ath_txq_unlock(sc, txq);
		ath_send_bar(tid, tid->seq_start);
		ath_txq_lock(sc, txq);
	}
}
277

S
Sujith 已提交
278 279
/*
 * Mark a sequence number as completed in the software BlockAck window
 * and slide the window start past any leading run of completed slots.
 * tid->tx_buf is a bitmap of outstanding slots; tid->seq_start tracks
 * the sequence number of slot baw_head.
 */
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	/* advance the window over every already-completed slot at its head */
	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
		/* keep the pending-BAR index relative to the new window start */
		if (tid->bar_index >= 0)
			tid->bar_index--;
	}
}
295

S
Sujith 已提交
296
/*
 * Record a frame's sequence number as outstanding in the software
 * BlockAck window bitmap, extending the window tail if the new slot
 * lies beyond it.
 */
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     struct ath_buf *bf)
{
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
	u16 seqno = bf->bf_state.seqno;
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);
	fi->baw_tracked = 1;

	/* grow the window tail when this slot is past the current tail */
	if (index >= ((tid->baw_tail - tid->baw_head) &
		(ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

S
Sujith 已提交
315 316
/*
 * Drop every frame buffered for this TID (both retry and fresh queues),
 * completing each with an error/failed status. Unlike ath_tx_flush_tid()
 * this does not touch the BlockAck window and sends no BAR.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = ath_tid_dequeue(tid))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		/* no descriptor yet: complete the bare skb as an error */
		if (!bf) {
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
	}
}

S
Sujith 已提交
342
/*
 * Account software retries for a frame. On the first retry the RETRY
 * bit is set in the 802.11 header; since the frame is already DMA-mapped
 * the header change must be synced back to the device.
 */
static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb, int count)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_buf *bf = fi->bf;
	struct ieee80211_hdr *hdr;
	int prev = fi->retries;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	fi->retries += count;

	/* header already carries the RETRY bit from an earlier attempt */
	if (prev > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
		sizeof(*hdr), DMA_TO_DEVICE);
}

362
static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
S
Sujith 已提交
363
{
364
	struct ath_buf *bf = NULL;
S
Sujith 已提交
365 366

	spin_lock_bh(&sc->tx.txbuflock);
367 368

	if (unlikely(list_empty(&sc->tx.txbuf))) {
369 370 371
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}
372 373 374 375

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

S
Sujith 已提交
376 377
	spin_unlock_bh(&sc->tx.txbuflock);

378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395
	return bf;
}

/* Return a tx descriptor buffer to the free pool. */
static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

/*
 * Clone a tx buffer for software retry: a fresh descriptor pointing at
 * the same mpdu, DMA mapping and state as the original. Returns NULL
 * (with a WARN) when the buffer pool is empty.
 */
static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

406 407 408 409
static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
			        struct ath_tx_status *ts, int txok,
			        int *nframes, int *nbad)
{
410
	struct ath_frame_info *fi;
411 412 413 414 415 416 417 418 419 420 421 422 423 424 425
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
426
		fi = get_frame_info(bf->bf_mpdu);
427
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);
428 429 430 431 432 433 434 435 436 437

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}


S
Sujith 已提交
438 439
/*
 * Completion handling for an A-MPDU (or single frame sent under an
 * aggregation session). For each subframe, decide between "acked"
 * (complete successfully and slide the BA window), "retry" (requeue on
 * the TID's retry queue) and "failed" (exceeded software retries or
 * fell outside the BA window). May send a BAR and/or schedule a chip
 * reset. Called with txq->axq_lock held; drops it around ath_send_bar().
 */
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true, isba;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
	int i, retries;
	int bar_index = -1;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, bf->rates, sizeof(rates));

	/* total retry count: every earlier rate series plus the last one */
	retries = ts->ts_longretry + 1;
	for (i = 0; i < ts->ts_rateindex; i++)
		retries += rates[i].count;

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		/* station is gone: fail-complete the whole chain */
		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			/* a stale tail buffer stays on the hw queue list */
			if (!bf->bf_state.stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tid = ath_get_skb_tid(sc, an, skb);
	seq_first = tid->seq_start;
	isba = ts->ts_flags & ATH9K_TX_BA;

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 *
	 * Only BlockAcks have a TID and therefore normal Acks cannot be
	 * checked
	 */
	if (isba && tid->tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when BA
			 * issue happens. Chip needs to be reset.
			 * But AP code may have sychronization issues
			 * when perform internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno) ||
		    !tid->active) {
			/*
			 * Outside of the current BlockAck window,
			 * maybe part of a previous session
			 */
			txfail = 1;
		} else if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else if (flush) {
			txpending = 1;
		} else if (fi->retries < ATH_MAX_SW_RETRIES) {
			/* don't count retries against a sleeping station */
			if (txok || !an->sleeping)
				ath_tx_set_retry(sc, txq, bf->bf_mpdu,
						 retries);

			txpending = 1;
		} else {
			/* software retry budget exhausted */
			txfail = 1;
			txfail_cnt++;
			bar_index = max_t(int, bar_index,
				ATH_BA_INDEX(seq_first, seqno));
		}

		/*
		 * Make sure the last desc is reclaimed if it
		 * not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if (bf_next != NULL || !bf_last->bf_state.stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			ath_tx_update_baw(sc, tid, seqno);

			/* feed rate control exactly once per aggregate */
			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
				rc_update = false;
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				!txfail);
		} else {
			if (tx_info->flags & IEEE80211_TX_STATUS_EOSP) {
				tx_info->flags &= ~IEEE80211_TX_STATUS_EOSP;
				ieee80211_sta_eosp(sta);
			}
			/* retry the un-acked ones */
			if (bf->bf_next == NULL && bf_last->bf_state.stale) {
				struct ath_buf *tbf;

				tbf = ath_clone_txbuf(sc, bf_last);
				/*
				 * Update tx baw and complete the
				 * frame with failed status if we
				 * run out of tx buf.
				 */
				if (!tbf) {
					ath_tx_update_baw(sc, tid, seqno);

					ath_tx_complete_buf(sc, bf, txq,
							    &bf_head, ts, 0);
					bar_index = max_t(int, bar_index,
						ATH_BA_INDEX(seq_first, seqno));
					break;
				}

				fi->bf = tbf;
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_buffered(sta, tid->tidno, true);

		skb_queue_splice_tail(&bf_pending, &tid->retry_q);
		if (!an->sleeping) {
			ath_tx_queue_tid(txq, tid);

			if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
				tid->ac->clear_ps_filter = true;
		}
	}

	if (bar_index >= 0) {
		u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);

		if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
			tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);

		/* BAR transmission must not run under axq_lock */
		ath_txq_unlock(sc, txq);
		ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
		ath_txq_lock(sc, txq);
	}

	rcu_read_unlock();

	if (needreset)
		ath9k_queue_reset(sc, RESET_TYPE_TX_ERROR);
}
663

664 665 666 667 668 669 670 671 672 673
static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
    struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
    return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

/*
 * Common completion path for one (possibly aggregated) buffer taken off
 * the hardware queue: update queue depth accounting, feed rate control
 * for non-aggregates, dispatch to the aggregate or single-frame
 * completion handler, and keep the queue scheduler running.
 */
static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_tx_status *ts, struct ath_buf *bf,
				  struct list_head *bf_head)
{
	struct ieee80211_tx_info *info;
	bool txok, flush;

	txok = !(ts->ts_status & ATH9K_TXERR_MASK);
	flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
	txq->axq_tx_inprogress = false;

	txq->axq_depth--;
	if (bf_is_ampdu_not_probing(bf))
		txq->axq_ampdu_depth--;

	if (!bf_isampdu(bf)) {
		/* a flush carries no meaningful status for rate control */
		if (!flush) {
			info = IEEE80211_SKB_CB(bf->bf_mpdu);
			memcpy(info->control.rates, bf->rates,
			       sizeof(info->control.rates));
			ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
		}
		ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
	} else
		ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok);

	if (!flush)
		ath_txq_schedule(sc, txq);
}

700 701 702 703 704 705 706 707 708 709 710
static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

711 712 713 714
	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

715 716 717 718 719 720 721
		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

S
Sujith 已提交
722 723
/*
 * Compute the maximum aggregate length (in bytes) for this frame's rate
 * series: the smallest per-rate 4ms/TXOP frame-length limit, further
 * capped by BT coexistence and the peer's advertised max A-MPDU size.
 * Returns 0 when aggregation should be avoided (probe frame or a legacy
 * rate in the series).
 */
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, bt_aggr_limit, legacy = 0;
	int q = tid->ac->txq->mac80211_qnum;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = bf->rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms (or TXOP limited) transmit duration.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		int modeidx;

		if (!rates[i].count)
			continue;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
			legacy = 1;
			break;
		}

		/* map HT mode + guard interval to the precomputed table */
		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			modeidx = MCS_HT40;
		else
			modeidx = MCS_HT20;

		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			modeidx++;

		frmlen = sc->tx.max_aggr_framelen[q][modeidx][rates[i].idx];
		max_4ms_framelen = min(max_4ms_framelen, frmlen);
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate, if rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	aggr_limit = min(max_4ms_framelen, (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * Override the default aggregation limit for BTCOEX.
	 */
	bt_aggr_limit = ath9k_btcoex_aggr_limit(sc, max_4ms_framelen);
	if (bt_aggr_limit)
		aggr_limit = bt_aggr_limit;

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we  are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}
793

S
Sujith 已提交
794
/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption enabled, hardware requires some more padding between
	 * subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *      The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add delimiter when using RTS/CTS with aggregation
	 * and non enterprise AR9003 card
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert desired mpdu density from microeconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40Mhz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = bf->rates[0].idx;
	flags = bf->rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	/* pad short subframes with extra delimiters to reach minlen */
	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

868 869
/*
 * Fetch the next transmittable buffer for a TID. Prefers the retry
 * queue over fresh frames; *q is set to the queue the returned frame is
 * still linked on (the caller unlinks it). Handles lazy descriptor
 * setup, frames that fall outside the BlockAck window, and frames
 * superseded by a pending BAR. Returns NULL when nothing is eligible.
 */
static struct ath_buf *
ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
			struct ath_atx_tid *tid, struct sk_buff_head **q)
{
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	struct ath_buf *bf;
	u16 seqno;

	while (1) {
		/* retries first, then the fresh queue */
		*q = &tid->retry_q;
		if (skb_queue_empty(*q))
			*q = &tid->buf_q;

		skb = skb_peek(*q);
		if (!skb)
			break;

		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);
		else
			bf->bf_state.stale = false;

		/* descriptor setup failed: drop the frame and keep going */
		if (!bf) {
			__skb_unlink(skb, *q);
			ath_txq_skb_done(sc, txq, skb);
			ieee80211_free_txskb(sc->hw, skb);
			continue;
		}

		bf->bf_next = NULL;
		bf->bf_lastbf = bf;

		tx_info = IEEE80211_SKB_CB(skb);
		tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
		/* non-AMPDU frames bypass the BA window checks below */
		if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
			bf->bf_state.bf_type = 0;
			return bf;
		}

		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
		seqno = bf->bf_state.seqno;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno))
			break;

		/* frame is covered by a pending BAR: complete it as failed */
		if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
			struct ath_tx_status ts = {};
			struct list_head bf_head;

			INIT_LIST_HEAD(&bf_head);
			list_add(&bf->list, &bf_head);
			__skb_unlink(skb, *q);
			ath_tx_update_baw(sc, tid, seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
			continue;
		}

		return bf;
	}

	return NULL;
}

936 937 938 939 940
/*
 * Build one A-MPDU starting from bf_first: pull eligible subframes off
 * the TID queue, link them into bf_q, add each to the BlockAck window
 * and account delimiter padding. Stops at the rate-dependent length
 * limit, half the BA window, a rate/probe change, or when the TID runs
 * dry. *aggr_len receives the total aggregate length; returns true when
 * the TID has no further transmittable subframe ("closed").
 */
static bool
ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq,
		 struct ath_atx_tid *tid, struct list_head *bf_q,
		 struct ath_buf *bf_first, struct sk_buff_head *tid_q,
		 int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf = bf_first, *bf_prev = NULL;
	int nframes = 0, ndelim;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	bool closed = false;

	bf = bf_first;
	aggr_limit = ath_lookup_rate(sc, bf, tid);

	do {
		skb = bf->bf_mpdu;
		fi = get_frame_info(skb);

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;
		if (nframes) {
			if (aggr_limit < al + bpad + al_delta ||
			    ath_lookup_legacy(bf) || nframes >= h_baw)
				break;

			/* stop at probe frames or non-AMPDU frames */
			tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
			if ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
			    !(tx_info->flags & IEEE80211_TX_CTL_AMPDU))
				break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;

		/* link buffers of this frame to the aggregate */
		if (!fi->baw_tracked)
			ath_tx_addto_baw(sc, tid, bf);
		bf->bf_state.ndelim = ndelim;

		__skb_unlink(skb, tid_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;

		bf_prev = bf;

		bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
		if (!bf) {
			closed = true;
			break;
		}
	} while (ath_tid_has_buffered(tid));

	bf = bf_first;
	bf->bf_lastbf = bf_prev;

	/* a single-frame "aggregate" is sent as a plain AMPDU frame */
	if (bf == bf_prev) {
		al = get_frame_info(bf->bf_mpdu)->framelen;
		bf->bf_state.bf_type = BUF_AMPDU;
	} else {
		TX_STAT_INC(txq->axq_qnum, a_aggr);
	}

	*aggr_len = al;

	return closed;
#undef PADBYTES
}
1020

1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049
/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width  - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4us v/s 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	int streams = HT_RC_2_STREAMS(rix);
	u32 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	u32 nsymbits = bits_per_symbol[rix % 8][width] * streams;
	u32 nsymbols = (nbits + nsymbits - 1) / nsymbits;
	u32 duration;

	duration = half_gi ? SYMBOL_TIME_HALFGI(nsymbols)
			   : SYMBOL_TIME(nsymbols);

	/* addup duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}

1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087
/*
 * Invert ath_pkt_duration: given an airtime budget in usec, return the
 * largest frame length (bytes) that fits, clamped to 65532.
 */
static int ath_max_framelen(int usec, int mcs, bool ht40, bool sgi)
{
	int nss = HT_RC_2_STREAMS(mcs);
	int nsymbols;
	int nbits;
	int max_bytes;

	if (sgi)
		nsymbols = TIME_SYMBOLS_HALFGI(usec);
	else
		nsymbols = TIME_SYMBOLS(usec);

	nbits = nsymbols * bits_per_symbol[mcs % 8][ht40] * nss;
	nbits -= OFDM_PLCP_BITS;

	max_bytes = nbits / 8;
	max_bytes -= L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(nss);
	if (max_bytes > 65532)
		max_bytes = 65532;

	return max_bytes;
}

/*
 * Recompute the per-queue maximum aggregate frame length tables for all
 * 32 MCS indices and all four HT20/HT40 x GI/SGI combinations, based on
 * the queue's TXOP limit.
 */
void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop)
{
	int mcs;

	/* 4ms is the default (and maximum) duration */
	if (!txop || txop > 4096)
		txop = 4096;

	for (mcs = 0; mcs < 32; mcs++) {
		sc->tx.max_aggr_framelen[queue][MCS_HT20][mcs] =
			ath_max_framelen(txop, mcs, false, false);
		sc->tx.max_aggr_framelen[queue][MCS_HT20_SGI][mcs] =
			ath_max_framelen(txop, mcs, false, true);
		sc->tx.max_aggr_framelen[queue][MCS_HT40][mcs] =
			ath_max_framelen(txop, mcs, true, false);
		sc->tx.max_aggr_framelen[queue][MCS_HT40_SGI][mcs] =
			ath_max_framelen(txop, mcs, true, true);
	}
}

1088
/*
 * Fill the rate-series portion of the hardware tx descriptor info for one
 * frame/aggregate: per-series tries, RTS/CTS protection flags, chainmask
 * selection and packet duration for each mac80211 rate table entry.
 */
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_info *info, int len, bool rts)
{
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
	u32 rts_thresh = sc->hw->wiphy->rts_threshold;
	int i;
	u8 rix = 0;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = bf->rates;
	hdr = (struct ieee80211_hdr *)skb->data;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);
	info->rtscts_rate = fi->rtscts_rate;

	for (i = 0; i < ARRAY_SIZE(bf->rates); i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		/* skip unused/invalid rate table entries */
		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		info->rates[i].Tries = rates[i].count;

		/*
		 * Handle RTS threshold for unaggregated HT frames.
		 */
		if (bf_isampdu(bf) && !bf_isaggr(bf) &&
		    (rates[i].flags & IEEE80211_TX_RC_MCS) &&
		    unlikely(rts_thresh != (u32) -1)) {
			if (!rts_thresh || (len > rts_thresh))
				rts = true;
		}

		/* RTS takes priority over CTS-to-self protection */
		if (rts || rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates: bit 0x80 marks the hw rate as HT */
			info->rates[i].Rate = rix | 0x80;
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);
			info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
				 is_40, is_sgi, is_sp);
			/* STBC only applies to single-stream rates (MCS 0-7) */
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		info->rates[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				info->rates[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		if (bf->bf_state.bfs_paprd)
			info->rates[i].ChSel = ah->txchainmask;
		else
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);

		info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		info->flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (info->flags & ATH9K_TXDESC_RTSENA)
		info->flags &= ~ATH9K_TXDESC_CTSENA;
}
1194

1195 1196 1197 1198 1199 1200 1201 1202
static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
1203

1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215
	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
1216 1217
}

1218 1219
/*
 * Walk a chain of buffers (single frame or aggregate) and program one hw
 * tx descriptor per buffer: link pointers, rate series (first subframe
 * only), key info and aggregate first/middle/last markers.
 */
static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_txq *txq, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf_first = NULL;
	struct ath_tx_info info;
	u32 rts_thresh = sc->hw->wiphy->rts_threshold;
	bool rts = false;

	memset(&info, 0, sizeof(info));
	info.is_first = true;
	info.is_last = true;
	info.txpower = MAX_RATE_POWER;
	info.qcu = txq->axq_qnum;

	while (bf) {
		struct sk_buff *skb = bf->bf_mpdu;
		struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
		struct ath_frame_info *fi = get_frame_info(skb);
		bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

		info.type = get_hw_packet_type(skb);
		/* in tx99 mode the last descriptor links back to itself */
		if (bf->bf_next)
			info.link = bf->bf_next->bf_daddr;
		else
			info.link = (sc->tx99_state) ? bf->bf_daddr : 0;

		if (!bf_first) {
			bf_first = bf;

			if (!sc->tx99_state)
				info.flags = ATH9K_TXDESC_INTREQ;
			if ((tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) ||
			    txq == sc->tx.uapsdq)
				info.flags |= ATH9K_TXDESC_CLRDMASK;

			if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
				info.flags |= ATH9K_TXDESC_NOACK;
			if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
				info.flags |= ATH9K_TXDESC_LDPC;

			if (bf->bf_state.bfs_paprd)
				info.flags |= (u32) bf->bf_state.bfs_paprd <<
					      ATH9K_TXDESC_PAPRD_S;

			/*
			 * mac80211 doesn't handle RTS threshold for HT because
			 * the decision has to be taken based on AMPDU length
			 * and aggregation is done entirely inside ath9k.
			 * Set the RTS/CTS flag for the first subframe based
			 * on the threshold.
			 */
			if (aggr && (bf == bf_first) &&
			    unlikely(rts_thresh != (u32) -1)) {
				/*
				 * "len" is the size of the entire AMPDU.
				 */
				if (!rts_thresh || (len > rts_thresh))
					rts = true;
			}
			/* rate series is programmed only on the first subframe */
			ath_buf_set_rate(sc, bf, &info, len, rts);
		}

		info.buf_addr[0] = bf->bf_buf_addr;
		info.buf_len[0] = skb->len;
		info.pkt_len = fi->framelen;
		info.keyix = fi->keyix;
		info.keytype = fi->keytype;

		if (aggr) {
			if (bf == bf_first)
				info.aggr = AGGR_BUF_FIRST;
			else if (bf == bf_first->bf_lastbf)
				info.aggr = AGGR_BUF_LAST;
			else
				info.aggr = AGGR_BUF_MIDDLE;

			info.ndelim = bf->bf_state.ndelim;
			info.aggr_len = len;
		}

		/* end of this aggregate: restart first-subframe handling */
		if (bf == bf_first->bf_lastbf)
			bf_first = NULL;

		ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
		bf = bf->bf_next;
	}
}

1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341
/*
 * Move up to two consecutive non-AMPDU subframes from the tid queue onto
 * bf_q as a short burst, chaining them via bf_next.
 */
static void
ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq,
		  struct ath_atx_tid *tid, struct list_head *bf_q,
		  struct ath_buf *bf_first, struct sk_buff_head *tid_q)
{
	struct ath_buf *cur = bf_first;
	struct ath_buf *prev = NULL;
	int count = 0;

	for (;;) {
		struct ieee80211_tx_info *tx_info;

		__skb_unlink(cur->bf_mpdu, tid_q);
		list_add_tail(&cur->list, bf_q);
		if (prev)
			prev->bf_next = cur;
		prev = cur;

		/* burst size is capped at two frames */
		if (++count >= 2)
			break;

		cur = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
		if (!cur)
			break;

		/* stop before an aggregation-eligible frame */
		tx_info = IEEE80211_SKB_CB(cur->bf_mpdu);
		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
			break;

		ath_set_rates(tid->an->vif, tid->an->sta, cur);
	}
}

1342 1343
/*
 * Try to form and queue one aggregate (or short burst) from this tid.
 * Returns true if frames were handed to the hardware queue; sets *stop
 * when the hw queue is already deep enough that scheduling should pause.
 */
static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid, bool *stop)
{
	struct ath_buf *bf;
	struct ieee80211_tx_info *tx_info;
	struct sk_buff_head *tid_q;
	struct list_head bf_q;
	int aggr_len = 0;
	bool aggr, last = true;

	if (!ath_tid_has_buffered(tid))
		return false;

	INIT_LIST_HEAD(&bf_q);

	bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
	if (!bf)
		return false;

	/* back off when the hardware queue already holds enough frames */
	tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
	aggr = !!(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
	if ((aggr && txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) ||
		(!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) {
		*stop = true;
		return false;
	}

	ath_set_rates(tid->an->vif, tid->an->sta, bf);
	if (aggr)
		last = ath_tx_form_aggr(sc, txq, tid, &bf_q, bf,
					tid_q, &aggr_len);
	else
		ath_tx_form_burst(sc, txq, tid, &bf_q, bf, tid_q);

	if (list_empty(&bf_q))
		return false;

	/* propagate the PS filter clear request on the first frame */
	if (tid->ac->clear_ps_filter || tid->an->no_ps_filter) {
		tid->ac->clear_ps_filter = false;
		tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
	}

	ath_tx_fill_desc(sc, bf, txq, aggr_len);
	ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	return true;
}

1389 1390
/*
 * mac80211 callback: begin an A-MPDU tx session for (sta, tid).
 * Refreshes the station's ampdu factor/density, resets the tid's
 * block-ack window state and reports the starting sequence number
 * through *ssn. Always returns 0.
 */
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;
	u8 density;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	/* update ampdu factor/density, they may have changed. This may happen
	 * in HT IBSS when a beacon with HT-info is received after the station
	 * has already been added.
	 */
	if (sta->ht_cap.ht_supported) {
		an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
				     sta->ht_cap.ampdu_factor);
		density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
		an->mpdudensity = density;
	}

	/* force sequence number allocation for pending frames */
	ath_tx_tid_change_state(sc, txtid);

	txtid->active = true;
	txtid->paused = true;	/* stays paused until ath_tx_aggr_resume() */
	*ssn = txtid->seq_start = txtid->seq_next;
	txtid->bar_index = -1;

	/* reset the block-ack window tracking */
	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}
1423

1424
/*
 * mac80211 callback: tear down the A-MPDU tx session for (sta, tid).
 * Deactivates the tid and flushes its pending frames under the txq lock.
 */
void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	ath_txq_lock(sc, txq);
	txtid->active = false;
	txtid->paused = false;
	ath_tx_flush_tid(sc, txtid);
	ath_tx_tid_change_state(sc, txtid);
	ath_txq_unlock_complete(sc, txq);
}
1437

1438 1439
/*
 * Station entered powersave: unschedule every tid of this node from its
 * txq and tell mac80211 which tids still have frames buffered in the
 * driver.
 */
void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
		       struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	bool buffered;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {

		if (!tid->sched)
			continue;

		ac = tid->ac;
		txq = ac->txq;

		ath_txq_lock(sc, txq);

		buffered = ath_tid_has_buffered(tid);

		/* remove the tid (and its empty ac) from the schedule list */
		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

		ath_txq_unlock(sc, txq);

		ieee80211_sta_set_buffered(sta, tidno, buffered);
	}
}

/*
 * Station left powersave: request a PS-filter clear on each access
 * category and reschedule every tid that still has buffered frames.
 */
void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		ath_txq_lock(sc, txq);
		ac->clear_ps_filter = true;

		if (!tid->paused && ath_tid_has_buffered(tid)) {
			ath_tx_queue_tid(txq, tid);
			ath_txq_schedule(sc, txq);
		}

		ath_txq_unlock_complete(sc, txq);
	}
}

1499 1500
/*
 * Resume a previously started A-MPDU session: set the negotiated
 * block-ack window size, unpause the tid and kick the scheduler if
 * frames are waiting.
 */
void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
			u16 tidno)
{
	struct ath_atx_tid *tid;
	struct ath_node *an;
	struct ath_txq *txq;

	an = (struct ath_node *)sta->drv_priv;
	tid = ATH_AN_2_TID(an, tidno);
	txq = tid->ac->txq;

	ath_txq_lock(sc, txq);

	tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
	tid->paused = false;

	if (ath_tid_has_buffered(tid)) {
		ath_tx_queue_tid(txq, tid);
		ath_txq_schedule(sc, txq);
	}

	ath_txq_unlock_complete(sc, txq);
}

1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534
/*
 * mac80211 callback: release up to nframes powersave-buffered frames for
 * the tids in the 'tids' bitmask onto the UAPSD queue. The last released
 * frame is tagged with EOSP.
 */
void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
				   struct ieee80211_sta *sta,
				   u16 tids, int nframes,
				   enum ieee80211_frame_release_type reason,
				   bool more_data)
{
	struct ath_softc *sc = hw->priv;
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_txq *txq = sc->tx.uapsdq;
	struct ieee80211_tx_info *info;
	struct list_head bf_q;
	struct ath_buf *bf_tail = NULL, *bf;
	struct sk_buff_head *tid_q;
	int sent = 0;
	int i;

	INIT_LIST_HEAD(&bf_q);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ath_atx_tid *tid;

		if (!(tids & 1))
			continue;

		tid = ATH_AN_2_TID(an, i);
		if (tid->paused)
			continue;

		ath_txq_lock(sc, tid->ac->txq);
		while (nframes > 0) {
			bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid, &tid_q);
			if (!bf)
				break;

			__skb_unlink(bf->bf_mpdu, tid_q);
			list_add_tail(&bf->list, &bf_q);
			ath_set_rates(tid->an->vif, tid->an->sta, bf);
			ath_tx_addto_baw(sc, tid, bf);
			/* released frames are sent unaggregated */
			bf->bf_state.bf_type &= ~BUF_AGGR;
			if (bf_tail)
				bf_tail->bf_next = bf;

			bf_tail = bf;
			nframes--;
			sent++;
			TX_STAT_INC(txq->axq_qnum, a_queued_hw);

			if (an->sta && !ath_tid_has_buffered(tid))
				ieee80211_sta_set_buffered(an->sta, i, false);
		}
		ath_txq_unlock_complete(sc, tid->ac->txq);
	}

	if (list_empty(&bf_q))
		return;

	/* mark end-of-service-period on the last released frame */
	info = IEEE80211_SKB_CB(bf_tail->bf_mpdu);
	info->flags |= IEEE80211_TX_STATUS_EOSP;

	bf = list_first_entry(&bf_q, struct ath_buf, list);
	ath_txq_lock(sc, txq);
	ath_tx_fill_desc(sc, bf, txq, 0);
	ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	ath_txq_unlock(sc, txq);
}

S
Sujith 已提交
1588 1589 1590
/********************/
/* Queue Management */
/********************/
1591

S
Sujith 已提交
1592
/*
 * Allocate and initialize a hardware tx queue of the given type/subtype.
 * Returns a pointer into sc->tx.txq[], or NULL if the hardware has no
 * queue to spare.
 */
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[IEEE80211_AC_BE] = ATH_TXQ_AC_BE,
		[IEEE80211_AC_BK] = ATH_TXQ_AC_BK,
		[IEEE80211_AC_VI] = ATH_TXQ_AC_VI,
		[IEEE80211_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		__skb_queue_head_init(&txq->complete_q);
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		/* EDMA tx FIFO bookkeeping */
		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
	}
	return &sc->tx.txq[axq_qnum];
}

S
Sujith 已提交
1665 1666 1667
/*
 * Push updated EDCA parameters (AIFS, CWmin/max, burst/ready time) for
 * one hardware queue. Returns 0 on success or -EIO if the hardware
 * rejected the settings.
 */
int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	/* read-modify-write the queue properties */
	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

/*
 * Recompute the CAB (content-after-beacon) queue's ready time as a
 * percentage of the beacon interval. Always returns 0.
 */
int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);

	qi.tqi_readyTime = (cur_conf->beacon_interval *
			    ATH_CABQ_READY_TIME) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

1707
/*
 * Complete every buffer chain on 'list' with a flush status, returning
 * stale buffers directly to the free pool.
 */
static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *list)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	ts.ts_status = ATH9K_TX_FLUSH;
	INIT_LIST_HEAD(&bf_head);

	while (!list_empty(list)) {
		bf = list_first_entry(list, struct ath_buf, list);

		/* stale entries were already completed; just recycle them */
		if (bf->bf_state.stale) {
			list_del(&bf->list);

			ath_tx_return_buffer(sc, bf);
			continue;
		}

		/* complete the whole frame/aggregate chain at once */
		lastbf = bf->bf_lastbf;
		list_cut_position(&bf_head, list, &lastbf->list);
		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
	}
}
1733

1734 1735 1736 1737 1738 1739
/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath_txq_lock(sc, txq);

	/* on EDMA parts, drain every pending FIFO slot first */
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		int idx = txq->txq_tailidx;

		while (!list_empty(&txq->txq_fifo[idx])) {
			ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx]);

			INCR(idx, ATH_TXFIFO_DEPTH);
		}
		txq->txq_tailidx = idx;
	}

	txq->axq_link = NULL;
	txq->axq_tx_inprogress = false;
	ath_drain_txq_list(sc, txq, &txq->axq_q);

	ath_txq_unlock_complete(sc, txq);
}

1762
/*
 * Abort tx DMA and drain all configured hardware tx queues.
 * Returns true if DMA stopped cleanly on every queue, false if any
 * queue still had frames pending after the abort.
 */
bool ath_drain_all_txq(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i;
	u32 npend = 0;

	if (test_bit(SC_OP_INVALID, &sc->sc_flags))
		return true;

	ath9k_hw_abort_tx_dma(ah);

	/* Check if any queue remains active */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
			npend |= BIT(i);
	}

	if (npend)
		ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		/*
		 * The caller will resume queues with ieee80211_wake_queues.
		 * Mark the queue as not stopped to prevent ath_tx_complete
		 * from waking the queue too early.
		 */
		txq = &sc->tx.txq[i];
		txq->stopped = false;
		ath_draintxq(sc, txq);
	}

	return !npend;
}
1803

S
Sujith 已提交
1804
/* Release a hardware tx queue and clear its bit in the setup mask. */
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}
1809

1810 1811 1812
/* For each axq_acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *last_ac;
	struct ath_atx_tid *tid, *last_tid;
	bool sent = false;

	if (test_bit(SC_OP_HW_RESET, &sc->sc_flags) ||
	    list_empty(&txq->axq_acq))
		return;

	rcu_read_lock();

	/* remember the current tail so one full round-robin pass is bounded */
	last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
	while (!list_empty(&txq->axq_acq)) {
		bool stop = false;

		ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
		list_del(&ac->list);
		ac->sched = false;

		while (!list_empty(&ac->tid_q)) {

			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
					       list);
			list_del(&tid->list);
			tid->sched = false;

			if (tid->paused)
				continue;

			if (ath_tx_sched_aggr(sc, txq, tid, &stop))
				sent = true;

			/*
			 * add tid to round-robin queue if more frames
			 * are pending for the tid
			 */
			if (ath_tid_has_buffered(tid))
				ath_tx_queue_tid(txq, tid);

			if (stop || tid == last_tid)
				break;
		}

		/* requeue the ac if it still has pending tids */
		if (!list_empty(&ac->tid_q) && !ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}

		if (stop)
			break;

		/* end of a pass: only continue if this pass made progress */
		if (ac == last_ac) {
			if (!sent)
				break;

			sent = false;
			last_ac = list_entry(txq->axq_acq.prev,
					     struct ath_atx_ac, list);
		}
	}

	rcu_read_unlock();
}
1878

S
Sujith 已提交
1879 1880 1881 1882
/***********/
/* TX, DMA */
/***********/

1883
/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 *
 * Handles both EDMA (FIFO-based) and legacy (linked-descriptor) parts;
 * 'internal' suppresses the queue depth accounting (used when requeueing
 * buffers that were already counted).
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *bf_last;
	bool puttxbuf = false;
	bool edma;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	bf = list_first_entry(head, struct ath_buf, list);
	bf_last = list_entry(head->prev, struct ath_buf, list);

	ath_dbg(common, QUEUE, "qnum: %d, txq depth: %d\n",
		txq->axq_qnum, txq->axq_depth);

	if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
		/* free FIFO slot available: push directly to hardware */
		list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		puttxbuf = true;
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link) {
			/* chain onto the last descriptor already queued */
			ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
			ath_dbg(common, XMIT, "link[%u] (%p)=%llx (%p)\n",
				txq->axq_qnum, txq->axq_link,
				ito64(bf->bf_daddr), bf->bf_desc);
		} else if (!edma)
			puttxbuf = true;

		txq->axq_link = bf_last->bf_desc;
	}

	if (puttxbuf) {
		TX_STAT_INC(txq->axq_qnum, puttxbuf);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n",
			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	}

	if (!edma || sc->tx99_state) {
		TX_STAT_INC(txq->axq_qnum, txstart);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}

	if (!internal) {
		/* count one depth unit per frame/aggregate in the chain */
		while (bf) {
			txq->axq_depth++;
			if (bf_is_ampdu_not_probing(bf))
				txq->axq_ampdu_depth++;

			bf = bf->bf_lastbf->bf_next;
		}
	}
}
1951

F
Felix Fietkau 已提交
1952
/*
 * Queue a single, unaggregated frame to the hardware queue, using the
 * ath_buf previously attached to the skb's frame info.
 */
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct list_head bf_head;
	struct ath_buf *bf;

	bf = fi->bf;

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);
	bf->bf_state.bf_type = 0;

	/* single-buffer chain: the frame is its own last buffer */
	bf->bf_next = NULL;
	bf->bf_lastbf = bf;
	ath_tx_fill_desc(sc, bf, txq, fi->framelen);
	ath_tx_txqaddbuf(sc, txq, &bf_head, false);
	TX_STAT_INC(txq->axq_qnum, queued);
}

1972 1973 1974
/*
 * Populate the driver's per-skb ath_frame_info: crypto key index/type,
 * frame length and the RTS/CTS rate (with short preamble if the BSS
 * uses it).
 */
static void setup_frame_info(struct ieee80211_hw *hw,
			     struct ieee80211_sta *sta,
			     struct sk_buff *skb,
			     int framelen)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	const struct ieee80211_rate *rate;
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_node *an = NULL;
	enum ath9k_key_type keytype;
	bool short_preamble = false;

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	if (tx_info->control.vif &&
	    tx_info->control.vif->bss_conf.use_short_preamble)
		short_preamble = true;

	rate = ieee80211_get_rts_cts_rate(hw, tx_info);
	keytype = ath9k_cmn_get_hw_crypto_keytype(skb);

	if (sta)
		an = (struct ath_node *) sta->drv_priv;

	memset(fi, 0, sizeof(*fi));
	if (hw_key)
		fi->keyix = hw_key->hw_key_idx;
	else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
		fi->keyix = an->ps_key;
	else
		fi->keyix = ATH9K_TXKEYIX_INVALID;
	fi->keytype = keytype;
	fi->framelen = framelen;

	/* no protection rate available (e.g. no rate control info yet) */
	if (!rate)
		return;
	fi->rtscts_rate = rate->hw_value;
	if (short_preamble)
		fi->rtscts_rate |= rate->hw_value_short;
}

2018 2019 2020 2021
/*
 * Reduce the tx chainmask for rates/chips where using all chains is not
 * allowed; otherwise return the mask unchanged.
 */
u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_channel *chan = ah->curchan;

	/* APM parts on 5 GHz: drop from 3 chains to 2 for low rates */
	if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && IS_CHAN_5GHZ(chan) &&
	    (chainmask == 0x7) && (rate < 0x90))
		return 0x3;

	/* AR9462 with BT coex enabled: CCK rates restricted to chain 1 */
	if (AR_SREV_9462(ah) && ath9k_hw_btcoex_is_enabled(ah) &&
	    IS_CCK_RATE(rate))
		return 0x2;

	return chainmask;
}

2033 2034 2035 2036
/*
 * Assign a descriptor (and sequence number if necessary,
 * and map buffer for DMA. Frees skb on error
 *
 * NOTE(review): despite the comment above, this function does not free
 * the skb itself on failure — it returns NULL and the buffer is
 * recycled; the caller appears responsible for the skb. Confirm against
 * callers.
 */
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_buf *bf;
	int fragno;
	u16 seqno;

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		ath_dbg(common, XMIT, "TX buffers are full\n");
		return NULL;
	}

	ATH_TXBUF_RESET(bf);

	if (tid) {
		/* assign the tid's next sequence number, preserving the
		 * fragment number of fragmented frames */
		fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
		seqno = tid->seq_next;
		hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);

		if (fragno)
			hdr->seq_ctrl |= cpu_to_le16(fragno);

		/* all fragments of an MSDU share one sequence number */
		if (!ieee80211_has_morefrags(hdr->frame_control))
			INCR(tid->seq_next, IEEE80211_SEQ_MAX);

		bf->bf_state.seqno = seqno;
	}

	bf->bf_mpdu = skb;

	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
		bf->bf_mpdu = NULL;
		bf->bf_buf_addr = 0;
		ath_err(ath9k_hw_common(sc->sc_ah),
			"dma_mapping_error() on TX\n");
		ath_tx_return_buffer(sc, bf);
		return NULL;
	}

	fi->bf = bf;

	return bf;
}

2089 2090
/*
 * Common TX preparation: resolve the destination ath_node, account for
 * the crypto ICV in the frame length, optionally assign a sequence
 * number, insert MAC-header padding, and fill in the per-frame info.
 *
 * Returns 0 on success or -ENOMEM if there is no headroom for padding.
 */
static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
			  struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = txctl->sta;
	struct ieee80211_vif *vif = info->control.vif;
	struct ath_vif *avp;
	struct ath_softc *sc = hw->priv;
	int frmlen = skb->len + FCS_LEN;
	int padpos, padsize;

	/* NOTE:  sta can be NULL according to net/mac80211.h */
	if (sta)
		txctl->an = (struct ath_node *)sta->drv_priv;
	else if (vif && ieee80211_is_data(hdr->frame_control)) {
		/* No station: data frames go through the vif's mcast node. */
		avp = (void *)vif->drv_priv;
		txctl->an = &avp->mcast_node;
	}

	/* Hardware encryption appends the ICV after the payload. */
	if (info->control.hw_key)
		frmlen += info->control.hw_key->icv_len;

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	/* Non-AP interfaces and non-data frames always clear the
	 * powersave filter. */
	if ((vif && vif->type != NL80211_IFTYPE_AP &&
	            vif->type != NL80211_IFTYPE_AP_VLAN) ||
	    !ieee80211_is_data(hdr->frame_control))
		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;

	/* Add the padding after the header if this is not already done */
	padpos = ieee80211_hdrlen(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize)
			return -ENOMEM;

		/* Shift the header up so the payload starts 4-byte aligned. */
		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

	setup_frame_info(hw, sta, skb, frmlen);
	return 0;
}

2144

2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164
/* Upon failure caller should free skb */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = txctl->sta;
	struct ieee80211_vif *vif = info->control.vif;
	struct ath_softc *sc = hw->priv;
	struct ath_txq *txq = txctl->txq;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf;
	int q;
	int ret;

	ret = ath_tx_prepare(hw, skb, txctl);
	if (ret)
	    return ret;

	hdr = (struct ieee80211_hdr *) skb->data;
	/*
	 * At this point, the vif, hw_key and sta pointers in the tx control
	 * info are no longer valid (overwritten by the ath_frame_info data.
	 */

	q = skb_get_queue_mapping(skb);

	ath_txq_lock(sc, txq);
	/* Throttle mac80211 once this hardware queue backs up past its
	 * per-queue limit. */
	if (txq == sc->tx.txq_map[q] &&
	    ++txq->pending_frames > sc->tx.txq_max_pending[q] &&
	    !txq->stopped) {
		ieee80211_stop_queue(sc->hw, q);
		txq->stopped = true;
	}

	if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) {
		/* U-APSD responses bypass the normal queue; switch locks
		 * to the dedicated uapsd queue. */
		ath_txq_unlock(sc, txq);
		txq = sc->tx.uapsdq;
		ath_txq_lock(sc, txq);
	} else if (txctl->an &&
		   ieee80211_is_data_present(hdr->frame_control)) {
		tid = ath_get_skb_tid(sc, txctl->an, skb);

		WARN_ON(tid->ac->txq != txctl->txq);

		if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
			tid->ac->clear_ps_filter = true;

		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		TX_STAT_INC(txq->axq_qnum, a_queued_sw);
		__skb_queue_tail(&tid->buf_q, skb);
		if (!txctl->an->sleeping)
			ath_tx_queue_tid(txq, tid);

		ath_txq_schedule(sc, txq);
		goto out;
	}

	bf = ath_tx_setup_buffer(sc, txq, tid, skb);
	if (!bf) {
		ath_txq_skb_done(sc, txq, skb);
		/* PAPRD training frames are internal; everything else goes
		 * back to mac80211 for accounting. */
		if (txctl->paprd)
			dev_kfree_skb_any(skb);
		else
			ieee80211_free_txskb(sc->hw, skb);
		goto out;
	}

	bf->bf_state.bfs_paprd = txctl->paprd;

	if (txctl->paprd)
		bf->bf_state.bfs_paprd_timestamp = jiffies;

	ath_set_rates(vif, sta, bf);
	ath_tx_send_normal(sc, txq, tid, skb);

out:
	ath_txq_unlock(sc, txq);

	return 0;
}

2230 2231 2232 2233 2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260
void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		 struct sk_buff *skb)
{
	struct ath_softc *sc = hw->priv;
	struct ath_tx_control txctl = {
		.txq = sc->beacon.cabq
	};
	struct ath_tx_info info = {};
	struct ieee80211_hdr *hdr;
	struct ath_buf *bf_tail = NULL;
	struct ath_buf *bf;
	LIST_HEAD(bf_q);
	int duration = 0;
	int max_duration;

	max_duration =
		sc->cur_beacon_conf.beacon_interval * 1000 *
		sc->cur_beacon_conf.dtim_period / ATH_BCBUF;

	do {
		struct ath_frame_info *fi = get_frame_info(skb);

		if (ath_tx_prepare(hw, skb, &txctl))
			break;

		bf = ath_tx_setup_buffer(sc, txctl.txq, NULL, skb);
		if (!bf)
			break;

		bf->bf_lastbf = bf;
		ath_set_rates(vif, NULL, bf);
S
Sujith Manoharan 已提交
2261
		ath_buf_set_rate(sc, bf, &info, fi->framelen, false);
2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297
		duration += info.rates[0].PktDuration;
		if (bf_tail)
			bf_tail->bf_next = bf;

		list_add_tail(&bf->list, &bf_q);
		bf_tail = bf;
		skb = NULL;

		if (duration > max_duration)
			break;

		skb = ieee80211_get_buffered_bc(hw, vif);
	} while(skb);

	if (skb)
		ieee80211_free_txskb(hw, skb);

	if (list_empty(&bf_q))
		return;

	bf = list_first_entry(&bf_q, struct ath_buf, list);
	hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;

	if (hdr->frame_control & IEEE80211_FCTL_MOREDATA) {
		hdr->frame_control &= ~IEEE80211_FCTL_MOREDATA;
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
			sizeof(*hdr), DMA_TO_DEVICE);
	}

	ath_txq_lock(sc, txctl.txq);
	ath_tx_fill_desc(sc, bf, txctl.txq, 0);
	ath_tx_txqaddbuf(sc, txctl.txq, &bf_q, false);
	TX_STAT_INC(txctl.txq->axq_qnum, queued);
	ath_txq_unlock(sc, txctl.txq);
}

S
Sujith 已提交
2298 2299 2300
/*****************/
/* TX Completion */
/*****************/

/*
 * Final per-skb completion: report status flags to mac80211, strip the
 * MAC-header alignment padding added on TX, handle the powersave
 * wait-for-ack transition, and hand the skb to the queue's complete_q.
 */
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
	int padpos, padsize;
	unsigned long flags;

	ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);

	if (sc->sc_ah->caldata)
		set_bit(PAPRD_PACKET_SENT, &sc->sc_ah->caldata->cal_flags);

	if (!(tx_flags & ATH_TX_ERROR))
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;

	padpos = ieee80211_hdrlen(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len>padpos+padsize) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	/* If we were only awake to get this TX status, go back to sleep. */
	spin_lock_irqsave(&sc->sc_pm_lock, flags);
	if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
		ath_dbg(common, PS,
			"Going back to sleep after having received TX status (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

	__skb_queue_tail(&txq->complete_q, skb);
	ath_txq_skb_done(sc, txq, skb);
}
2346

S
Sujith 已提交
2347
/*
 * Complete one MPDU: unmap its DMA buffer, route the skb either to the
 * PAPRD calibration path or the normal completion path, then return the
 * ath_buf chain to the free pool.
 */
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	unsigned long flags;
	int tx_flags = 0;

	if (!txok)
		tx_flags |= ATH_TX_ERROR;

	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;

	/* Unmap before anyone (mac80211, PAPRD) touches the skb again. */
	dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
	bf->bf_buf_addr = 0;

	/* tx99 test mode: skb handling is done elsewhere. */
	if (sc->tx99_state)
		goto skip_tx_complete;

	if (bf->bf_state.bfs_paprd) {
		/* PAPRD training frame: if the waiter timed out, just drop
		 * the skb; otherwise wake the calibration thread. */
		if (time_after(jiffies,
				bf->bf_state.bfs_paprd_timestamp +
				msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
			dev_kfree_skb_any(skb);
		else
			complete(&sc->paprd_complete);
	} else {
		ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
		ath_tx_complete(sc, skb, tx_flags, txq);
	}
skip_tx_complete:
	/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
	 * accidentally reference it later.
	 */
	bf->bf_mpdu = NULL;

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}

F
Felix Fietkau 已提交
2392 2393
/*
 * Translate the hardware TX status into mac80211 rate-control status:
 * ack signal, A-MPDU counts, and per-rate retry counts.  Rates after
 * the one actually used are invalidated.
 */
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	u8 i, tx_rateindex;

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	tx_rateindex = ts->ts_rateindex;
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

		BUG_ON(nbad > nframes);
	}
	tx_info->status.ampdu_len = nframes;
	tx_info->status.ampdu_ack_len = nframes - nbad;

	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
		/*
		 * If an underrun error is seen assume it as an excessive
		 * retry only if max frame trigger level has been reached
		 * (2 KB for single stream, and 4 KB for dual stream).
		 * Adjust the long retry as if the frame was tried
		 * hw->max_rate_tries times to affect how rate control updates
		 * PER for the failed rate.
		 * In case of congestion on the bus penalizing this type of
		 * underruns should help hardware actually transmit new frames
		 * successfully by eventually preferring slower rates.
		 * This itself should also alleviate congestion on the bus.
		 */
		if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
		                             ATH9K_TX_DELIM_UNDERRUN)) &&
		    ieee80211_is_data(hdr->frame_control) &&
		    ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
			tx_info->status.rates[tx_rateindex].count =
				hw->max_rate_tries;
	}

	/* Invalidate all rate slots after the one the hardware used. */
	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	/* Attempt count for the final rate: long retries + first try. */
	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}

S
Sujith 已提交
2447
/*
 * Reap completed descriptors from a legacy (non-EDMA) hardware TX queue
 * and complete the corresponding frames.  Runs until the first still-
 * in-progress descriptor is found.
 */
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int status;

	ath_dbg(common, QUEUE, "tx queue %d (%x), link %p\n",
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	ath_txq_lock(sc, txq);
	for (;;) {
		/* Abort processing while a chip reset is pending. */
		if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
			break;

		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			ath_txq_schedule(sc, txq);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-load the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_state.stale) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q))
				break;

			bf = list_entry(bf_held->list.next, struct ath_buf,
					list);
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		/* Hardware has not finished this descriptor yet. */
		if (status == -EINPROGRESS)
			break;

		TX_STAT_INC(txq->axq_qnum, txprocdesc);

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_state.stale = true;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
				&txq->axq_q, lastbf->list.prev);

		/* The previous holding descriptor can now be released. */
		if (bf_held) {
			list_del(&bf_held->list);
			ath_tx_return_buffer(sc, bf_held);
		}

		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
	}
	ath_txq_unlock_complete(sc, txq);
}

S
Sujith 已提交
2522
void ath_tx_tasklet(struct ath_softc *sc)
2523
{
2524 2525
	struct ath_hw *ah = sc->sc_ah;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1) & ah->intr_txqs;
S
Sujith 已提交
2526
	int i;
2527

S
Sujith 已提交
2528 2529 2530
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
2531 2532 2533
	}
}

2534 2535
/*
 * TX completion bottom half for EDMA (AR93xx+) chips: drain the global
 * TX status ring, dispatching beacon completions separately and
 * completing data frames from the per-queue FIFOs.
 */
void ath_tx_edma_tasklet(struct ath_softc *sc)
{
	struct ath_tx_status ts;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct list_head *fifo_list;
	int status;

	for (;;) {
		/* Abort processing while a chip reset is pending. */
		if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
			break;

		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
			ath_dbg(common, XMIT, "Error processing tx status\n");
			break;
		}

		/* Process beacon completions separately */
		if (ts.qid == sc->beacon.beaconq) {
			sc->beacon.tx_processed = true;
			sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);

			ath9k_csa_is_finished(sc);
			continue;
		}

		txq = &sc->tx.txq[ts.qid];

		ath_txq_lock(sc, txq);

		TX_STAT_INC(txq->axq_qnum, txprocdesc);

		fifo_list = &txq->txq_fifo[txq->txq_tailidx];
		if (list_empty(fifo_list)) {
			ath_txq_unlock(sc, txq);
			return;
		}

		/* Drop a stale holding descriptor left at the FIFO head. */
		bf = list_first_entry(fifo_list, struct ath_buf, list);
		if (bf->bf_state.stale) {
			list_del(&bf->list);
			ath_tx_return_buffer(sc, bf);
			bf = list_first_entry(fifo_list, struct ath_buf, list);
		}

		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
		if (list_is_last(&lastbf->list, fifo_list)) {
			/* FIFO slot fully completed: advance the tail and
			 * refill the hardware from the pending axq_q. */
			list_splice_tail_init(fifo_list, &bf_head);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);

			if (!list_empty(&txq->axq_q)) {
				struct list_head bf_q;

				INIT_LIST_HEAD(&bf_q);
				txq->axq_link = NULL;
				list_splice_tail_init(&txq->axq_q, &bf_q);
				ath_tx_txqaddbuf(sc, txq, &bf_q, true);
			}
		} else {
			/* Keep the last descriptor as the holding entry. */
			lastbf->bf_state.stale = true;
			if (bf != lastbf)
				list_cut_position(&bf_head, fifo_list,
						  lastbf->list.prev);
		}

		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
		ath_txq_unlock_complete(sc, txq);
	}
}

S
Sujith 已提交
2612 2613 2614
/*****************/
/* Init, Cleanup */
/*****************/
2615

2616 2617 2618 2619 2620 2621
static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
	struct ath_descdma *dd = &sc->txsdma;
	u8 txs_len = sc->sc_ah->caps.txs_len;

	dd->dd_desc_len = size * txs_len;
2622 2623
	dd->dd_desc = dmam_alloc_coherent(sc->dev, dd->dd_desc_len,
					  &dd->dd_desc_paddr, GFP_KERNEL);
2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642
	if (!dd->dd_desc)
		return -ENOMEM;

	return 0;
}

/*
 * Set up EDMA TX: allocate the status ring and point the hardware at it.
 * Returns 0 on success or the allocation error.
 */
static int ath_tx_edma_init(struct ath_softc *sc)
{
	int err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);

	if (err)
		return err;

	ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
				  sc->txsdma.dd_desc_paddr,
				  ATH_TXSTATUS_RING_SIZE);
	return 0;
}

S
Sujith 已提交
2643
/*
 * One-time TX subsystem initialization: descriptor DMA areas for data
 * and beacon frames, the TX completion watchdog, and (on EDMA chips)
 * the status ring.  Returns 0 or a negative errno.
 */
int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int error = 0;

	spin_lock_init(&sc->tx.txbuflock);

	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
				  "tx", nbufs, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate tx descriptors: %d\n", error);
		return error;
	}

	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
				  "beacon", ATH_BCBUF, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate beacon descriptors: %d\n", error);
		return error;
	}

	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		error = ath_tx_edma_init(sc);

	return error;
}

/*
 * Initialize per-station TX state: one ath_atx_tid per TID and one
 * ath_atx_ac per WMM access category, each TID linked to its AC.
 */
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < IEEE80211_NUM_TIDS;
	     tidno++, tid++) {
		tid->an        = an;
		tid->tidno     = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size  = WME_MAX_BA;
		tid->baw_head  = tid->baw_tail = 0;
		tid->sched     = false;
		tid->paused    = false;
		tid->active	   = false;
		__skb_queue_head_init(&tid->buf_q);
		__skb_queue_head_init(&tid->retry_q);
		/* Map this TID onto its WMM access category. */
		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->ac[acno];
	}

	for (acno = 0, ac = &an->ac[acno];
	     acno < IEEE80211_NUM_ACS; acno++, ac++) {
		ac->sched    = false;
		ac->clear_ps_filter = true;
		ac->txq = sc->tx.txq_map[acno];
		INIT_LIST_HEAD(&ac->tid_q);
	}
}

S
Sujith 已提交
2706
/*
 * Tear down per-station TX state: for each TID, unschedule it (and its
 * AC) and drain any pending frames, all under the owning queue's lock.
 */
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		ath_txq_lock(sc, txq);

		/* Remove the TID from the scheduler list if queued. */
		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		/* Likewise for the access category itself. */
		if (ac->sched) {
			list_del(&ac->list);
			tid->ac->sched = false;
		}

		ath_tid_drain(sc, txq, tid);
		tid->active = false;

		ath_txq_unlock(sc, txq);
	}
}
L
Luis R. Rodriguez 已提交
2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779

/*
 * Transmit one frame in tx99 (continuous-transmit test) mode: pad the
 * MAC header, fill in frame info with crypto disabled, set up the
 * buffer with a self-referencing descriptor link and start the tx99
 * engine.  Returns 0 on success or -EINVAL on setup failure.
 */
int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
		    struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_buf *bf;
	int padpos, padsize;

	/* Align the payload start to 4 bytes, as on the normal TX path. */
	padpos = ieee80211_hdrlen(hdr->frame_control);
	padsize = padpos & 3;

	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize) {
			ath_dbg(common, XMIT,
				"tx99 padding failed\n");
			return -EINVAL;
		}

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

	/* tx99 frames are always sent in the clear. */
	fi->keyix = ATH9K_TXKEYIX_INVALID;
	fi->framelen = skb->len + FCS_LEN;
	fi->keytype = ATH9K_KEY_TYPE_CLEAR;

	bf = ath_tx_setup_buffer(sc, txctl->txq, NULL, skb);
	if (!bf) {
		ath_dbg(common, XMIT, "tx99 buffer setup failed\n");
		return -EINVAL;
	}

	ath_set_rates(sc->tx99_vif, NULL, bf);

	/* Link the descriptor to itself so the frame repeats forever. */
	ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, bf->bf_daddr);
	ath9k_hw_tx99_start(sc->sc_ah, txctl->txq->axq_qnum);

	ath_tx_send_normal(sc, txctl->txq, NULL, skb);

	return 0;
}