/* ath9k transmit path (xmit.c) */
/*
2
 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 4 5 6 7 8 9 10 11 12 13 14 15 16
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

/* 802.11n PHY timing constants (durations in microseconds unless noted). */
#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
/* Number of spatial streams encoded in an HT MCS rate code. */
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
/* Inverse of the above: symbols that fit in a given duration. */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)


35
static u16 bits_per_symbol[][2] = {
36 37 38 39 40 41 42 43 44 45 46 47 48
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

F
Felix Fietkau 已提交
49 50
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
51
			       struct list_head *bf_head);
S
Sujith 已提交
52
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
53 54
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
55
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
S
Sujith 已提交
56
			     struct list_head *head);
57
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
58
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
59
			     int nframes, int nbad, int txok, bool update_rc);
60 61
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
62

/* Row indices into ath_max_4ms_framelen: bandwidth x guard interval. */
enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

70 71 72 73 74 75 76 77 78 79 80 81
static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864,  19300,  25736,  28952,  32172,
		6424,  12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628,  19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720,  14296,  21444,  28596,  32172,  35744,
		7140,  14284, 21428,  28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112,  42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780,  57040,  65532,  65532,  65532,  65532,
82 83
	},
	[MCS_HT40] = {
84 85 86 87
		6680,  13360,  20044,  26724,  40092,  53456,  60140,  65532,
		13348, 26700,  40052,  53400,  65532,  65532,  65532,  65532,
		20004, 40008,  60016,  65532,  65532,  65532,  65532,  65532,
		26644, 53292,  65532,  65532,  65532,  65532,  65532,  65532,
88 89
	},
	[MCS_HT40_SGI] = {
90 91 92 93
		7420,  14844,  22272,  29696,  44544,  59396,  65532,  65532,
		14832, 29668,  44504,  59340,  65532,  65532,  65532,  65532,
		22232, 44464,  65532,  65532,  65532,  65532,  65532,  65532,
		29616, 59232,  65532,  65532,  65532,  65532,  65532,  65532,
94 95 96
	}
};

/*********************/
/* Aggregation logic */
/*********************/

S
Sujith 已提交
101
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
S
Sujith 已提交
102
{
S
Sujith 已提交
103
	struct ath_atx_ac *ac = tid->ac;
S
Sujith 已提交
104

S
Sujith 已提交
105 106
	if (tid->paused)
		return;
S
Sujith 已提交
107

S
Sujith 已提交
108 109
	if (tid->sched)
		return;
S
Sujith 已提交
110

S
Sujith 已提交
111 112
	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);
S
Sujith 已提交
113

S
Sujith 已提交
114 115
	if (ac->sched)
		return;
116

S
Sujith 已提交
117 118 119
	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}
120

S
Sujith 已提交
121
static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
122
{
123
	struct ath_txq *txq = tid->ac->txq;
124

125
	WARN_ON(!tid->paused);
126

127 128
	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;
129

S
Sujith 已提交
130 131
	if (list_empty(&tid->buf_q))
		goto unlock;
132

S
Sujith 已提交
133 134 135 136
	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
S
Sujith 已提交
137
}
138

139
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
140 141
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
142 143 144
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
145 146
}

S
Sujith 已提交
147
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
S
Sujith 已提交
148
{
149
	struct ath_txq *txq = tid->ac->txq;
S
Sujith 已提交
150 151
	struct ath_buf *bf;
	struct list_head bf_head;
152
	struct ath_tx_status ts;
153
	struct ath_frame_info *fi;
154

155
	INIT_LIST_HEAD(&bf_head);
156

157
	memset(&ts, 0, sizeof(ts));
158
	spin_lock_bh(&txq->axq_lock);
159

S
Sujith 已提交
160 161
	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
S
Sujith 已提交
162
		list_move_tail(&bf->list, &bf_head);
163

164
		spin_unlock_bh(&txq->axq_lock);
165 166 167
		fi = get_frame_info(bf->bf_mpdu);
		if (fi->retries) {
			ath_tx_update_baw(sc, tid, fi->seqno);
168 169
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		} else {
170
			ath_tx_send_normal(sc, txq, NULL, &bf_head);
171
		}
172
		spin_lock_bh(&txq->axq_lock);
S
Sujith 已提交
173
	}
174

S
Sujith 已提交
175
	spin_unlock_bh(&txq->axq_lock);
S
Sujith 已提交
176
}
177

S
Sujith 已提交
178 179
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
S
Sujith 已提交
180
{
S
Sujith 已提交
181
	int index, cindex;
182

S
Sujith 已提交
183 184
	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
185

186
	__clear_bit(cindex, tid->tx_buf);
S
Sujith 已提交
187

188
	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
S
Sujith 已提交
189 190 191
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
S
Sujith 已提交
192
}
193

S
Sujith 已提交
194
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
195
			     u16 seqno)
S
Sujith 已提交
196
{
S
Sujith 已提交
197
	int index, cindex;
S
Sujith 已提交
198

199
	index  = ATH_BA_INDEX(tid->seq_start, seqno);
S
Sujith 已提交
200
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
201
	__set_bit(cindex, tid->tx_buf);
202

S
Sujith 已提交
203 204 205 206
	if (index >= ((tid->baw_tail - tid->baw_head) &
		(ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
207 208 209 210
	}
}

/*
S
Sujith 已提交
211 212 213 214
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
215
 */
S
Sujith 已提交
216 217
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
218 219

{
S
Sujith 已提交
220 221
	struct ath_buf *bf;
	struct list_head bf_head;
222
	struct ath_tx_status ts;
223
	struct ath_frame_info *fi;
224 225

	memset(&ts, 0, sizeof(ts));
S
Sujith 已提交
226
	INIT_LIST_HEAD(&bf_head);
227

S
Sujith 已提交
228 229 230
	for (;;) {
		if (list_empty(&tid->buf_q))
			break;
231

S
Sujith 已提交
232 233
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);
234

235 236 237
		fi = get_frame_info(bf->bf_mpdu);
		if (fi->retries)
			ath_tx_update_baw(sc, tid, fi->seqno);
238

S
Sujith 已提交
239
		spin_unlock(&txq->axq_lock);
240
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
S
Sujith 已提交
241 242
		spin_lock(&txq->axq_lock);
	}
243

S
Sujith 已提交
244 245
	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
246 247
}

S
Sujith 已提交
248
static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
249
			     struct sk_buff *skb)
250
{
251
	struct ath_frame_info *fi = get_frame_info(skb);
S
Sujith 已提交
252
	struct ieee80211_hdr *hdr;
253

S
Sujith 已提交
254
	TX_STAT_INC(txq->axq_qnum, a_retries);
255
	if (fi->retries++ > 0)
256
		return;
257

S
Sujith 已提交
258 259
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
260 261
}

262
static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
S
Sujith 已提交
263
{
264
	struct ath_buf *bf = NULL;
S
Sujith 已提交
265 266

	spin_lock_bh(&sc->tx.txbuflock);
267 268

	if (unlikely(list_empty(&sc->tx.txbuf))) {
269 270 271
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}
272 273 274 275

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

S
Sujith 已提交
276 277
	spin_unlock_bh(&sc->tx.txbuflock);

278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295
	return bf;
}

/* Return a tx buffer to the global free list (sc->tx.txbuf). */
static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

S
Sujith 已提交
296 297
	ATH_TXBUF_RESET(tbf);

F
Felix Fietkau 已提交
298
	tbf->aphy = bf->aphy;
S
Sujith 已提交
299 300
	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
301
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
S
Sujith 已提交
302 303 304 305 306
	tbf->bf_state = bf->bf_state;

	return tbf;
}

307 308 309 310
static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
			        struct ath_tx_status *ts, int txok,
			        int *nframes, int *nbad)
{
311
	struct ath_frame_info *fi;
312 313 314 315 316 317 318 319 320 321 322 323 324 325 326
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
327 328
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, fi->seqno);
329 330 331 332 333 334 335 336 337 338

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}


S
Sujith 已提交
339 340
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
341
				 struct ath_tx_status *ts, int txok, bool retry)
342
{
S
Sujith 已提交
343 344
	struct ath_node *an = NULL;
	struct sk_buff *skb;
345
	struct ieee80211_sta *sta;
346
	struct ieee80211_hw *hw;
347
	struct ieee80211_hdr *hdr;
348
	struct ieee80211_tx_info *tx_info;
S
Sujith 已提交
349
	struct ath_atx_tid *tid = NULL;
S
Sujith 已提交
350
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
S
Sujith 已提交
351
	struct list_head bf_head, bf_pending;
352
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
353
	u32 ba[WME_BA_BMP_SIZE >> 5];
354 355
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
356
	struct ieee80211_tx_rate rates[4];
357
	struct ath_frame_info *fi;
358
	int nframes;
359
	u8 tidno;
360

S
Sujith 已提交
361
	skb = bf->bf_mpdu;
362 363
	hdr = (struct ieee80211_hdr *)skb->data;

364
	tx_info = IEEE80211_SKB_CB(skb);
F
Felix Fietkau 已提交
365
	hw = bf->aphy->hw;
366

367 368
	memcpy(rates, tx_info->control.rates, sizeof(rates));

369
	rcu_read_lock();
370

371
	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
372 373
	if (!sta) {
		rcu_read_unlock();
374

375 376 377 378 379 380 381 382 383
		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			bf->bf_state.bf_type |= BUF_XRETRY;
			if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
			    !bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

384
			ath_tx_rc_status(bf, ts, 1, 1, 0, false);
385 386 387 388 389
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				0, 0);

			bf = bf_next;
		}
390
		return;
391 392
	}

393
	an = (struct ath_node *)sta->drv_priv;
394 395
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);
396

397 398 399 400 401
	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
402
	if (tidno != ts->tid)
403 404
		txok = false;

S
Sujith 已提交
405
	isaggr = bf_isaggr(bf);
S
Sujith 已提交
406
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);
407

S
Sujith 已提交
408
	if (isaggr && txok) {
409 410 411
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
S
Sujith 已提交
412
		} else {
S
Sujith 已提交
413 414 415 416 417 418 419
			/*
			 * AR5416 can become deaf/mute when BA
			 * issue happens. Chip needs to be reset.
			 * But AP code may have sychronization issues
			 * when perform internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
420
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
S
Sujith 已提交
421
				needreset = 1;
S
Sujith 已提交
422
		}
423 424
	}

S
Sujith 已提交
425 426
	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);
427

428
	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
S
Sujith 已提交
429
	while (bf) {
430
		txfail = txpending = sendbar = 0;
S
Sujith 已提交
431
		bf_next = bf->bf_next;
432

433 434
		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
435
		fi = get_frame_info(skb);
436

437
		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) {
S
Sujith 已提交
438 439
			/* transmit completion, subframe is
			 * acked by block ack */
440
			acked_cnt++;
S
Sujith 已提交
441 442
		} else if (!isaggr && txok) {
			/* transmit completion */
443
			acked_cnt++;
S
Sujith 已提交
444
		} else {
445
			if (!(tid->state & AGGR_CLEANUP) && retry) {
446 447
				if (fi->retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);
S
Sujith 已提交
448 449 450 451 452
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
453
					txfail_cnt++;
S
Sujith 已提交
454 455 456 457 458 459 460 461 462
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}
463

464 465
		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
466 467 468 469 470 471 472 473
			/*
			 * Make sure the last desc is reclaimed if it
			 * not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
S
Sujith 已提交
474
		} else {
475
			BUG_ON(list_empty(bf_q));
S
Sujith 已提交
476
			list_move_tail(&bf->list, &bf_head);
S
Sujith 已提交
477
		}
478

479
		if (!txpending || (tid->state & AGGR_CLEANUP)) {
S
Sujith 已提交
480 481 482 483 484
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
485
			ath_tx_update_baw(sc, tid, fi->seqno);
S
Sujith 已提交
486
			spin_unlock_bh(&txq->axq_lock);
487

488
			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
489
				memcpy(tx_info->control.rates, rates, sizeof(rates));
490
				ath_tx_rc_status(bf, ts, nframes, nbad, txok, true);
491 492
				rc_update = false;
			} else {
493
				ath_tx_rc_status(bf, ts, nframes, nbad, txok, false);
494 495
			}

496 497
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				!txfail, sendbar);
S
Sujith 已提交
498
		} else {
S
Sujith 已提交
499
			/* retry the un-acked ones */
500 501 502 503 504 505 506 507 508 509 510 511
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
512
						ath_tx_update_baw(sc, tid, fi->seqno);
513 514 515 516
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
517 518
						ath_tx_rc_status(bf, ts, nframes,
								nbad, 0, false);
519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
535
				}
S
Sujith 已提交
536 537 538 539 540 541 542 543 544 545
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
546 547
	}

548 549 550 551 552 553 554 555
	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

S
Sujith 已提交
556
	if (tid->state & AGGR_CLEANUP) {
557 558
		ath_tx_flush_tid(sc, tid);

S
Sujith 已提交
559 560 561
		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
S
Sujith 已提交
562
		}
S
Sujith 已提交
563
	}
564

565 566
	rcu_read_unlock();

S
Sujith 已提交
567 568 569
	if (needreset)
		ath_reset(sc, false);
}
570

S
Sujith 已提交
571 572
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
573
{
S
Sujith 已提交
574 575
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
S
Sujith 已提交
576
	struct ieee80211_tx_rate *rates;
S
Sujith 已提交
577
	u32 max_4ms_framelen, frmlen;
578
	u16 aggr_limit, legacy = 0;
S
Sujith 已提交
579
	int i;
S
Sujith 已提交
580

S
Sujith 已提交
581
	skb = bf->bf_mpdu;
S
Sujith 已提交
582
	tx_info = IEEE80211_SKB_CB(skb);
S
Sujith 已提交
583
	rates = tx_info->control.rates;
S
Sujith 已提交
584

S
Sujith 已提交
585 586 587 588 589 590
	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
S
Sujith 已提交
591

S
Sujith 已提交
592 593
	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
594 595
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
S
Sujith 已提交
596 597 598 599
				legacy = 1;
				break;
			}

600
			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
601 602
				modeidx = MCS_HT40;
			else
603 604 605 606
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;
607 608

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
S
Sujith 已提交
609
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
610 611
		}
	}
S
Sujith 已提交
612

613
	/*
S
Sujith 已提交
614 615 616
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate, if rate selected is a probe rate then
	 * avoid aggregation of this packet.
617
	 */
S
Sujith 已提交
618 619
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;
620

621 622 623 624 625 626
	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);
627

S
Sujith 已提交
628 629 630 631
	/*
	 * h/w can accept aggregates upto 16 bit lengths (65535).
	 * The IE, however can hold upto 65536, which shows up here
	 * as zero. Ignore 65536 since we  are constrained by hw.
632
	 */
633 634
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);
635

S
Sujith 已提交
636 637
	return aggr_limit;
}
638

S
Sujith 已提交
639
/*
S
Sujith 已提交
640
 * Returns the number of delimiters to be added to
S
Sujith 已提交
641 642 643 644 645 646 647
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
648
	u32 nsymbits, nsymbols;
S
Sujith 已提交
649
	u16 minlen;
650
	u8 flags, rix;
651
	int width, streams, half_gi, ndelim, mindelim;
652
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
S
Sujith 已提交
653 654 655

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);
656 657

	/*
S
Sujith 已提交
658 659 660 661
	 * If encryption enabled, hardware requires some more padding between
	 * subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *      The hardware can keep up at lower rates, but not higher rates
662
	 */
663
	if (fi->keyix != ATH9K_TXKEYIX_INVALID)
S
Sujith 已提交
664
		ndelim += ATH_AGGR_ENCRYPTDELIM;
665

S
Sujith 已提交
666 667 668 669 670
	/*
	 * Convert desired mpdu density from microeconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40Mhz and half or full GI.
671
	 *
S
Sujith 已提交
672 673 674
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */
675 676

	if (tid->an->mpdudensity == 0)
S
Sujith 已提交
677
		return ndelim;
678

S
Sujith 已提交
679 680 681 682
	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;
683

S
Sujith 已提交
684
	if (half_gi)
685
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
S
Sujith 已提交
686
	else
687
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);
688

S
Sujith 已提交
689 690
	if (nsymbols == 0)
		nsymbols = 1;
691

692 693
	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
S
Sujith 已提交
694
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
695

S
Sujith 已提交
696 697 698
	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
699 700
	}

S
Sujith 已提交
701
	return ndelim;
702 703
}

S
Sujith 已提交
704
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
S
Sujith 已提交
705
					     struct ath_txq *txq,
S
Sujith 已提交
706
					     struct ath_atx_tid *tid,
707 708
					     struct list_head *bf_q,
					     int *aggr_len)
709
{
S
Sujith 已提交
710
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
S
Sujith 已提交
711 712
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
S
Sujith 已提交
713 714 715
	u16 aggr_limit = 0, al = 0, bpad = 0,
		al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
716
	struct ieee80211_tx_info *tx_info;
717
	struct ath_frame_info *fi;
718

S
Sujith 已提交
719
	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);
720

S
Sujith 已提交
721 722
	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
723
		fi = get_frame_info(bf->bf_mpdu);
724

S
Sujith 已提交
725
		/* do not step over block-ack window */
726
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) {
S
Sujith 已提交
727 728 729
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}
730

S
Sujith 已提交
731 732 733 734
		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}
735

S
Sujith 已提交
736
		/* do not exceed aggregation limit */
737
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;
738

S
Sujith 已提交
739 740
		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
S
Sujith 已提交
741 742 743
			status = ATH_AGGR_LIMITED;
			break;
		}
744

745 746 747 748 749
		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
			!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
			break;

S
Sujith 已提交
750 751
		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
S
Sujith 已提交
752 753 754
			status = ATH_AGGR_LIMITED;
			break;
		}
S
Sujith 已提交
755
		nframes++;
756

S
Sujith 已提交
757
		/* add padding for previous frame to aggregation length */
S
Sujith 已提交
758
		al += bpad + al_delta;
759

S
Sujith 已提交
760 761 762 763
		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
764
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen);
S
Sujith 已提交
765
		bpad = PADBYTES(al_delta) + (ndelim << 2);
766

S
Sujith 已提交
767
		bf->bf_next = NULL;
768
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);
769

S
Sujith 已提交
770
		/* link buffers of this frame to the aggregate */
771 772
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, fi->seqno);
S
Sujith 已提交
773 774
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
S
Sujith 已提交
775 776
		if (bf_prev) {
			bf_prev->bf_next = bf;
777 778
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
S
Sujith 已提交
779 780
		}
		bf_prev = bf;
S
Sujith 已提交
781

S
Sujith 已提交
782
	} while (!list_empty(&tid->buf_q));
783

784
	*aggr_len = al;
S
Sujith 已提交
785

S
Sujith 已提交
786 787 788
	return status;
#undef PADBYTES
}
789

S
Sujith 已提交
790 791 792
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
S
Sujith 已提交
793
	struct ath_buf *bf;
S
Sujith 已提交
794
	enum ATH_AGGR_STATUS status;
795
	struct ath_frame_info *fi;
S
Sujith 已提交
796
	struct list_head bf_q;
797
	int aggr_len;
798

S
Sujith 已提交
799 800 801
	do {
		if (list_empty(&tid->buf_q))
			return;
802

S
Sujith 已提交
803 804
		INIT_LIST_HEAD(&bf_q);

805
		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);
806 807

		/*
S
Sujith 已提交
808 809
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
810
		 */
S
Sujith 已提交
811 812
		if (list_empty(&bf_q))
			break;
813

S
Sujith 已提交
814
		bf = list_first_entry(&bf_q, struct ath_buf, list);
S
Sujith 已提交
815
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
816

S
Sujith 已提交
817
		/* if only one frame, send as non-aggregate */
818
		if (bf == bf->bf_lastbf) {
819 820
			fi = get_frame_info(bf->bf_mpdu);

S
Sujith 已提交
821
			bf->bf_state.bf_type &= ~BUF_AGGR;
S
Sujith 已提交
822
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
823
			ath_buf_set_rate(sc, bf, fi->framelen);
S
Sujith 已提交
824 825 826
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}
827

S
Sujith 已提交
828
		/* setup first desc of aggregate */
S
Sujith 已提交
829
		bf->bf_state.bf_type |= BUF_AGGR;
830 831
		ath_buf_set_rate(sc, bf, aggr_len);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);
832

S
Sujith 已提交
833 834
		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);
835

S
Sujith 已提交
836
		ath_tx_txqaddbuf(sc, txq, &bf_q);
S
Sujith 已提交
837
		TX_STAT_INC(txq->axq_qnum, a_aggr);
838

839
	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
S
Sujith 已提交
840 841 842
		 status != ATH_AGGR_BAW_CLOSED);
}

843 844
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
S
Sujith 已提交
845 846 847 848 849
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
S
Sujith 已提交
850
	txtid = ATH_AN_2_TID(an, tid);
851 852 853 854

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

S
Sujith 已提交
855
	txtid->state |= AGGR_ADDBA_PROGRESS;
856
	txtid->paused = true;
857
	*ssn = txtid->seq_start = txtid->seq_next;
858

859 860 861
	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

862
	return 0;
S
Sujith 已提交
863
}
864

S
Sujith 已提交
865
void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
S
Sujith 已提交
866 867 868
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
869
	struct ath_txq *txq = txtid->ac->txq;
870

S
Sujith 已提交
871
	if (txtid->state & AGGR_CLEANUP)
S
Sujith 已提交
872
		return;
873

S
Sujith 已提交
874
	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
875
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
S
Sujith 已提交
876
		return;
S
Sujith 已提交
877
	}
878

S
Sujith 已提交
879
	spin_lock_bh(&txq->axq_lock);
880
	txtid->paused = true;
881

882 883 884 885 886 887 888
	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
S
Sujith 已提交
889
		txtid->state |= AGGR_CLEANUP;
890
	else
S
Sujith 已提交
891
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
892 893 894
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
S
Sujith 已提交
895
}
896

S
Sujith 已提交
897 898 899 900 901 902 903 904 905 906 907 908 909 910 911
void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
912 913
}

/********************/
/* Queue Management */
/********************/

S
Sujith 已提交
918 919
static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
920
{
S
Sujith 已提交
921 922
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;
923

S
Sujith 已提交
924 925 926 927 928 929 930 931
	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
932 933 934
	}
}

/*
 * Allocate a hardware TX queue of the given type/subtype and perform
 * one-time initialization of the matching driver-side ath_txq state.
 *
 * Returns a pointer into sc->tx.txq[], or NULL if the hardware has no
 * queue to spare or the returned queue number does not fit the array.
 */
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	/* map WME access categories to hardware queue subtypes */
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
		/* hw gave us a queue we cannot track; hand it back */
		ath_err(common, "qnum %u out of range, max %zu!\n",
			axq_qnum, ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, axq_qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		/* first time this hw queue is used: init driver state */
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		/* EDMA FIFO bookkeeping (unused on non-EDMA parts) */
		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[axq_qnum];
}

S
Sujith 已提交
1016 1017 1018
int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
1019
	struct ath_hw *ah = sc->sc_ah;
S
Sujith 已提交
1020 1021 1022 1023 1024 1025 1026 1027 1028 1029
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
1030
		return 0;
S
Sujith 已提交
1031
	}
1032

1033
	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
S
Sujith 已提交
1034 1035 1036 1037 1038 1039 1040 1041 1042

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
1043 1044
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
S
Sujith 已提交
1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->beacon.cabq->axq_qnum;
1057

S
Sujith 已提交
1058
	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1059
	/*
S
Sujith 已提交
1060
	 * Ensure the readytime % is within the bounds.
1061
	 */
S
Sujith 已提交
1062 1063 1064 1065
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
1066

1067
	qi.tqi_readyTime = (sc->beacon_interval *
S
Sujith 已提交
1068
			    sc->config.cabqReadytime) / 100;
S
Sujith 已提交
1069 1070 1071
	ath_txq_update(sc, qnum, &qi);

	return 0;
1072 1073
}

1074 1075 1076 1077 1078 1079
static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
    struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
    return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 *
 * Every buffer still sitting on the queue (and, on EDMA parts, on the
 * pending-FIFO list) is completed with a zeroed tx status; software
 * queues are drained afterwards if aggregation is enabled.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	/* frames are completed with an all-zero (failed) status */
	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			/* EDMA: frames live in the per-queue FIFO lists */
			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
				txq->txq_headidx = txq->txq_tailidx = 0;
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
						      struct ath_buf, list);
			}
		} else {
			if (list_empty(&txq->axq_q)) {
				txq->axq_link = NULL;
				spin_unlock_bh(&txq->axq_lock);
				break;
			}
			bf = list_first_entry(&txq->axq_q, struct ath_buf,
					      list);

			/* stale holding buffer: just return it to the pool */
			if (bf->bf_stale) {
				list_del(&bf->list);
				spin_unlock_bh(&txq->axq_lock);

				ath_tx_return_buffer(sc, bf);
				continue;
			}
		}

		lastbf = bf->bf_lastbf;

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			list_cut_position(&bf_head,
					  &txq->txq_fifo[txq->txq_tailidx],
					  &lastbf->list);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		} else {
			/* remove ath_buf's of the same mpdu from txq */
			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		}

		txq->axq_depth--;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;
		spin_unlock_bh(&txq->axq_lock);

		/* complete outside the lock */
		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
					     retry_tx);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	spin_lock_bh(&txq->axq_lock);
	txq->axq_tx_inprogress = false;
	spin_unlock_bh(&txq->axq_lock);

	/* EDMA: also flush frames waiting for a free FIFO slot */
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		spin_lock_bh(&txq->axq_lock);
		while (!list_empty(&txq->txq_fifo_pending)) {
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			spin_unlock_bh(&txq->axq_lock);

			if (bf_isampdu(bf))
				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
						     &ts, 0, retry_tx);
			else
				ath_tx_complete_buf(sc, bf, txq, &bf_head,
						    &ts, 0, 0);
			spin_lock_bh(&txq->axq_lock);
		}
		spin_unlock_bh(&txq->axq_lock);
	}

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}
}

/*
 * Stop DMA on the beacon queue and every configured data queue, then
 * drain each queue's frames via ath_draintxq().
 *
 * Returns false if any queue still reported pending frames after the
 * stop (i.e. TX DMA failed to halt), true otherwise.
 */
bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return true;

	/* Stop beacon queue */
	ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

	/* Stop data queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			ath9k_hw_stoptxdma(ah, txq->axq_qnum);
			/* count frames the hw still holds after the stop */
			npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
		}
	}

	if (npend)
		ath_err(common, "Failed to stop TX DMA!\n");

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		/*
		 * The caller will resume queues with ieee80211_wake_queues.
		 * Mark the queue as not stopped to prevent ath_tx_complete
		 * from waking the queue too early.
		 */
		txq = &sc->tx.txq[i];
		txq->stopped = false;
		ath_draintxq(sc, txq, retry_tx);
	}

	return !npend;
}
1225

S
Sujith 已提交
1226
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
S
Sujith 已提交
1227
{
S
Sujith 已提交
1228 1229
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
S
Sujith 已提交
1230
}
1231

1232 1233 1234
/* For each axq_acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 */
S
Sujith 已提交
1235 1236
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
1237 1238
	struct ath_atx_ac *ac, *ac_tmp, *last_ac;
	struct ath_atx_tid *tid, *last_tid;
1239

1240 1241
	if (list_empty(&txq->axq_acq) ||
	    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
S
Sujith 已提交
1242
		return;
1243

S
Sujith 已提交
1244
	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
1245
	last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
1246

1247 1248 1249 1250
	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
		list_del(&ac->list);
		ac->sched = false;
1251

1252 1253 1254 1255 1256
		while (!list_empty(&ac->tid_q)) {
			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
					       list);
			list_del(&tid->list);
			tid->sched = false;
1257

1258 1259
			if (tid->paused)
				continue;
1260

1261
			ath_tx_sched_aggr(sc, txq, tid);
1262

1263 1264 1265 1266 1267 1268
			/*
			 * add tid to round-robin queue if more frames
			 * are pending for the tid
			 */
			if (!list_empty(&tid->buf_q))
				ath_tx_queue_tid(txq, tid);
1269

1270 1271 1272 1273
			if (tid == last_tid ||
			    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
				break;
		}
1274

1275 1276 1277 1278 1279
		if (!list_empty(&ac->tid_q)) {
			if (!ac->sched) {
				ac->sched = true;
				list_add_tail(&ac->list, &txq->axq_acq);
			}
1280
		}
1281 1282 1283 1284

		if (ac == last_ac ||
		    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
			return;
S
Sujith 已提交
1285 1286
	}
}
1287

S
Sujith 已提交
1288 1289 1290 1291
/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 *
 * On EDMA hardware the chain goes into the queue's TX FIFO (or onto
 * txq_fifo_pending when the FIFO is full); on legacy hardware it is
 * appended to axq_q and linked to the previous descriptor, starting
 * DMA if the queue was idle.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	ath_dbg(common, ATH_DBG_QUEUE,
		"qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
			/* FIFO full: park the chain until slots free up */
			list_splice_tail_init(head, &txq->txq_fifo_pending);
			return;
		}
		if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
			ath_dbg(common, ATH_DBG_XMIT,
				"Initializing tx fifo %d which is non-empty\n",
				txq->txq_headidx);
		INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
		list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		TX_STAT_INC(txq->axq_qnum, puttxbuf);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link == NULL) {
			/* queue was idle: point hw at the first descriptor */
			TX_STAT_INC(txq->axq_qnum, puttxbuf);
			ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
				txq->axq_qnum, ito64(bf->bf_daddr),
				bf->bf_desc);
		} else {
			/* chain onto the previously queued descriptor */
			*txq->axq_link = bf->bf_daddr;
			ath_dbg(common, ATH_DBG_XMIT,
				"link[%u] (%p)=%llx (%p)\n",
				txq->axq_qnum, txq->axq_link,
				ito64(bf->bf_daddr), bf->bf_desc);
		}
		ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
				       &txq->axq_link);
		TX_STAT_INC(txq->axq_qnum, txstart);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}
	txq->axq_depth++;
	if (bf_is_ampdu_not_probing(bf))
		txq->axq_ampdu_depth++;
}
1357

/*
 * Queue an A-MPDU subframe: either park it on the TID's software
 * queue for later aggregation, or send it to the hardware directly.
 * NOTE(review): callers appear to hold txq->axq_lock (see
 * ath_tx_start_dma) -- confirm before adding new call sites.
 */
static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct ath_buf *bf, struct ath_tx_control *txctl)
{
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
	struct list_head bf_head;

	bf->bf_state.bf_type |= BUF_AMPDU;

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!list_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) ||
	    txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
		list_add_tail(&bf->list, &tid->buf_q);
		ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

	INIT_LIST_HEAD(&bf_head);
	list_add(&bf->list, &bf_head);

	/* Add sub-frame to BAW */
	if (!fi->retries)
		ath_tx_addto_baw(sc, tid, fi->seqno);

	/* Queue to h/w without aggregation */
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf, fi->framelen);
	ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
}

/*
 * Send a non-aggregated frame chain to the hardware queue @txq.
 * @tid may be NULL for frames not associated with an aggregation TID.
 */
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
			       struct list_head *bf_head)
{
	struct ath_frame_info *fi;
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	/* explicitly not an A-MPDU buffer */
	bf->bf_state.bf_type &= ~BUF_AMPDU;

	/* update starting sequence number for subsequent ADDBA request */
	if (tid)
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	bf->bf_lastbf = bf;
	fi = get_frame_info(bf->bf_mpdu);
	ath_buf_set_rate(sc, bf, fi->framelen);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

/*
 * Map an 802.11 frame to the hardware packet type used in the TX
 * descriptor; anything that is not a beacon, probe response, ATIM or
 * PS-Poll is a normal frame.
 */
static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		return ATH9K_PKT_TYPE_BEACON;
	if (ieee80211_is_probe_resp(fc))
		return ATH9K_PKT_TYPE_PROBE_RESP;
	if (ieee80211_is_atim(fc))
		return ATH9K_PKT_TYPE_ATIM;
	if (ieee80211_is_pspoll(fc))
		return ATH9K_PKT_TYPE_PSPOLL;

	return ATH9K_PKT_TYPE_NORMAL;
}

/*
 * Populate the per-frame ath_frame_info (stored in the skb's control
 * block, overwriting the mac80211 tx control pointers) with key,
 * frame length and sequence number.  For QoS data frames on an
 * aggregation-capable HT connection, the sequence number from the
 * TID's tx aggregation state replaces the one set by mac80211.
 */
static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
			     int framelen)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = tx_info->control.sta;
	struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
	struct ieee80211_hdr *hdr;
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_node *an;
	struct ath_atx_tid *tid;
	enum ath9k_key_type keytype;
	u16 seqno = 0;
	u8 tidno;

	keytype = ath9k_cmn_get_hw_crypto_keytype(skb);

	hdr = (struct ieee80211_hdr *)skb->data;
	if (sta && ieee80211_is_data_qos(hdr->frame_control) &&
		conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {

		an = (struct ath_node *) sta->drv_priv;
		tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;

		/*
		 * Override seqno set by upper layer with the one
		 * in tx aggregation state.
		 */
		tid = ATH_AN_2_TID(an, tidno);
		seqno = tid->seq_next;
		hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
		INCR(tid->seq_next, IEEE80211_SEQ_MAX);
	}

	/* this clobbers tx_info->control -- sta/hw_key are dead below */
	memset(fi, 0, sizeof(*fi));
	if (hw_key)
		fi->keyix = hw_key->hw_key_idx;
	else
		fi->keyix = ATH9K_TXKEYIX_INVALID;
	fi->keytype = keytype;
	fi->framelen = framelen;
	fi->seqno = seqno;
}

F
Felix Fietkau 已提交
1489
static int setup_tx_flags(struct sk_buff *skb)
S
Sujith 已提交
1490 1491 1492 1493 1494 1495 1496 1497 1498 1499
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	int flags = 0;

	flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
	flags |= ATH9K_TXDESC_INTREQ;

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= ATH9K_TXDESC_NOACK;

F
Felix Fietkau 已提交
1500
	if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
L
Luis R. Rodriguez 已提交
1501 1502
		flags |= ATH9K_TXDESC_LDPC;

S
Sujith 已提交
1503 1504 1505 1506 1507 1508 1509 1510 1511
	return flags;
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width  - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4us v/s 3.6 us for symbol time
 */
1512
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
S
Sujith 已提交
1513 1514 1515
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
1516
	int streams;
S
Sujith 已提交
1517 1518

	/* find number of symbols: PLCP + data */
1519
	streams = HT_RC_2_STREAMS(rix);
S
Sujith 已提交
1520
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
1521
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
S
Sujith 已提交
1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* addup duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}

1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546
u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_channel *curchan = ah->curchan;
	if ((sc->sc_flags & SC_OP_ENABLE_APM) &&
			(curchan->channelFlags & CHANNEL_5GHZ) &&
			(chainmask == 0x7) && (rate < 0x90))
		return 0x3;
	else
		return chainmask;
}

1547
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
S
Sujith 已提交
1548
{
1549
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
S
Sujith 已提交
1550 1551 1552 1553
	struct ath9k_11n_rate_series series[4];
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
1554
	const struct ieee80211_rate *rate;
1555
	struct ieee80211_hdr *hdr;
1556 1557
	int i, flags = 0;
	u8 rix = 0, ctsrate = 0;
1558
	bool is_pspoll;
S
Sujith 已提交
1559 1560 1561

	memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);

S
Sujith 已提交
1562
	skb = bf->bf_mpdu;
S
Sujith 已提交
1563 1564
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
1565 1566
	hdr = (struct ieee80211_hdr *)skb->data;
	is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
S
Sujith 已提交
1567 1568

	/*
1569 1570 1571
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
S
Sujith 已提交
1572
	 */
1573 1574
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	ctsrate = rate->hw_value;
1575
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
1576
		ctsrate |= rate->hw_value_short;
S
Sujith 已提交
1577 1578

	for (i = 0; i < 4; i++) {
1579 1580 1581
		bool is_40, is_sgi, is_sp;
		int phy;

S
Sujith 已提交
1582 1583 1584 1585 1586 1587
		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		series[i].Tries = rates[i].count;

F
Felix Fietkau 已提交
1588 1589
		if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
		    (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
1590
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
F
Felix Fietkau 已提交
1591 1592 1593 1594 1595 1596
			flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_CTSENA;
		}

1597 1598 1599 1600
		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			series[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
S
Sujith 已提交
1601

1602 1603 1604 1605 1606 1607 1608
		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			series[i].Rate = rix | 0x80;
1609 1610
			series[i].ChSel = ath_txchainmask_reduction(sc,
					common->tx_chainmask, series[i].Rate);
1611
			series[i].PktDuration = ath_pkt_duration(sc, rix, len,
1612
				 is_40, is_sgi, is_sp);
1613 1614
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				series[i].RateFlags |= ATH9K_RATESERIES_STBC;
1615 1616 1617
			continue;
		}

1618
		/* legacy rates */
1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		series[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				series[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

1634 1635 1636 1637 1638 1639
		if (bf->bf_state.bfs_paprd)
			series[i].ChSel = common->tx_chainmask;
		else
			series[i].ChSel = ath_txchainmask_reduction(sc,
					common->tx_chainmask, series[i].Rate);

1640
		series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
1641
			phy, rate->bitrate * 100, len, rix, is_sp);
1642 1643
	}

F
Felix Fietkau 已提交
1644
	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
1645
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
F
Felix Fietkau 已提交
1646 1647 1648 1649 1650 1651
		flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (flags & ATH9K_TXDESC_RTSENA)
		flags &= ~ATH9K_TXDESC_CTSENA;

S
Sujith 已提交
1652
	/* set dur_update_en for l-sig computation except for PS-Poll frames */
1653 1654
	ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
				     bf->bf_lastbf->bf_desc,
1655
				     !is_pspoll, ctsrate,
1656
				     0, series, 4, flags);
1657

S
Sujith 已提交
1658
	if (sc->config.ath_aggr_prot && flags)
1659
		ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
1660 1661
}

/*
 * Allocate an ath_buf for @skb, DMA-map the frame and fill in the
 * first/last TX descriptor.  Returns NULL if no buffer is available
 * or the DMA mapping fails (the buffer is returned to the pool).
 */
static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
					   struct ath_txq *txq,
					   struct sk_buff *skb)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_buf *bf;
	struct ath_desc *ds;
	int frm_type;

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
		return NULL;
	}

	ATH_TXBUF_RESET(bf);

	bf->aphy = aphy;
	bf->bf_flags = setup_tx_flags(skb);
	bf->bf_mpdu = skb;

	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
		/* undo the partial setup before giving the buffer back */
		bf->bf_mpdu = NULL;
		bf->bf_buf_addr = 0;
		ath_err(ath9k_hw_common(sc->sc_ah),
			"dma_mapping_error() on TX\n");
		ath_tx_return_buffer(sc, bf);
		return NULL;
	}

	frm_type = get_hw_packet_type(skb);

	ds = bf->bf_desc;
	ath9k_hw_set_desc_link(ah, ds, 0);

	ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
			       fi->keyix, fi->keytype, bf->bf_flags);

	/* single-segment frame: first == last == only descriptor */
	ath9k_hw_filltxdesc(ah, ds,
			    skb->len,	/* segment length */
			    true,	/* first segment */
			    true,	/* last segment */
			    ds,		/* first descriptor */
			    bf->bf_buf_addr,
			    txq->axq_qnum);


	return bf;
}

/* FIXME: tx power */
/*
 * Hand a prepared ath_buf to the TX path: A-MPDU-eligible QoS data
 * frames go through the per-TID aggregation path, everything else is
 * sent directly.  Takes and releases txctl->txq->axq_lock.
 */
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_control *txctl)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct list_head bf_head;
	struct ath_atx_tid *tid = NULL;
	u8 tidno;

	spin_lock_bh(&txctl->txq->axq_lock);

	if (ieee80211_is_data_qos(hdr->frame_control) && txctl->an) {
		tidno = ieee80211_get_qos_ctl(hdr)[0] &
			IEEE80211_QOS_CTL_TID_MASK;
		tid = ATH_AN_2_TID(txctl->an, tidno);

		WARN_ON(tid->ac->txq != txctl->txq);
	}

	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
		/*
		 * Try aggregation if it's a unicast data frame
		 * and the destination is HT capable.
		 */
		ath_tx_send_ampdu(sc, tid, bf, txctl);
	} else {
		INIT_LIST_HEAD(&bf_head);
		list_add_tail(&bf->list, &bf_head);

		bf->bf_state.bfs_ftype = txctl->frame_type;
		bf->bf_state.bfs_paprd = txctl->paprd;

		/* PAPRD calibration frames need extra descriptor setup */
		if (bf->bf_state.bfs_paprd)
			ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
						   bf->bf_state.bfs_paprd);

		ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
	}

	spin_unlock_bh(&txctl->txq->axq_lock);
}

/* Upon failure caller should free skb */
/*
 * Main mac80211 transmit entry point: assign the sequence number if
 * requested, pad the 802.11 header to a 4-byte boundary, set up the
 * per-frame info and an ath_buf, apply queue flow control, and start
 * DMA.  Returns 0 on success or -ENOMEM (caller frees the skb).
 */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = info->control.sta;
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_txq *txq = txctl->txq;
	struct ath_buf *bf;
	int padpos, padsize;
	int frmlen = skb->len + FCS_LEN;
	int q;

	/* NOTE:  sta can be NULL according to net/mac80211.h */
	if (sta)
		txctl->an = (struct ath_node *)sta->drv_priv;

	/* account for the ICV appended by hardware encryption */
	if (info->control.hw_key)
		frmlen += info->control.hw_key->icv_len;

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	/* Add the padding after the header if this is not already done */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize)
			return -ENOMEM;

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

	setup_frame_info(hw, skb, frmlen);

	/*
	 * At this point, the vif, hw_key and sta pointers in the tx control
	 * info are no longer valid (overwritten by the ath_frame_info data.
	 */

	bf = ath_tx_setup_buffer(hw, txctl->txq, skb);
	if (unlikely(!bf))
		return -ENOMEM;

	/* stop the mac80211 queue once this hw queue is deep enough */
	q = skb_get_queue_mapping(skb);
	spin_lock_bh(&txq->axq_lock);
	if (txq == sc->tx.txq_map[q] &&
	    ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
		ath_mac80211_stop_queue(sc, q);
		txq->stopped = 1;
	}
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_start_dma(sc, bf, txctl);

	return 0;
}

S
Sujith 已提交
1832 1833 1834
/*****************/
/* TX Completion */
/*****************/
S
Sujith 已提交
1835

S
Sujith 已提交
1836
/*
 * Hand a completed frame back to mac80211.
 *
 * Updates the tx_info status flags from the driver-internal tx_flags,
 * strips the MAC-header alignment padding that was inserted on transmit,
 * clears the powersave TX-ACK wait flag if set, and wakes the matching
 * mac80211 queue when enough pending frames have drained.
 */
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    struct ath_wiphy *aphy, int tx_flags, int ftype,
			    struct ath_txq *txq)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
	int q, padpos, padsize;

	ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);

	/* Frames sent through a virtual wiphy complete on that wiphy's hw */
	if (aphy)
		hw = aphy->hw;

	if (tx_flags & ATH_TX_BAR)
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

	if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;
	}

	/*
	 * Remove MAC header padding before giving the frame back to
	 * mac80211 (padding was added to 4-byte-align the payload).
	 */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len>padpos+padsize) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	/* TX status received: safe to drop out of awake state again */
	if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
		ath_dbg(common, ATH_DBG_PS,
			"Going back to sleep after having received TX status (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}

	if (unlikely(ftype))
		/* Internal frame type (e.g. pspoll/mgmt path) — special handler */
		ath9k_tx_status(hw, skb, ftype);
	else {
		q = skb_get_queue_mapping(skb);
		if (txq == sc->tx.txq_map[q]) {
			spin_lock_bh(&txq->axq_lock);
			/* pending_frames must never go negative */
			if (WARN_ON(--txq->pending_frames < 0))
				txq->pending_frames = 0;

			/* Restart the mac80211 queue once backlog drains */
			if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
				if (ath_mac80211_start_queue(sc, q))
					txq->stopped = 0;
			}
			spin_unlock_bh(&txq->axq_lock);
		}

		ieee80211_tx_status(hw, skb);
	}
}
1899

S
Sujith 已提交
1900
/*
 * Complete a single ath_buf chain: unmap its DMA buffer, report status,
 * and return the descriptors to the free list.
 *
 * @txok:    frame transmitted without error
 * @sendbar: a BAR needs to be sent (reported via ATH_TX_BAR flag)
 *
 * For PAPRD (calibration) frames the skb is not handed to mac80211;
 * it is either freed or signalled via the paprd completion.
 */
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar)
{
	struct sk_buff *skb = bf->bf_mpdu;
	unsigned long flags;
	int tx_flags = 0;

	if (sendbar)
		tx_flags = ATH_TX_BAR;

	if (!txok) {
		tx_flags |= ATH_TX_ERROR;

		if (bf_isxretried(bf))
			tx_flags |= ATH_TX_XRETRY;
	}

	/* Unmap before anyone can touch the skb data again */
	dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
	bf->bf_buf_addr = 0;

	if (bf->bf_state.bfs_paprd) {
		/* Calibration frame: never goes back to mac80211 */
		if (!sc->paprd_pending)
			dev_kfree_skb_any(skb);
		else
			complete(&sc->paprd_complete);
	} else {
		ath_debug_stat_tx(sc, bf, ts);
		ath_tx_complete(sc, skb, bf->aphy, tx_flags,
				bf->bf_state.bfs_ftype, txq);
	}
	/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
	 * accidentally reference it later.
	 */
	bf->bf_mpdu = NULL;

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}

1944
/*
 * Fill in the rate-control status fields of the skb's tx_info from a
 * hardware TX status.
 *
 * @nframes:   number of frames in the (possibly aggregated) transmit unit
 * @nbad:      number of frames that were not acked
 * @txok:      overall transmit succeeded
 * @update_rc: propagate A-MPDU / retry information to rate control
 *
 * Consistency fix: use the local @ah for the trigger-level capability
 * lookup instead of re-deriving it through sc->sc_ah (they are the same
 * pointer; mixing the two spellings obscured that).
 */
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nframes, int nbad, int txok, bool update_rc)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = bf->aphy->hw;
	struct ath_softc *sc = bf->aphy->sc;
	struct ath_hw *ah = sc->sc_ah;
	u8 i, tx_rateindex;

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	tx_rateindex = ts->ts_rateindex;
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

		BUG_ON(nbad > nframes);

		tx_info->status.ampdu_len = nframes;
		tx_info->status.ampdu_ack_len = nframes - nbad;
	}

	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
		/*
		 * If an underrun error is seen assume it as an excessive
		 * retry only if max frame trigger level has been reached
		 * (2 KB for single stream, and 4 KB for dual stream).
		 * Adjust the long retry as if the frame was tried
		 * hw->max_rate_tries times to affect how rate control updates
		 * PER for the failed rate.
		 * In case of congestion on the bus penalizing this type of
		 * underruns should help hardware actually transmit new frames
		 * successfully by eventually preferring slower rates.
		 * This itself should also alleviate congestion on the bus.
		 */
		if (ieee80211_is_data(hdr->frame_control) &&
		    (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
		                     ATH9K_TX_DELIM_UNDERRUN)) &&
		    ah->tx_trig_level >= ah->caps.tx_triglevel_max)
			tx_info->status.rates[tx_rateindex].count =
				hw->max_rate_tries;
	}

	/* Invalidate the rate table entries beyond the one actually used */
	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}

S
Sujith 已提交
2002
/*
 * Reap completed descriptors from a legacy (non-EDMA) transmit queue.
 *
 * Walks txq->axq_q, asks the hardware for per-descriptor completion
 * status, and completes each finished transmit unit (single frame or
 * aggregate).  Stops when the head descriptor is still in progress.
 */
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int txok;
	int status;

	ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->axq_q)) {
			/* Queue fully drained; give aggregation a chance */
			txq->axq_link = NULL;
			if (sc->sc_flags & SC_OP_TXAGGR)
				ath_txq_schedule(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-load the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_stale) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q)) {
				/* Only the holding descriptor left: nothing
				 * new has completed yet. */
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_entry(bf_held->list.next,
						struct ath_buf, list);
			}
		}

		/* Completion status lives in the last descriptor of the unit */
		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		if (status == -EINPROGRESS) {
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		TX_STAT_INC(txq->axq_qnum, txprocdesc);

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_stale = true;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
				&txq->axq_q, lastbf->list.prev);

		txq->axq_depth--;
		txok = !(ts.ts_status & ATH9K_TXERR_MASK);
		txq->axq_tx_inprogress = false;
		if (bf_held)
			list_del(&bf_held->list);

		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;
		/* Drop the lock before the (potentially long) completion work */
		spin_unlock_bh(&txq->axq_lock);

		if (bf_held)
			ath_tx_return_buffer(sc, bf_held);

		if (!bf_isampdu(bf)) {
			/*
			 * This frame is sent out as a single frame.
			 * Use hardware retry status for this frame.
			 */
			if (ts.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			ath_tx_rc_status(bf, &ts, 1, txok ? 0 : 1, txok, true);
		}

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
					     true);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);

		spin_lock_bh(&txq->axq_lock);

		/* More room on the queue now: schedule pending aggregates */
		if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}

S
Sujith 已提交
2107
/*
 * Periodic TX watchdog (delayed work).
 *
 * If a queue still has axq_depth and its axq_tx_inprogress flag survived
 * a whole poll interval without being cleared by the completion path, the
 * hardware is considered hung and the chip is reset.  A queue that is
 * stopped/pending with no descriptors queued gets a schedule kick instead.
 * Re-arms itself every ATH_TX_COMPLETE_POLL_INT ms.
 */
static void ath_tx_complete_poll_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc,
			tx_complete_work.work);
	struct ath_txq *txq;
	int i;
	bool needreset = false;
#ifdef CONFIG_ATH9K_DEBUGFS
	sc->tx_complete_poll_work_seen++;
#endif

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			spin_lock_bh(&txq->axq_lock);
			if (txq->axq_depth) {
				if (txq->axq_tx_inprogress) {
					/* Flag was not cleared since last poll:
					 * no completion progress -> hung. */
					needreset = true;
					spin_unlock_bh(&txq->axq_lock);
					break;
				} else {
					/* Arm the marker; the completion path
					 * clears it on progress. */
					txq->axq_tx_inprogress = true;
				}
			} else {
				/* If the queue has pending buffers, then it
				 * should be doing tx work (and have axq_depth).
				 * Shouldn't get to this state I think..but
				 * we do.
				 */
				if (!(sc->sc_flags & (SC_OP_OFFCHANNEL)) &&
				    (txq->pending_frames > 0 ||
				     !list_empty(&txq->axq_acq) ||
				     txq->stopped)) {
					ath_err(ath9k_hw_common(sc->sc_ah),
						"txq: %p axq_qnum: %u,"
						" mac80211_qnum: %i"
						" axq_link: %p"
						" pending frames: %i"
						" axq_acq empty: %i"
						" stopped: %i"
						" axq_depth: 0  Attempting to"
						" restart tx logic.\n",
						txq, txq->axq_qnum,
						txq->mac80211_qnum,
						txq->axq_link,
						txq->pending_frames,
						list_empty(&txq->axq_acq),
						txq->stopped);
					ath_txq_schedule(sc, txq);
				}
			}
			spin_unlock_bh(&txq->axq_lock);
		}

	if (needreset) {
		ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
			"tx hung, resetting the chip\n");
		/* Reset requires the chip awake */
		ath9k_ps_wakeup(sc);
		ath_reset(sc, true);
		ath9k_ps_restore(sc);
	}

	/* Re-arm the watchdog */
	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
			msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
}


2174

S
Sujith 已提交
2175
void ath_tx_tasklet(struct ath_softc *sc)
2176
{
S
Sujith 已提交
2177 2178
	int i;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
2179

S
Sujith 已提交
2180
	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
2181

S
Sujith 已提交
2182 2183 2184
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
2185 2186 2187
	}
}

2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203
/*
 * TX completion tasklet for EDMA (AR9003+) hardware.
 *
 * EDMA delivers completion status through a dedicated status ring rather
 * than per-queue descriptor polling: drain that ring, match each status
 * to the tail of the corresponding queue's TX FIFO, and complete the
 * frames.  Also refills the FIFO from txq_fifo_pending when possible.
 */
void ath_tx_edma_tasklet(struct ath_softc *sc)
{
	struct ath_tx_status txs;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	int status;
	int txok;

	for (;;) {
		/* NULL descriptor: EDMA reads from the status ring instead */
		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
			ath_dbg(common, ATH_DBG_XMIT,
				"Error processing tx status\n");
			break;
		}

		/* Skip beacon completions */
		if (txs.qid == sc->beacon.beaconq)
			continue;

		txq = &sc->tx.txq[txs.qid];

		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
			/* Status with no matching FIFO entry: bail out */
			spin_unlock_bh(&txq->axq_lock);
			return;
		}

		/* Detach the completed transmit unit from the FIFO tail */
		bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
				      struct ath_buf, list);
		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
		list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
				  &lastbf->list);
		INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		txq->axq_depth--;
		txq->axq_tx_inprogress = false;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;
		spin_unlock_bh(&txq->axq_lock);

		txok = !(txs.ts_status & ATH9K_TXERR_MASK);

		if (!bf_isampdu(bf)) {
			/* Single frame: take retry status from hardware */
			if (txs.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			ath_tx_rc_status(bf, &txs, 1, txok ? 0 : 1, txok, true);
		}

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
					     txok, true);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head,
					    &txs, txok, 0);

		spin_lock_bh(&txq->axq_lock);

		/* A FIFO slot opened up: push a pending unit or schedule */
		if (!list_empty(&txq->txq_fifo_pending)) {
			INIT_LIST_HEAD(&bf_head);
			bf = list_first_entry(&txq->txq_fifo_pending,
				struct ath_buf, list);
			list_cut_position(&bf_head, &txq->txq_fifo_pending,
				&bf->bf_lastbf->list);
			ath_tx_txqaddbuf(sc, txq, &bf_head);
		} else if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}

S
Sujith 已提交
2265 2266 2267
/*****************/
/* Init, Cleanup */
/*****************/
2268

2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303
/*
 * Allocate the coherent DMA memory backing the EDMA TX status ring.
 *
 * @size: number of status ring entries.
 * Returns 0 on success, -ENOMEM if the allocation fails.
 */
static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
	struct ath_descdma *dd = &sc->txsdma;

	dd->dd_desc_len = size * sc->sc_ah->caps.txs_len;
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);

	return dd->dd_desc ? 0 : -ENOMEM;
}

/*
 * Set up EDMA TX status handling: allocate the status ring and hand its
 * address to the hardware.  Returns 0 on success or a negative errno.
 */
static int ath_tx_edma_init(struct ath_softc *sc)
{
	int ret = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);

	if (ret)
		return ret;

	ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
				  sc->txsdma.dd_desc_paddr,
				  ATH_TXSTATUS_RING_SIZE);
	return 0;
}

/* Release the coherent DMA memory of the EDMA TX status ring. */
static void ath_tx_edma_cleanup(struct ath_softc *sc)
{
	dma_free_coherent(sc->dev, sc->txsdma.dd_desc_len,
			  sc->txsdma.dd_desc,
			  sc->txsdma.dd_desc_paddr);
}

S
Sujith 已提交
2304
int ath_tx_init(struct ath_softc *sc, int nbufs)
2305
{
2306
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
S
Sujith 已提交
2307
	int error = 0;
2308

2309
	spin_lock_init(&sc->tx.txbuflock);
2310

2311
	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
2312
				  "tx", nbufs, 1, 1);
2313
	if (error != 0) {
2314 2315
		ath_err(common,
			"Failed to allocate tx descriptors: %d\n", error);
2316 2317
		goto err;
	}
2318

2319
	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
2320
				  "beacon", ATH_BCBUF, 1, 1);
2321
	if (error != 0) {
2322 2323
		ath_err(common,
			"Failed to allocate beacon descriptors: %d\n", error);
2324 2325
		goto err;
	}
2326

2327 2328
	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

2329 2330 2331 2332 2333 2334
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		error = ath_tx_edma_init(sc);
		if (error)
			goto err;
	}

2335
err:
S
Sujith 已提交
2336 2337
	if (error != 0)
		ath_tx_cleanup(sc);
2338

S
Sujith 已提交
2339
	return error;
2340 2341
}

2342
void ath_tx_cleanup(struct ath_softc *sc)
S
Sujith 已提交
2343 2344 2345 2346 2347 2348
{
	if (sc->beacon.bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

	if (sc->tx.txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
2349 2350 2351

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_tx_edma_cleanup(sc);
S
Sujith 已提交
2352
}
2353 2354 2355

void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
2356 2357 2358
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;
2359

2360
	for (tidno = 0, tid = &an->tid[tidno];
2361 2362 2363 2364 2365 2366 2367 2368
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an        = an;
		tid->tidno     = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size  = WME_MAX_BA;
		tid->baw_head  = tid->baw_tail = 0;
		tid->sched     = false;
S
Sujith 已提交
2369
		tid->paused    = false;
2370
		tid->state &= ~AGGR_CLEANUP;
2371 2372
		INIT_LIST_HEAD(&tid->buf_q);
		acno = TID_TO_WME_AC(tidno);
2373
		tid->ac = &an->ac[acno];
2374 2375
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
2376
	}
2377

2378
	for (acno = 0, ac = &an->ac[acno];
2379 2380
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched    = false;
2381
		ac->txq = sc->tx.txq_map[acno];
2382
		INIT_LIST_HEAD(&ac->tid_q);
2383 2384 2385
	}
}

S
Sujith 已提交
2386
/*
 * Tear down the per-station aggregation state when a node goes away.
 *
 * For every TID: unschedule it (and its AC) from the transmit queue,
 * drop any frames still buffered, and clear the aggregation state bits.
 * All of it is done under the owning txq's lock.
 */
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		/* Remove the TID from its AC's schedule list */
		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		/* Remove the AC from the txq's schedule list */
		if (ac->sched) {
			list_del(&ac->list);
			tid->ac->sched = false;
		}

		/* Free any frames still queued for this TID */
		ath_tid_drain(sc, txq, tid);
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;

		spin_unlock_bh(&txq->axq_lock);
	}
}