/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
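/*
 * Note on the GI macros above: a full-GI OFDM symbol lasts 4 us, so
 * SYMBOL_TIME() is simply ns * 4.  With a short (half) GI the symbol is
 * 3.6 us, so SYMBOL_TIME_HALFGI() computes ns * 18 / 5 (rounded up) and
 * NUM_SYMBOLS_PER_USEC_HALFGI() is the matching inverse, i.e. roughly
 * usec / 3.6 rounded down.
 */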


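/*
 * Data bits carried per OFDM symbol for a single spatial stream, indexed
 * by MCS (0-7) and channel width (20/40 MHz).  Rates with more streams
 * are handled by multiplying by the stream count where this table is
 * used (see ath_pkt_duration() and ath_compute_num_delims()).
 */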
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
			       struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

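/*
 * Largest frame length (in bytes) that still fits into a transmit
 * duration of roughly 4 ms at a given rate.  Rows are indexed by
 * HT20/HT40 with long/short GI, columns by MCS 0-31 (eight entries per
 * stream count); entries are capped at 65532 since the hardware limit
 * on an aggregate is a 16-bit length.
 */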
static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864,  19300,  25736,  28952,  32172,
		6424,  12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628,  19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720,  14296,  21444,  28596,  32172,  35744,
		7140,  14284, 21428,  28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112,  42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780,  57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680,  13360,  20044,  26724,  40092,  53456,  60140,  65532,
		13348, 26700,  40052,  53400,  65532,  65532,  65532,  65532,
		20004, 40008,  60016,  65532,  65532,  65532,  65532,  65532,
		26644, 53292,  65532,  65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844,  22272,  29696,  44544,  59396,  65532,  65532,
		14832, 29668,  44504,  59340,  65532,  65532,  65532,  65532,
		22232, 44464,  65532,  65532,  65532,  65532,  65532,  65532,
		29616, 59232,  65532,  65532,  65532,  65532,  65532,  65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		spin_unlock_bh(&txq->axq_lock);
		fi = get_frame_info(bf->bf_mpdu);
		if (fi->retries) {
			ath_tx_update_baw(sc, tid, fi->seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		} else {
			ath_tx_send_normal(sc, txq, NULL, &bf_head);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	spin_unlock_bh(&txq->axq_lock);
}

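/*
 * Block-ack window (BAW) tracking: tid->tx_buf is a bitmap with one bit
 * per outstanding subframe.  ATH_BA_INDEX() gives the offset of a
 * sequence number from tid->seq_start, and that offset is wrapped into
 * the circular buffer of ATH_TID_MAX_BUFS entries to obtain the bit
 * position (cindex) used below.
 */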
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
		(ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		fi = get_frame_info(bf->bf_mpdu);
		if (fi->retries)
			ath_tx_update_baw(sc, tid, fi->seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	if (fi->retries++ > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
			        struct ath_tx_status *ts, int txok,
			        int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, fi->seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}


static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			bf->bf_state.bf_type |= BUF_XRETRY;
			if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
			    !bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when BA
			 * issue happens. Chip needs to be reset.
			 * But AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if (!(tid->state & AGGR_CLEANUP) && retry) {
				if (fi->retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
					txfail_cnt++;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * is not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, fi->seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				!txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, fi->seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(sc, bf, ts, nframes,
								nbad, 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset) {
		spin_unlock_bh(&sc->sc_pcu_lock);
		ath_reset(sc, false);
		spin_lock_bh(&sc->sc_pcu_lock);
	}
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate, if rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, the hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *      The hardware can keep up at lower rates, but not higher rates
	 */
	if (fi->keyix != ATH9K_TXKEYIX_INVALID)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
		al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		fi = get_frame_info(bf->bf_mpdu);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
			!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, fi->seqno);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ath_frame_info *fi;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			fi = get_frame_info(bf->bf_mpdu);

			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf, fi->framelen);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf, aggr_len);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_err(common, "qnum %u out of range, max %zu!\n",
			axq_qnum, ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, axq_qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[axq_qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (cur_conf->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

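/*
 * A-MPDUs that are not rate-control probes count towards the queue's
 * axq_ampdu_depth, which gates how much the aggregation scheduler will
 * push to the hardware (see the ATH_AGGR_MIN_QDEPTH checks below).
 */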
static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
    struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
    return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
				txq->txq_headidx = txq->txq_tailidx = 0;
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
						      struct ath_buf, list);
			}
		} else {
			if (list_empty(&txq->axq_q)) {
				txq->axq_link = NULL;
				spin_unlock_bh(&txq->axq_lock);
				break;
			}
			bf = list_first_entry(&txq->axq_q, struct ath_buf,
					      list);

			if (bf->bf_stale) {
				list_del(&bf->list);
				spin_unlock_bh(&txq->axq_lock);

				ath_tx_return_buffer(sc, bf);
				continue;
			}
		}

		lastbf = bf->bf_lastbf;

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			list_cut_position(&bf_head,
					  &txq->txq_fifo[txq->txq_tailidx],
					  &lastbf->list);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		} else {
			/* remove ath_buf's of the same mpdu from txq */
			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		}

		txq->axq_depth--;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;
		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
					     retry_tx);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	spin_lock_bh(&txq->axq_lock);
	txq->axq_tx_inprogress = false;
	spin_unlock_bh(&txq->axq_lock);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		spin_lock_bh(&txq->axq_lock);
		while (!list_empty(&txq->txq_fifo_pending)) {
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			spin_unlock_bh(&txq->axq_lock);

			if (bf_isampdu(bf))
				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
						     &ts, 0, retry_tx);
			else
				ath_tx_complete_buf(sc, bf, txq, &bf_head,
						    &ts, 0, 0);
			spin_lock_bh(&txq->axq_lock);
		}
		spin_unlock_bh(&txq->axq_lock);
	}

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}
}

bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return true;

	ath9k_hw_abort_tx_dma(ah);

	/* Check if any queue remains active */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
	}

	if (npend)
		ath_err(common, "Failed to stop TX DMA!\n");

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		/*
		 * The caller will resume queues with ieee80211_wake_queues.
		 * Mark the queue as not stopped to prevent ath_tx_complete
		 * from waking the queue too early.
		 */
		txq = &sc->tx.txq[i];
		txq->stopped = false;
		ath_draintxq(sc, txq, retry_tx);
	}

	return !npend;
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

/* For each axq_acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp, *last_ac;
	struct ath_atx_tid *tid, *last_tid;

	if (list_empty(&txq->axq_acq) ||
	    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
		list_del(&ac->list);
		ac->sched = false;

		while (!list_empty(&ac->tid_q)) {
			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
					       list);
			list_del(&tid->list);
			tid->sched = false;

			if (tid->paused)
				continue;

			ath_tx_sched_aggr(sc, txq, tid);

			/*
			 * add tid to round-robin queue if more frames
			 * are pending for the tid
			 */
			if (!list_empty(&tid->buf_q))
				ath_tx_queue_tid(txq, tid);

			if (tid == last_tid ||
			    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
				break;
		}

		if (!list_empty(&ac->tid_q)) {
			if (!ac->sched) {
				ac->sched = true;
				list_add_tail(&ac->list, &txq->axq_acq);
			}
		}

		if (ac == last_ac ||
		    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
			return;
	}
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	ath_dbg(common, ATH_DBG_QUEUE,
		"qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
			list_splice_tail_init(head, &txq->txq_fifo_pending);
			return;
		}
		if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
			ath_dbg(common, ATH_DBG_XMIT,
				"Initializing tx fifo %d which is non-empty\n",
				txq->txq_headidx);
		INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
		list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		TX_STAT_INC(txq->axq_qnum, puttxbuf);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link == NULL) {
			TX_STAT_INC(txq->axq_qnum, puttxbuf);
			ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
				txq->axq_qnum, ito64(bf->bf_daddr),
				bf->bf_desc);
		} else {
			*txq->axq_link = bf->bf_daddr;
			ath_dbg(common, ATH_DBG_XMIT,
				"link[%u] (%p)=%llx (%p)\n",
				txq->axq_qnum, txq->axq_link,
				ito64(bf->bf_daddr), bf->bf_desc);
		}
		ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
				       &txq->axq_link);
		TX_STAT_INC(txq->axq_qnum, txstart);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}
	txq->axq_depth++;
	if (bf_is_ampdu_not_probing(bf))
		txq->axq_ampdu_depth++;
}

static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct ath_buf *bf, struct ath_tx_control *txctl)
{
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
	struct list_head bf_head;

	bf->bf_state.bf_type |= BUF_AMPDU;

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!list_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) ||
	    txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
		list_add_tail(&bf->list, &tid->buf_q);
		ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

	INIT_LIST_HEAD(&bf_head);
	list_add(&bf->list, &bf_head);

	/* Add sub-frame to BAW */
	if (!fi->retries)
		ath_tx_addto_baw(sc, tid, fi->seqno);

	/* Queue to h/w without aggregation */
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf, fi->framelen);
	ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
}

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
			       struct list_head *bf_head)
{
	struct ath_frame_info *fi;
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU;

	/* update starting sequence number for subsequent ADDBA request */
	if (tid)
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	bf->bf_lastbf = bf;
	fi = get_frame_info(bf->bf_mpdu);
	ath_buf_set_rate(sc, bf, fi->framelen);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
			     int framelen)
{
	struct ath_softc *sc = hw->priv;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = tx_info->control.sta;
	struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
	struct ieee80211_hdr *hdr;
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_node *an;
	struct ath_atx_tid *tid;
	enum ath9k_key_type keytype;
	u16 seqno = 0;
	u8 tidno;

	keytype = ath9k_cmn_get_hw_crypto_keytype(skb);

	hdr = (struct ieee80211_hdr *)skb->data;
	if (sta && ieee80211_is_data_qos(hdr->frame_control) &&
		conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {

		an = (struct ath_node *) sta->drv_priv;
		tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;

		/*
		 * Override seqno set by upper layer with the one
		 * in tx aggregation state.
		 */
		tid = ATH_AN_2_TID(an, tidno);
		seqno = tid->seq_next;
		hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
		INCR(tid->seq_next, IEEE80211_SEQ_MAX);
	}

	memset(fi, 0, sizeof(*fi));
	if (hw_key)
		fi->keyix = hw_key->hw_key_idx;
	else
		fi->keyix = ATH9K_TXKEYIX_INVALID;
	fi->keytype = keytype;
	fi->framelen = framelen;
	fi->seqno = seqno;
}

static int setup_tx_flags(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	int flags = 0;

	flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
	flags |= ATH9K_TXDESC_INTREQ;

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= ATH9K_TXDESC_NOACK;

	if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
		flags |= ATH9K_TXDESC_LDPC;

	return flags;
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width  - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4us v/s 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* addup duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}
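/*
 * Worked example for the duration math above (assuming MCS 7, 20 MHz,
 * long GI, one stream, 1500-byte MPDU): nbits = 1500 * 8 + 22 = 12022,
 * nsymbits = 260, nsymbols = 47, so the payload takes 47 * 4 = 188 us
 * plus 36 us of preamble/training fields, i.e. 224 us in total.
 */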

u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_channel *curchan = ah->curchan;
	if ((sc->sc_flags & SC_OP_ENABLE_APM) &&
			(curchan->channelFlags & CHANNEL_5GHZ) &&
			(chainmask == 0x7) && (rate < 0x90))
		return 0x3;
	else
		return chainmask;
}

static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath9k_11n_rate_series series[4];
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	int i, flags = 0;
	u8 rix = 0, ctsrate = 0;
	bool is_pspoll;

	memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;
	is_pspoll = ieee80211_is_pspoll(hdr->frame_control);

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	ctsrate = rate->hw_value;
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
		ctsrate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		series[i].Tries = rates[i].count;

		if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
		    (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			series[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			series[i].Rate = rix | 0x80;
			series[i].ChSel = ath_txchainmask_reduction(sc,
					common->tx_chainmask, series[i].Rate);
			series[i].PktDuration = ath_pkt_duration(sc, rix, len,
				 is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				series[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		series[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				series[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		if (bf->bf_state.bfs_paprd)
			series[i].ChSel = common->tx_chainmask;
		else
			series[i].ChSel = ath_txchainmask_reduction(sc,
					common->tx_chainmask, series[i].Rate);

		series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (flags & ATH9K_TXDESC_RTSENA)
		flags &= ~ATH9K_TXDESC_CTSENA;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
				     bf->bf_lastbf->bf_desc,
				     !is_pspoll, ctsrate,
				     0, series, 4, flags);

	if (sc->config.ath_aggr_prot && flags)
		ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
}

F
Felix Fietkau 已提交
1662
static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
					   struct ath_txq *txq,
					   struct sk_buff *skb)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_buf *bf;
	struct ath_desc *ds;
	int frm_type;

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
		return NULL;
	}

	ATH_TXBUF_RESET(bf);

	bf->bf_flags = setup_tx_flags(skb);
	bf->bf_mpdu = skb;

	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
		bf->bf_mpdu = NULL;
		bf->bf_buf_addr = 0;
		ath_err(ath9k_hw_common(sc->sc_ah),
			"dma_mapping_error() on TX\n");
		ath_tx_return_buffer(sc, bf);
		return NULL;
	}

	frm_type = get_hw_packet_type(skb);

	ds = bf->bf_desc;
	ath9k_hw_set_desc_link(ah, ds, 0);

	ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
			       fi->keyix, fi->keytype, bf->bf_flags);

	ath9k_hw_filltxdesc(ah, ds,
			    skb->len,	/* segment length */
			    true,	/* first segment */
			    true,	/* last segment */
			    ds,		/* first descriptor */
			    bf->bf_buf_addr,
			    txq->axq_qnum);


	return bf;
}

/* FIXME: tx power */
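/*
 * Hand the frame to the DMA engine: frames that mac80211 marked for
 * A-MPDU on a known TID take the aggregation path, everything else
 * (including PAPRD calibration frames) is sent as a single frame.
 */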
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_control *txctl)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct list_head bf_head;
	struct ath_atx_tid *tid = NULL;
	u8 tidno;

	spin_lock_bh(&txctl->txq->axq_lock);

	if (ieee80211_is_data_qos(hdr->frame_control) && txctl->an) {
		tidno = ieee80211_get_qos_ctl(hdr)[0] &
			IEEE80211_QOS_CTL_TID_MASK;
		tid = ATH_AN_2_TID(txctl->an, tidno);

		WARN_ON(tid->ac->txq != txctl->txq);
	}

	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
		/*
		 * Try aggregation if it's a unicast data frame
		 * and the destination is HT capable.
		 */
		ath_tx_send_ampdu(sc, tid, bf, txctl);
	} else {
		INIT_LIST_HEAD(&bf_head);
		list_add_tail(&bf->list, &bf_head);

		bf->bf_state.bfs_ftype = txctl->frame_type;
		bf->bf_state.bfs_paprd = txctl->paprd;

		if (bf->bf_state.bfs_paprd)
			ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
						   bf->bf_state.bfs_paprd);

		if (txctl->paprd)
			bf->bf_state.bfs_paprd_timestamp = jiffies;

		ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
	}

	spin_unlock_bh(&txctl->txq->axq_lock);
}

/* Upon failure caller should free skb */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = info->control.sta;
	struct ath_softc *sc = hw->priv;
	struct ath_txq *txq = txctl->txq;
	struct ath_buf *bf;
	int padpos, padsize;
	int frmlen = skb->len + FCS_LEN;
	int q;

	/* NOTE:  sta can be NULL according to net/mac80211.h */
	if (sta)
		txctl->an = (struct ath_node *)sta->drv_priv;

	if (info->control.hw_key)
		frmlen += info->control.hw_key->icv_len;

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	/* Add the padding after the header if this is not already done */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize)
			return -ENOMEM;

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

	setup_frame_info(hw, skb, frmlen);

	/*
	 * At this point, the vif, hw_key and sta pointers in the tx control
	 * info are no longer valid (overwritten by the ath_frame_info data).
	 */

	bf = ath_tx_setup_buffer(hw, txctl->txq, skb);
	if (unlikely(!bf))
		return -ENOMEM;

	q = skb_get_queue_mapping(skb);
	spin_lock_bh(&txq->axq_lock);
	if (txq == sc->tx.txq_map[q] &&
	    ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
		ieee80211_stop_queue(sc->hw, q);
		txq->stopped = 1;
	}
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_start_dma(sc, bf, txctl);

	return 0;
}

/*****************/
/* TX Completion */
/*****************/

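/*
 * Final hand-back of a completed skb to mac80211: set the status flags,
 * strip the MAC header padding again, wake a stopped queue once it has
 * drained and report the result via ieee80211_tx_status().
 */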
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, int ftype, struct ath_txq *txq)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int q, padpos, padsize;

	ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);

	if (tx_flags & ATH_TX_BAR)
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

	if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;
	}

	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos + padsize) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
		ath_dbg(common, ATH_DBG_PS,
			"Going back to sleep after having received TX status (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}

	q = skb_get_queue_mapping(skb);
	if (txq == sc->tx.txq_map[q]) {
		spin_lock_bh(&txq->axq_lock);
		if (WARN_ON(--txq->pending_frames < 0))
			txq->pending_frames = 0;

		if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
			ieee80211_wake_queue(sc->hw, q);
			txq->stopped = 0;
		}
		spin_unlock_bh(&txq->axq_lock);
	}

	ieee80211_tx_status(hw, skb);
}
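/*
 * Tear down a completed ath_buf: unmap the skb, complete it (or the
 * PAPRD calibration waiter), and splice the buffer list back onto the
 * free pool.
 */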
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar)
{
	struct sk_buff *skb = bf->bf_mpdu;
	unsigned long flags;
	int tx_flags = 0;

	if (sendbar)
		tx_flags = ATH_TX_BAR;

	if (!txok) {
		tx_flags |= ATH_TX_ERROR;

		if (bf_isxretried(bf))
			tx_flags |= ATH_TX_XRETRY;
	}

	dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
	bf->bf_buf_addr = 0;

	if (bf->bf_state.bfs_paprd) {
		if (time_after(jiffies,
				bf->bf_state.bfs_paprd_timestamp +
				msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
			dev_kfree_skb_any(skb);
		else
			complete(&sc->paprd_complete);
	} else {
		ath_debug_stat_tx(sc, bf, ts, txq);
		ath_tx_complete(sc, skb, tx_flags,
				bf->bf_state.bfs_ftype, txq);
	}
	/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
	 * accidentally reference it later.
	 */
	bf->bf_mpdu = NULL;

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}

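/*
 * Translate the hardware tx status into the rate control fields of the
 * mac80211 tx info (A-MPDU lengths, retry counts, filtered frames).
 */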
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok, bool update_rc)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	u8 i, tx_rateindex;

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	tx_rateindex = ts->ts_rateindex;
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

		BUG_ON(nbad > nframes);

		tx_info->status.ampdu_len = nframes;
		tx_info->status.ampdu_ack_len = nframes - nbad;
	}

	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
		/*
		 * If an underrun error is seen assume it as an excessive
		 * retry only if max frame trigger level has been reached
		 * (2 KB for single stream, and 4 KB for dual stream).
		 * Adjust the long retry as if the frame was tried
		 * hw->max_rate_tries times to affect how rate control updates
		 * PER for the failed rate.
		 * In case of congestion on the bus penalizing this type of
		 * underruns should help hardware actually transmit new frames
		 * successfully by eventually preferring slower rates.
		 * This itself should also alleviate congestion on the bus.
		 */
		if (ieee80211_is_data(hdr->frame_control) &&
		    (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
		                     ATH9K_TX_DELIM_UNDERRUN)) &&
		    ah->tx_trig_level >= sc->sc_ah->caps.tx_triglevel_max)
			tx_info->status.rates[tx_rateindex].count =
				hw->max_rate_tries;
	}

	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}

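/*
 * Reap completed descriptors from a legacy (pre-EDMA) tx queue and hand
 * the finished frames to the completion path.
 */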
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int txok;
	int status;

	ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			if (sc->sc_flags & SC_OP_TXAGGR)
				ath_txq_schedule(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-load the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_stale) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q)) {
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_entry(bf_held->list.next,
						struct ath_buf, list);
			}
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		if (status == -EINPROGRESS) {
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		TX_STAT_INC(txq->axq_qnum, txprocdesc);

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_stale = true;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
				&txq->axq_q, lastbf->list.prev);

		txq->axq_depth--;
		txok = !(ts.ts_status & ATH9K_TXERR_MASK);
		txq->axq_tx_inprogress = false;
		if (bf_held)
			list_del(&bf_held->list);

		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;

		spin_unlock_bh(&txq->axq_lock);

		if (bf_held)
			ath_tx_return_buffer(sc, bf_held);

		if (!bf_isampdu(bf)) {
			/*
			 * This frame is sent out as a single frame.
			 * Use hardware retry status for this frame.
			 */
			if (ts.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			ath_tx_rc_status(sc, bf, &ts, 1, txok ? 0 : 1, txok, true);
		}

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
					     true);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);

		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}

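/*
 * Periodic PLL check for AR9485: if the PLL sqsum value stays above
 * threshold for three consecutive polls, Rx is assumed to be hung and
 * the chip is reset.
 */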
static void ath_hw_pll_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc,
					    hw_pll_work.work);
	static int count;

	if (AR_SREV_9485(sc->sc_ah)) {
		if (ar9003_get_pll_sqsum_dvc(sc->sc_ah) >= 0x40000) {
			count++;

			if (count == 3) {
				/* Rx is hung for more than 500ms. Reset it */
				ath_reset(sc, true);
				count = 0;
			}
		} else
			count = 0;

		ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/5);
	}
}

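/*
 * Tx watchdog: if a queue still has descriptors pending but no
 * completion progress has been made since the last poll, reset the
 * chip; queues with pending frames but an idle DMA engine are
 * rescheduled instead.
 */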
static void ath_tx_complete_poll_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc,
			tx_complete_work.work);
	struct ath_txq *txq;
	int i;
	bool needreset = false;
#ifdef CONFIG_ATH9K_DEBUGFS
	sc->tx_complete_poll_work_seen++;
#endif

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			spin_lock_bh(&txq->axq_lock);
			if (txq->axq_depth) {
				if (txq->axq_tx_inprogress) {
					needreset = true;
					spin_unlock_bh(&txq->axq_lock);
					break;
				} else {
					txq->axq_tx_inprogress = true;
				}
			} else {
				/* If the queue has pending buffers, then it
				 * should be doing tx work (and have axq_depth).
				 * Shouldn't get to this state I think..but
				 * we do.
				 */
				if (!(sc->sc_flags & (SC_OP_OFFCHANNEL)) &&
				    (txq->pending_frames > 0 ||
				     !list_empty(&txq->axq_acq) ||
				     txq->stopped)) {
					ath_err(ath9k_hw_common(sc->sc_ah),
						"txq: %p axq_qnum: %u,"
						" mac80211_qnum: %i"
						" axq_link: %p"
						" pending frames: %i"
						" axq_acq empty: %i"
						" stopped: %i"
						" axq_depth: 0  Attempting to"
						" restart tx logic.\n",
						txq, txq->axq_qnum,
						txq->mac80211_qnum,
						txq->axq_link,
						txq->pending_frames,
						list_empty(&txq->axq_acq),
						txq->stopped);
					ath_txq_schedule(sc, txq);
				}
			}
			spin_unlock_bh(&txq->axq_lock);
		}

	if (needreset) {
		ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
			"tx hung, resetting the chip\n");
		ath_reset(sc, true);
	}

	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
			msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
}


void ath_tx_tasklet(struct ath_softc *sc)
{
	int i;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);

	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
	}
}

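/*
 * Tx completion processing for EDMA (AR9003 family) hardware, which
 * reports completions through a separate tx status ring rather than in
 * the tx descriptors themselves.
 */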
void ath_tx_edma_tasklet(struct ath_softc *sc)
{
	struct ath_tx_status txs;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	int status;
	int txok;

	for (;;) {
		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
			ath_dbg(common, ATH_DBG_XMIT,
				"Error processing tx status\n");
			break;
		}

		/* Skip beacon completions */
		if (txs.qid == sc->beacon.beaconq)
			continue;

		txq = &sc->tx.txq[txs.qid];

		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
			spin_unlock_bh(&txq->axq_lock);
			return;
		}

		bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
				      struct ath_buf, list);
		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
		list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
				  &lastbf->list);
		INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		txq->axq_depth--;
		txq->axq_tx_inprogress = false;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;
		spin_unlock_bh(&txq->axq_lock);

		txok = !(txs.ts_status & ATH9K_TXERR_MASK);

		if (!bf_isampdu(bf)) {
			if (txs.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			ath_tx_rc_status(sc, bf, &txs, 1, txok ? 0 : 1, txok, true);
		}

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
					     txok, true);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head,
					    &txs, txok, 0);

		spin_lock_bh(&txq->axq_lock);
		if (!list_empty(&txq->txq_fifo_pending)) {
			INIT_LIST_HEAD(&bf_head);
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			ath_tx_txqaddbuf(sc, txq, &bf_head);
		} else if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);

		spin_unlock_bh(&txq->axq_lock);
	}
}

/*****************/
/* Init, Cleanup */
/*****************/

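/*
 * EDMA devices report tx completions through a dedicated status ring;
 * allocate and register it here, and free it again on cleanup.
 */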
static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
	struct ath_descdma *dd = &sc->txsdma;
	u8 txs_len = sc->sc_ah->caps.txs_len;

	dd->dd_desc_len = size * txs_len;
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (!dd->dd_desc)
		return -ENOMEM;

	return 0;
}

static int ath_tx_edma_init(struct ath_softc *sc)
{
	int err;

	err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
	if (!err)
		ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
					  sc->txsdma.dd_desc_paddr,
					  ATH_TXSTATUS_RING_SIZE);

	return err;
}

static void ath_tx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_descdma *dd = &sc->txsdma;

	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
}

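/*
 * Allocate the tx and beacon descriptor DMA areas, set up the EDMA
 * status ring where needed and initialise the tx watchdog and PLL poll
 * work items.
 */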
int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int error = 0;

	spin_lock_init(&sc->tx.txbuflock);

	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
				  "tx", nbufs, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate tx descriptors: %d\n", error);
		goto err;
	}

	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
				  "beacon", ATH_BCBUF, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate beacon descriptors: %d\n", error);
		goto err;
	}

	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
	INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		error = ath_tx_edma_init(sc);
		if (error)
			goto err;
	}

err:
	if (error != 0)
		ath_tx_cleanup(sc);

	return error;
}

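/* Release the descriptor DMA areas allocated in ath_tx_init(). */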
void ath_tx_cleanup(struct ath_softc *sc)
{
	if (sc->beacon.bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

	if (sc->tx.txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_tx_edma_cleanup(sc);
}

void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an        = an;
		tid->tidno     = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size  = WME_MAX_BA;
		tid->baw_head  = tid->baw_tail = 0;
		tid->sched     = false;
		tid->paused    = false;
		tid->state &= ~AGGR_CLEANUP;
		INIT_LIST_HEAD(&tid->buf_q);
		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->ac[acno];
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
	}

	for (acno = 0, ac = &an->ac[acno];
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched    = false;
		ac->txq = sc->tx.txq_map[acno];
		INIT_LIST_HEAD(&ac->tid_q);
	}
}

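/*
 * Flush per-station tx state when a node goes away: unschedule its TIDs
 * and access categories and drop any frames still buffered for
 * aggregation.
 */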
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		if (ac->sched) {
			list_del(&ac->list);
			tid->ac->sched = false;
2427
		}
2428 2429 2430 2431 2432 2433

		ath_tid_drain(sc, txq, tid);
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;

		spin_unlock_bh(&txq->axq_lock);
	}
}