xmit.c 63.6 KB
Newer Older
1
/*
2
 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 4 5 6 7 8 9 10 11 12 13 14 15 16
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

17
#include <linux/dma-mapping.h>
S
Sujith 已提交
18
#include "ath9k.h"
19
#include "ar9003_mac.h"
20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35

/*
 * 802.11n PHY timing constants, used to convert between frame lengths,
 * OFDM symbols and airtime when sizing aggregates and delimiters.
 */
#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
/* Number of spatial streams encoded in an HT MCS rate code */
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)


36
/*
 * Data bits carried by one OFDM symbol for MCS 0-7 (single stream),
 * indexed by [mcs][0: 20 MHz, 1: 40 MHz]. Multiplied by the stream
 * count for higher MCS indices (see HT_RC_2_STREAMS usage below).
 */
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

/* True if the hardware rate code denotes an HT (MCS) rate */
#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

F
Felix Fietkau 已提交
50
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
51 52 53
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
S
Sujith 已提交
54
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
55 56
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
57
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
58
			     struct list_head *head, bool internal);
59
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
F
Felix Fietkau 已提交
60 61 62
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok, bool update_rc);
63 64
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
65 66 67 68
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb);
69

70
/* Row index into ath_max_4ms_framelen[]: HT channel width + guard interval */
enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

77 78 79 80 81 82 83 84 85 86 87 88
/*
 * Largest frame length (bytes) that still fits in a 4 ms transmit
 * duration for each MCS (0-31), per mode/GI row. Values are capped at
 * 65532 because the hardware length field is 16 bits. Used by
 * ath_lookup_rate() to bound aggregate size.
 */
static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864,  19300,  25736,  28952,  32172,
		6424,  12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628,  19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720,  14296,  21444,  28596,  32172,  35744,
		7140,  14284, 21428,  28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112,  42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780,  57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680,  13360,  20044,  26724,  40092,  53456,  60140,  65532,
		13348, 26700,  40052,  53400,  65532,  65532,  65532,  65532,
		20004, 40008,  60016,  65532,  65532,  65532,  65532,  65532,
		26644, 53292,  65532,  65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844,  22272,  29696,  44544,  59396,  65532,  65532,
		14832, 29668,  44504,  59340,  65532,  65532,  65532,  65532,
		22232, 44464,  65532,  65532,  65532,  65532,  65532,  65532,
		29616, 59232,  65532,  65532,  65532,  65532,  65532,  65532,
	}
};

S
Sujith 已提交
104 105 106
/*********************/
/* Aggregation logic */
/*********************/
107

S
Sujith 已提交
108
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
S
Sujith 已提交
109
{
S
Sujith 已提交
110
	struct ath_atx_ac *ac = tid->ac;
S
Sujith 已提交
111

S
Sujith 已提交
112 113
	if (tid->paused)
		return;
S
Sujith 已提交
114

S
Sujith 已提交
115 116
	if (tid->sched)
		return;
S
Sujith 已提交
117

S
Sujith 已提交
118 119
	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);
S
Sujith 已提交
120

S
Sujith 已提交
121 122
	if (ac->sched)
		return;
123

S
Sujith 已提交
124 125 126
	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}
127

S
Sujith 已提交
128
/*
 * Unpause a TID and, if it has frames buffered, put it back on the
 * scheduler and kick the queue. Takes the txq lock itself.
 */
static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	/* nothing buffered: no need to reschedule */
	if (skb_queue_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}
145

146
/*
 * Per-frame driver state is stashed in the mac80211 rate_driver_data
 * area of the skb's tx info; the BUILD_BUG_ON guarantees it fits.
 */
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

S
Sujith 已提交
154
/*
 * Drain all frames buffered on a TID: retried frames are completed as
 * failed (and removed from the BA window), never-sent frames are handed
 * back to the normal (non-aggregate) transmit path. The txq lock is
 * dropped around the completion/send calls, which may sleep-level work.
 */
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		/* drop the lock: completion paths may re-take it */
		spin_unlock_bh(&txq->axq_lock);
		if (bf && fi->retries) {
			list_add_tail(&bf->list, &bf_head);
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
		} else {
			ath_tx_send_normal(sc, txq, NULL, skb);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	spin_unlock_bh(&txq->axq_lock);
}
185

S
Sujith 已提交
186 187
/*
 * Mark a sequence number as completed in the block-ack window bitmap
 * and slide the window start forward past any leading completed slots.
 */
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	/* advance the window over contiguous completed frames */
	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}
201

S
Sujith 已提交
202
/*
 * Mark a sequence number as in-flight in the block-ack window bitmap,
 * extending the window tail if the new entry lies beyond it.
 */
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	/* grow the tail to cover the newly tracked slot */
	if (index >= ((tid->baw_tail - tid->baw_head) &
		(ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
S
Sujith 已提交
224 225
/*
 * Discard every frame buffered on a TID, completing each with an error
 * status, then reset the block-ack window to an empty state. Called
 * with the txq lock held (note the plain spin_lock/spin_unlock drops
 * around the completion calls).
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		/* no descriptor was ever attached: complete the skb only */
		if (!bf) {
			spin_unlock(&txq->axq_lock);
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			spin_lock(&txq->axq_lock);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);

		/* retried frames occupy a BA window slot; release it */
		if (fi->retries)
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	/* window is now empty: collapse it */
	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

S
Sujith 已提交
262
/*
 * Account a software retry for a frame and, on the first retry only,
 * set the IEEE 802.11 retry bit in the frame header.
 */
static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	/* header bit only needs setting once */
	if (fi->retries++ > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

276
static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
S
Sujith 已提交
277
{
278
	struct ath_buf *bf = NULL;
S
Sujith 已提交
279 280

	spin_lock_bh(&sc->tx.txbuflock);
281 282

	if (unlikely(list_empty(&sc->tx.txbuf))) {
283 284 285
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}
286 287 288 289

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

S
Sujith 已提交
290 291
	spin_unlock_bh(&sc->tx.txbuflock);

292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309
	return bf;
}

/* Return a tx buffer to the driver's free list. */
static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

/*
 * Duplicate a tx buffer (descriptor contents, DMA address, mpdu pointer
 * and state) into a fresh buffer from the free pool. Used to retry the
 * last subframe of an aggregate when its original descriptor must be
 * left in place as the hardware's holding descriptor.
 * Returns NULL (with a WARN) if the pool is empty.
 */
static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

320 321 322 323
/*
 * Walk an (aggregate) buffer chain and count total subframes and the
 * subset that failed: for an aggregate, a subframe failed if its bit is
 * clear in the reported block-ack bitmap; if txok is false, everything
 * counts as bad.
 */
static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
			        struct ath_tx_status *ts, int txok,
			        int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}


S
Sujith 已提交
352 353
/*
 * Process the tx status of a completed (sub)aggregate: complete acked
 * and permanently failed subframes, software-retry the rest, update the
 * block-ack window, feed the rate control algorithm once per status,
 * and requeue pending frames on the TID in order.
 *
 * Fix: clear_filter was declared uninitialized; it is read when the
 * pending queue is spliced back. That read is only reachable today via
 * the retry path that also sets it, but the code relied on that
 * non-obvious invariant — initialize it explicitly.
 */
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool clear_filter = false;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	/* snapshot the rate series before completions overwrite it */
	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		/* station went away: fail the whole chain */
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			/* leave a stale tail buffer as the holding desc */
			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when BA
			 * issue happens. Chip needs to be reset.
			 * But AP code may have sychronization issues
			 * when perform internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if ((tid->state & AGGR_CLEANUP) || !retry) {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			} else if (fi->retries < ATH_MAX_SW_RETRIES) {
				if (!(ts->ts_status & ATH9K_TXERR_FILT) ||
				    !an->sleeping)
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);

				clear_filter = true;
				txpending = 1;
			} else {
				/* retry budget exhausted: fail and BAR */
				txfail = 1;
				sendbar = 1;
				txfail_cnt++;
			}
		}

		/*
		 * Make sure the last desc is reclaimed if it
		 * not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
		    bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, seqno);
			spin_unlock_bh(&txq->axq_lock);

			/* feed rate control exactly once per status */
			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				!txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, false);
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, seqno);
						spin_unlock_bh(&txq->axq_lock);

						ath_tx_rc_status(sc, bf, ts, nframes,
								nbad, 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 1);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					fi->bf = tbf;
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_tim(sta);

		spin_lock_bh(&txq->axq_lock);
		if (clear_filter)
			tid->ac->clear_ps_filter = true;
		skb_queue_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping)
			ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		/* all in-flight subframes done: ADDBA teardown complete */
		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset)
		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
}
583

584 585 586 587 588 589 590 591 592 593 594
static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

595 596 597 598
	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

599 600 601 602 603 604 605
		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

S
Sujith 已提交
606 607
/*
 * Compute the aggregate size limit (bytes) for a frame from its rate
 * series: the smallest 4 ms frame length over the configured rates,
 * further capped by the hardware maximum, BT coexistence, and the
 * peer's advertised max A-MPDU size. Returns 0 to forbid aggregation
 * (probe rates and legacy rates).
 */
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			/* SGI rows directly follow their LGI counterparts */
			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate, if rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we  are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}
673

S
Sujith 已提交
674
/*
S
Sujith 已提交
675
 * Returns the number of delimiters to be added to
S
Sujith 已提交
676 677 678
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption enabled, hardware requires some more padding between
	 * subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *      The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add delimiter when using RTS/CTS with aggregation
	 * and non enterprise AR9003 card
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert desired mpdu density from microeconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40Mhz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	/* never let the density round down to zero symbols */
	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	/* minimum subframe length (bytes) implied by the mpdu density */
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

S
Sujith 已提交
750
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
S
Sujith 已提交
751
					     struct ath_txq *txq,
S
Sujith 已提交
752
					     struct ath_atx_tid *tid,
753 754
					     struct list_head *bf_q,
					     int *aggr_len)
755
{
S
Sujith 已提交
756
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
757
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
S
Sujith 已提交
758
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
S
Sujith 已提交
759 760 761
	u16 aggr_limit = 0, al = 0, bpad = 0,
		al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
762
	struct ieee80211_tx_info *tx_info;
763
	struct ath_frame_info *fi;
764
	struct sk_buff *skb;
765
	u16 seqno;
766

S
Sujith 已提交
767
	do {
768 769 770
		skb = skb_peek(&tid->buf_q);
		fi = get_frame_info(skb);
		bf = fi->bf;
771 772
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);
773

774 775 776
		if (!bf)
			continue;

777
		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
778
		seqno = bf->bf_state.seqno;
779 780
		if (!bf_first)
			bf_first = bf;
781

S
Sujith 已提交
782
		/* do not step over block-ack window */
783
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
S
Sujith 已提交
784 785 786
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}
787

S
Sujith 已提交
788 789 790 791
		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}
792

S
Sujith 已提交
793
		/* do not exceed aggregation limit */
794
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;
795

S
Sujith 已提交
796
		if (nframes &&
797 798
		    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
		     ath_lookup_legacy(bf))) {
S
Sujith 已提交
799 800 801
			status = ATH_AGGR_LIMITED;
			break;
		}
802

803 804 805 806 807
		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
			!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
			break;

S
Sujith 已提交
808 809
		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
S
Sujith 已提交
810 811 812
			status = ATH_AGGR_LIMITED;
			break;
		}
813

S
Sujith 已提交
814
		/* add padding for previous frame to aggregation length */
S
Sujith 已提交
815
		al += bpad + al_delta;
816

S
Sujith 已提交
817 818 819 820
		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
821 822
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
S
Sujith 已提交
823
		bpad = PADBYTES(al_delta) + (ndelim << 2);
824

825
		nframes++;
S
Sujith 已提交
826
		bf->bf_next = NULL;
827

S
Sujith 已提交
828
		/* link buffers of this frame to the aggregate */
829
		if (!fi->retries)
830
			ath_tx_addto_baw(sc, tid, seqno);
831
		bf->bf_state.ndelim = ndelim;
832 833 834

		__skb_unlink(skb, &tid->buf_q);
		list_add_tail(&bf->list, bf_q);
835
		if (bf_prev)
S
Sujith 已提交
836
			bf_prev->bf_next = bf;
837

S
Sujith 已提交
838
		bf_prev = bf;
S
Sujith 已提交
839

840
	} while (!skb_queue_empty(&tid->buf_q));
841

842
	*aggr_len = al;
S
Sujith 已提交
843

S
Sujith 已提交
844 845 846
	return status;
#undef PADBYTES
}
847

848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885
/*
 * Fill the hardware descriptors for a (possibly single-frame) chain:
 * set the rate series, mark first/middle/last aggregate positions with
 * their delimiter counts, propagate the PS-filter clear flag, and link
 * each descriptor to the next.
 */
static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
	struct ath_buf *bf_first = bf;

	bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);
	bool clrdmask = !!(tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT);

	u32 ds_next;

	ath_buf_set_rate(sc, bf, len);

	while (bf) {
		if (bf->bf_next)
			ds_next = bf->bf_next->bf_daddr;
		else
			ds_next = 0;

		ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, clrdmask);
		if (!aggr)
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
		else if (!bf->bf_next)
			ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_desc);
		else {
			/* first subframe also carries the aggregate length */
			if (bf == bf_first)
				ath9k_hw_set11n_aggr_first(sc->sc_ah,
					bf->bf_desc, len);

			ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc,
				bf->bf_state.ndelim);
		}

		ath9k_hw_set_desc_link(ah, bf->bf_desc, ds_next);
		bf = bf->bf_next;
	}
}

S
Sujith 已提交
886 887 888
/*
 * Repeatedly form aggregates from a TID's buffered frames and hand them
 * to the hardware queue, until the queue is deep enough, the TID runs
 * dry, or the BA window closes.
 */
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ieee80211_tx_info *tx_info;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (skb_queue_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);

		/* consume the pending PS-filter clear request, if any */
		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
			tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
		} else {
			tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
		}

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
			bf->bf_state.bf_type = BUF_AMPDU;
		} else {
			TX_STAT_INC(txq->axq_qnum, a_aggr);
		}

		ath_tx_fill_desc(sc, bf, aggr_len);
		ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

935 936
/*
 * mac80211 ADDBA request hook: pause the TID, record the starting
 * sequence number in *ssn, and reset the BA window tracking.
 * Returns -EAGAIN if a session is already active or being torn down.
 */
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;

	/* start with an empty block-ack window */
	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}
956

S
Sujith 已提交
957
/*
 * mac80211 DELBA hook: tear down the aggregation session for a TID.
 * If subframes are still in flight, defer the teardown to tx completion
 * via AGGR_CLEANUP; otherwise drop ADDBA state immediately. Buffered
 * frames are flushed either way.
 */
void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	/* teardown already in progress */
	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}
988

989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007
/*
 * Station entered power-save: unschedule all of its TIDs (and their
 * access categories) from the tx queues.
 *
 * Returns true if any TID still holds buffered frames in its software
 * queue, so the caller knows the station has pending traffic.
 */
bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	bool buffered = false;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		if (!tid->sched)
			continue;

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		if (!skb_queue_empty(&tid->buf_q))
			buffered = true;

		/* Remove the TID from the schedule rotation */
		tid->sched = false;
		list_del(&tid->list);

		/* Drop the AC too once it is scheduled with no reason */
		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

		spin_unlock_bh(&txq->axq_lock);
	}

	return buffered;
}

/*
 * Station left power-save: re-queue every TID that has buffered frames
 * and is not paused, and kick the scheduler for its tx queue.
 * clear_ps_filter is set so the hardware PS filter is reset on the
 * next transmitted frame.
 */
void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);
		ac->clear_ps_filter = true;

		if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
			ath_tx_queue_tid(txq, tid);
			ath_txq_schedule(sc, txq);
		}

		spin_unlock_bh(&txq->axq_lock);
	}
}

S
Sujith 已提交
1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064
/*
 * ADDBA handshake completed: record the negotiated block-ack window
 * size, mark the session established and unpause the TID so queued
 * frames can be aggregated.  Only relevant when tx aggregation is
 * enabled (SC_OP_TXAGGR).
 */
void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		/* Window size comes from the peer's HT A-MPDU factor */
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

S
Sujith 已提交
1067 1068 1069
/********************/
/* Queue Management */
/********************/
1070

S
Sujith 已提交
1071 1072
/*
 * Drop every software-queued frame on a tx queue: walk all scheduled
 * access categories and their TIDs, unschedule them and drain each
 * TID's pending buffers.  Caller holds txq->axq_lock.
 */
static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

S
Sujith 已提交
1088
/*
 * Allocate and initialize a hardware tx queue of the given type and
 * WME subtype.  Returns the driver-side ath_txq, or NULL if the
 * hardware has no free queue or the returned qnum is out of range.
 * The queue's software state is initialized only on first setup.
 */
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	/* Map WME access category to the hardware queue subtype */
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_err(common, "qnum %u out of range, max %zu!\n",
			axq_qnum, ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, axq_qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		/* EDMA tx FIFO ring, used only on ATH9K_HW_CAP_EDMA parts */
		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
	}
	return &sc->tx.txq[axq_qnum];
}

S
Sujith 已提交
1168 1169 1170
/*
 * Update the hardware parameters (AIFS, CW min/max, burst/ready time)
 * of a tx queue.  The beacon queue is special-cased: the parameters
 * are only cached and applied later by ath_beaconq_config.
 * Returns 0 on success, -EIO if the hardware rejects the update.
 */
int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	/* Read current props, overlay the caller's values, write back */
	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		/* Push the new settings into the MAC */
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

/*
 * Recompute the CAB (content-after-beacon) queue ready time as a
 * clamped percentage of the beacon interval and apply it.
 * Always returns 0.
 */
int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (cur_conf->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

1227 1228 1229 1230 1231 1232
/*
 * True if the buffer is an A-MPDU frame that is NOT a rate-control
 * probe; only such frames count toward axq_ampdu_depth.
 * (Re-indented with tabs to match kernel coding style; the original
 * body used 4-space indentation inconsistent with the rest of the file.)
 */
static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);

	return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

1233 1234
/*
 * Complete (with error status) every buffer on the given descriptor
 * list.  Stale head buffers left behind by the completion path are
 * simply returned to the free pool.  axq_lock is dropped around the
 * completion callbacks (which may call back into mac80211) and
 * reacquired afterwards, as the sparse annotations document.
 */
static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *list, bool retry_tx)
	__releases(txq->axq_lock)
	__acquires(txq->axq_lock)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	/* Zeroed status: frames are reported as not acked */
	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while (!list_empty(list)) {
		bf = list_first_entry(list, struct ath_buf, list);

		if (bf->bf_stale) {
			list_del(&bf->list);

			ath_tx_return_buffer(sc, bf);
			continue;
		}

		/* Detach the whole (possibly aggregated) frame at once */
		lastbf = bf->bf_lastbf;
		list_cut_position(&bf_head, list, &lastbf->list);

		txq->axq_depth--;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;

		/* Completion handlers must run without axq_lock held */
		spin_unlock_bh(&txq->axq_lock);
		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
					     retry_tx);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock_bh(&txq->axq_lock);
	}
}
1271

1272 1273 1274 1275 1276 1277 1278 1279
/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	spin_lock_bh(&txq->axq_lock);
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		/* EDMA chips: drain each tx FIFO slot from the tail up */
		int idx = txq->txq_tailidx;

		while (!list_empty(&txq->txq_fifo[idx])) {
			ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
					   retry_tx);

			INCR(idx, ATH_TXFIFO_DEPTH);
		}
		txq->txq_tailidx = idx;
	}

	txq->axq_link = NULL;
	txq->axq_tx_inprogress = false;
	ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);

	/* flush any pending frames if aggregation is enabled */
	if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
		ath_txq_drain_pending_buffers(sc, txq);

	spin_unlock_bh(&txq->axq_lock);
}

1304
/*
 * Abort tx DMA on all hardware queues and drain every configured tx
 * queue.  Returns true if DMA stopped cleanly, false if any queue
 * still had frames pending after the abort.
 */
bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return true;

	ath9k_hw_abort_tx_dma(ah);

	/* Check if any queue remains active */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
	}

	if (npend)
		ath_err(common, "Failed to stop TX DMA!\n");

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		/*
		 * The caller will resume queues with ieee80211_wake_queues.
		 * Mark the queue as not stopped to prevent ath_tx_complete
		 * from waking the queue too early.
		 */
		txq = &sc->tx.txq[i];
		txq->stopped = false;
		ath_draintxq(sc, txq, retry_tx);
	}

	return !npend;
}
1343

S
Sujith 已提交
1344
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
S
Sujith 已提交
1345
{
S
Sujith 已提交
1346 1347
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
S
Sujith 已提交
1348
}
1349

1350 1351 1352
/* For each axq_acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 */
S
Sujith 已提交
1353 1354
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
1355 1356
	struct ath_atx_ac *ac, *ac_tmp, *last_ac;
	struct ath_atx_tid *tid, *last_tid;
1357

1358
	if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
1359
	    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
S
Sujith 已提交
1360
		return;
1361

S
Sujith 已提交
1362
	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
1363
	last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
1364

1365 1366 1367 1368
	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
		list_del(&ac->list);
		ac->sched = false;
1369

1370 1371 1372 1373 1374
		while (!list_empty(&ac->tid_q)) {
			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
					       list);
			list_del(&tid->list);
			tid->sched = false;
1375

1376 1377
			if (tid->paused)
				continue;
1378

1379
			ath_tx_sched_aggr(sc, txq, tid);
1380

1381 1382 1383 1384
			/*
			 * add tid to round-robin queue if more frames
			 * are pending for the tid
			 */
1385
			if (!skb_queue_empty(&tid->buf_q))
1386
				ath_tx_queue_tid(txq, tid);
1387

1388 1389 1390 1391
			if (tid == last_tid ||
			    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
				break;
		}
1392

1393 1394 1395 1396 1397
		if (!list_empty(&ac->tid_q)) {
			if (!ac->sched) {
				ac->sched = true;
				list_add_tail(&ac->list, &txq->axq_acq);
			}
1398
		}
1399 1400 1401 1402

		if (ac == last_ac ||
		    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
			return;
S
Sujith 已提交
1403 1404
	}
}
1405

S
Sujith 已提交
1406 1407 1408 1409
/***********/
/* TX, DMA */
/***********/

1410
/*
S
Sujith 已提交
1411 1412
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
1413
 */
S
Sujith 已提交
1414
/*
 * Insert a chain of ath_buf (descriptors) on a txq and hand them to
 * the hardware.  The descriptors are assumed to be chained together
 * by the caller.  EDMA parts push into the tx FIFO ring; legacy parts
 * either link onto the existing descriptor chain or program TXDP
 * directly when the queue was idle.  When 'internal' is set the
 * depth counters are not bumped (requeue of already-counted frames).
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *bf_last;
	bool puttxbuf = false;
	bool edma;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	bf = list_first_entry(head, struct ath_buf, list);
	bf_last = list_entry(head->prev, struct ath_buf, list);

	ath_dbg(common, ATH_DBG_QUEUE,
		"qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
		/* Free FIFO slot available: use it */
		list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		puttxbuf = true;
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link) {
			/* Chain onto the last descriptor already queued */
			ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
			ath_dbg(common, ATH_DBG_XMIT,
				"link[%u] (%p)=%llx (%p)\n",
				txq->axq_qnum, txq->axq_link,
				ito64(bf->bf_daddr), bf->bf_desc);
		} else if (!edma)
			puttxbuf = true;

		txq->axq_link = bf_last->bf_desc;
	}

	if (puttxbuf) {
		TX_STAT_INC(txq->axq_qnum, puttxbuf);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	}

	if (!edma) {
		TX_STAT_INC(txq->axq_qnum, txstart);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}

	if (!internal) {
		txq->axq_depth++;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth++;
	}
}
1475

S
Sujith 已提交
1476
/*
 * Transmit a frame belonging to an aggregation-enabled TID.  Either
 * buffers it in the TID's software queue for later aggregation, or
 * sends it straight to the hardware as a lone A-MPDU subframe.
 */
static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct sk_buff *skb, struct ath_tx_control *txctl)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct list_head bf_head;
	struct ath_buf *bf;

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
	    txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
		__skb_queue_tail(&tid->buf_q, skb);
		if (!txctl->an || !txctl->an->sleeping)
			ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

	bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
	if (!bf)
		return;

	bf->bf_state.bf_type = BUF_AMPDU;
	INIT_LIST_HEAD(&bf_head);
	list_add(&bf->list, &bf_head);

	/* Add sub-frame to BAW */
	ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);

	/* Queue to h/w without aggregation */
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
	bf->bf_lastbf = bf;
	ath_tx_fill_desc(sc, bf, fi->framelen);
	ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
}

F
Felix Fietkau 已提交
1522
/*
 * Transmit a single non-aggregated frame directly to the hardware.
 * Reuses a buffer already attached to the frame info if present,
 * otherwise sets one up; silently drops the frame if no buffer is
 * available (ath_tx_setup_buffer frees the skb on failure).
 */
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct list_head bf_head;
	struct ath_buf *bf;

	bf = fi->bf;
	if (!bf)
		bf = ath_tx_setup_buffer(sc, txq, tid, skb);

	if (!bf)
		return;

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);
	bf->bf_state.bf_type = 0;

	/* update starting sequence number for subsequent ADDBA request */
	if (tid)
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	bf->bf_lastbf = bf;
	ath_tx_fill_desc(sc, bf, fi->framelen);
	ath_tx_txqaddbuf(sc, txq, &bf_head, false);
	TX_STAT_INC(txq->axq_qnum, queued);
}

/*
 * Map an 802.11 frame to the hardware descriptor packet type used by
 * ath9k_hw_set11n_txdesc().  Anything that is not a beacon, probe
 * response, ATIM or PS-Poll is a normal frame.
 */
static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		return ATH9K_PKT_TYPE_BEACON;
	if (ieee80211_is_probe_resp(fc))
		return ATH9K_PKT_TYPE_PROBE_RESP;
	if (ieee80211_is_atim(fc))
		return ATH9K_PKT_TYPE_ATIM;
	if (ieee80211_is_pspoll(fc))
		return ATH9K_PKT_TYPE_PSPOLL;

	return ATH9K_PKT_TYPE_NORMAL;
}

1573 1574
/*
 * Initialize the per-frame driver info (ath_frame_info) stored in the
 * skb control block: hardware key index/type and the full frame
 * length.  For data frames to a station with a power-save key, that
 * key is used when no explicit hardware key is set.
 */
static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
			     int framelen)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = tx_info->control.sta;
	struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_node *an = NULL;
	enum ath9k_key_type keytype;

	keytype = ath9k_cmn_get_hw_crypto_keytype(skb);

	if (sta)
		an = (struct ath_node *) sta->drv_priv;

	/* NOTE: this overwrites the mac80211 control info in the skb cb */
	memset(fi, 0, sizeof(*fi));
	if (hw_key)
		fi->keyix = hw_key->hw_key_idx;
	else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
		fi->keyix = an->ps_key;
	else
		fi->keyix = ATH9K_TXKEYIX_INVALID;
	fi->keytype = keytype;
	fi->framelen = framelen;
}

F
Felix Fietkau 已提交
1600
static int setup_tx_flags(struct sk_buff *skb)
S
Sujith 已提交
1601 1602 1603 1604 1605 1606 1607 1608 1609
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	int flags = 0;

	flags |= ATH9K_TXDESC_INTREQ;

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= ATH9K_TXDESC_NOACK;

F
Felix Fietkau 已提交
1610
	if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
L
Luis R. Rodriguez 已提交
1611 1612
		flags |= ATH9K_TXDESC_LDPC;

S
Sujith 已提交
1613 1614 1615 1616 1617 1618 1619 1620 1621
	return flags;
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width  - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4us v/s 3.6 us for symbol time
 */
1622
/*
 * Compute the airtime (in microseconds) of an HT frame.
 *
 * rix     - rate index
 * pktlen  - total bytes (delims + data + fcs + pads + pad delims)
 * width   - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - use 3.6 us symbols instead of 4 us
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	/* round up to a whole number of symbols */
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (half_gi)
		duration = SYMBOL_TIME_HALFGI(nsymbols);
	else
		duration = SYMBOL_TIME(nsymbols);

	/* addup duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}

1645 1646 1647 1648
/*
 * On APM-capable hardware, transmitting low rates on 5 GHz with all
 * three chains is reduced to two chains; otherwise the requested
 * chainmask is used unchanged.
 */
u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_channel *curchan = ah->curchan;

	if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
	    (curchan->channelFlags & CHANNEL_5GHZ) &&
	    (chainmask == 0x7) && (rate < 0x90))
		return 0x3;

	return chainmask;
}

1657
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
S
Sujith 已提交
1658
{
1659
	struct ath_hw *ah = sc->sc_ah;
S
Sujith 已提交
1660 1661 1662 1663
	struct ath9k_11n_rate_series series[4];
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
1664
	const struct ieee80211_rate *rate;
1665
	struct ieee80211_hdr *hdr;
1666 1667
	int i, flags = 0;
	u8 rix = 0, ctsrate = 0;
1668
	bool is_pspoll;
S
Sujith 已提交
1669 1670 1671

	memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);

S
Sujith 已提交
1672
	skb = bf->bf_mpdu;
S
Sujith 已提交
1673 1674
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
1675 1676
	hdr = (struct ieee80211_hdr *)skb->data;
	is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
S
Sujith 已提交
1677 1678

	/*
1679 1680 1681
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
S
Sujith 已提交
1682
	 */
1683 1684
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	ctsrate = rate->hw_value;
1685
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
1686
		ctsrate |= rate->hw_value_short;
S
Sujith 已提交
1687 1688

	for (i = 0; i < 4; i++) {
1689 1690 1691
		bool is_40, is_sgi, is_sp;
		int phy;

S
Sujith 已提交
1692 1693 1694 1695 1696 1697
		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		series[i].Tries = rates[i].count;

1698
		    if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
1699
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
F
Felix Fietkau 已提交
1700 1701 1702 1703 1704 1705
			flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_CTSENA;
		}

1706 1707 1708 1709
		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			series[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
S
Sujith 已提交
1710

1711 1712 1713 1714 1715 1716 1717
		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			series[i].Rate = rix | 0x80;
1718
			series[i].ChSel = ath_txchainmask_reduction(sc,
1719
					ah->txchainmask, series[i].Rate);
1720
			series[i].PktDuration = ath_pkt_duration(sc, rix, len,
1721
				 is_40, is_sgi, is_sp);
1722 1723
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				series[i].RateFlags |= ATH9K_RATESERIES_STBC;
1724 1725 1726
			continue;
		}

1727
		/* legacy rates */
1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		series[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				series[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

1743
		if (bf->bf_state.bfs_paprd)
1744
			series[i].ChSel = ah->txchainmask;
1745 1746
		else
			series[i].ChSel = ath_txchainmask_reduction(sc,
1747
					ah->txchainmask, series[i].Rate);
1748

1749
		series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
1750
			phy, rate->bitrate * 100, len, rix, is_sp);
1751 1752
	}

F
Felix Fietkau 已提交
1753
	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
1754
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
F
Felix Fietkau 已提交
1755 1756 1757 1758 1759 1760
		flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (flags & ATH9K_TXDESC_RTSENA)
		flags &= ~ATH9K_TXDESC_CTSENA;

S
Sujith 已提交
1761
	/* set dur_update_en for l-sig computation except for PS-Poll frames */
1762 1763
	ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
				     bf->bf_lastbf->bf_desc,
1764
				     !is_pspoll, ctsrate,
1765
				     0, series, 4, flags);
1766 1767 1768

}

1769 1770 1771 1772
/*
 * Assign a descriptor (and sequence number if necessary,
 * and map buffer for DMA. Frees skb on error
 */
1773
/*
 * Assign a descriptor (and sequence number if necessary),
 * and map buffer for DMA. Frees skb on error.
 *
 * Returns the initialized ath_buf, or NULL if no buffer is available
 * or DMA mapping fails — in both error cases the skb is consumed.
 */
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_buf *bf;
	struct ath_desc *ds;
	int frm_type;
	u16 seqno;

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
		goto error;
	}

	ATH_TXBUF_RESET(bf);

	/* Aggregation TID: assign the next sequence number from the TID */
	if (tid) {
		seqno = tid->seq_next;
		hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
		INCR(tid->seq_next, IEEE80211_SEQ_MAX);
		bf->bf_state.seqno = seqno;
	}

	bf->bf_flags = setup_tx_flags(skb);
	bf->bf_mpdu = skb;

	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
		/* Detach the skb before returning the buffer so it is not
		 * freed twice */
		bf->bf_mpdu = NULL;
		bf->bf_buf_addr = 0;
		ath_err(ath9k_hw_common(sc->sc_ah),
			"dma_mapping_error() on TX\n");
		ath_tx_return_buffer(sc, bf);
		goto error;
	}

	frm_type = get_hw_packet_type(skb);

	ds = bf->bf_desc;
	ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
			       fi->keyix, fi->keytype, bf->bf_flags);

	ath9k_hw_filltxdesc(ah, ds,
			    skb->len,	/* segment length */
			    true,	/* first segment */
			    true,	/* last segment */
			    ds,		/* first descriptor */
			    bf->bf_buf_addr,
			    txq->axq_qnum);

	fi->bf = bf;

	return bf;

error:
	dev_kfree_skb_any(skb);
	return NULL;
}

/* FIXME: tx power */
1840
/* FIXME: tx power */
/*
 * Dispatch a frame to the hardware, choosing between the aggregation
 * path (QoS data to an aggregation-enabled TID) and the normal path.
 * Holds the destination queue's axq_lock for the whole operation.
 */
static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
			     struct ath_tx_control *txctl)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf;
	u8 tidno;

	spin_lock_bh(&txctl->txq->axq_lock);
	if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
		ieee80211_is_data_qos(hdr->frame_control)) {
		tidno = ieee80211_get_qos_ctl(hdr)[0] &
			IEEE80211_QOS_CTL_TID_MASK;
		tid = ATH_AN_2_TID(txctl->an, tidno);

		WARN_ON(tid->ac->txq != txctl->txq);
	}

	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
		/*
		 * Try aggregation if it's a unicast data frame
		 * and the destination is HT capable.
		 */
		ath_tx_send_ampdu(sc, tid, skb, txctl);
	} else {
		bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
		if (!bf)
			goto out;

		bf->bf_state.bfs_paprd = txctl->paprd;

		if (bf->bf_state.bfs_paprd)
			ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
						   bf->bf_state.bfs_paprd);

		if (txctl->paprd)
			bf->bf_state.bfs_paprd_timestamp = jiffies;

		ath_tx_send_normal(sc, txctl->txq, tid, skb);
	}

out:
	spin_unlock_bh(&txctl->txq->axq_lock);
}

1886
/* Upon failure caller should free skb */
1887
/* Upon failure caller should free skb */
/*
 * mac80211 tx entry point: assign sequence numbers where requested,
 * insert 4-byte header padding, set up the per-frame driver info,
 * apply queue flow control and hand the frame to ath_tx_start_dma().
 * Returns 0 on success or -ENOMEM if there is no headroom for
 * padding (caller frees the skb).
 */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = info->control.sta;
	struct ieee80211_vif *vif = info->control.vif;
	struct ath_softc *sc = hw->priv;
	struct ath_txq *txq = txctl->txq;
	int padpos, padsize;
	int frmlen = skb->len + FCS_LEN;
	int q;

	/* NOTE:  sta can be NULL according to net/mac80211.h */
	if (sta)
		txctl->an = (struct ath_node *)sta->drv_priv;

	if (info->control.hw_key)
		frmlen += info->control.hw_key->icv_len;

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	/* Add the padding after the header if this is not already done */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize)
			return -ENOMEM;

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

	/* Only AP interfaces use the PS filter for data frames */
	if ((vif && vif->type != NL80211_IFTYPE_AP &&
	            vif->type != NL80211_IFTYPE_AP_VLAN) ||
	    !ieee80211_is_data(hdr->frame_control))
		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;

	setup_frame_info(hw, skb, frmlen);

	/*
	 * At this point, the vif, hw_key and sta pointers in the tx control
	 * info are no longer valid (overwritten by the ath_frame_info data.
	 */

	/* Stop the mac80211 queue when the driver queue gets too deep */
	q = skb_get_queue_mapping(skb);
	spin_lock_bh(&txq->axq_lock);
	if (txq == sc->tx.txq_map[q] &&
	    ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
		ieee80211_stop_queue(sc->hw, q);
		txq->stopped = 1;
	}
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_start_dma(sc, skb, txctl);
	return 0;
}

S
Sujith 已提交
1955 1956 1957
/*****************/
/* TX Completion */
/*****************/
S
Sujith 已提交
1958

S
Sujith 已提交
1959
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1960
			    int tx_flags, struct ath_txq *txq)
S
Sujith 已提交
1961
{
S
Sujith 已提交
1962 1963
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1964
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1965
	struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
1966
	int q, padpos, padsize;
S
Sujith 已提交
1967

J
Joe Perches 已提交
1968
	ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
S
Sujith 已提交
1969

1970
	if (tx_flags & ATH_TX_BAR)
S
Sujith 已提交
1971 1972
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

1973
	if (!(tx_flags & ATH_TX_ERROR))
S
Sujith 已提交
1974 1975
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;
S
Sujith 已提交
1976

1977 1978 1979
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len>padpos+padsize) {
S
Sujith 已提交
1980 1981 1982 1983
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
1984
		memmove(skb->data + padsize, skb->data, padpos);
S
Sujith 已提交
1985 1986
		skb_pull(skb, padsize);
	}
S
Sujith 已提交
1987

S
Sujith 已提交
1988 1989
	if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
J
Joe Perches 已提交
1990 1991
		ath_dbg(common, ATH_DBG_PS,
			"Going back to sleep after having received TX status (0x%lx)\n",
S
Sujith 已提交
1992 1993 1994 1995
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
1996 1997
	}

1998 1999 2000 2001 2002
	q = skb_get_queue_mapping(skb);
	if (txq == sc->tx.txq_map[q]) {
		spin_lock_bh(&txq->axq_lock);
		if (WARN_ON(--txq->pending_frames < 0))
			txq->pending_frames = 0;
2003

2004 2005 2006
		if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
			ieee80211_wake_queue(sc->hw, q);
			txq->stopped = 0;
2007
		}
2008
		spin_unlock_bh(&txq->axq_lock);
2009
	}
2010 2011

	ieee80211_tx_status(hw, skb);
S
Sujith 已提交
2012
}
2013

S
Sujith 已提交
2014
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
2015 2016
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar)
2017
{
S
Sujith 已提交
2018 2019
	struct sk_buff *skb = bf->bf_mpdu;
	unsigned long flags;
2020
	int tx_flags = 0;
2021

S
Sujith 已提交
2022
	if (sendbar)
2023
		tx_flags = ATH_TX_BAR;
2024

2025
	if (!txok)
2026
		tx_flags |= ATH_TX_ERROR;
2027

B
Ben Greear 已提交
2028
	dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
2029
	bf->bf_buf_addr = 0;
2030 2031

	if (bf->bf_state.bfs_paprd) {
2032 2033 2034
		if (time_after(jiffies,
				bf->bf_state.bfs_paprd_timestamp +
				msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
2035
			dev_kfree_skb_any(skb);
2036
		else
2037
			complete(&sc->paprd_complete);
2038
	} else {
2039
		ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
2040
		ath_tx_complete(sc, skb, tx_flags, txq);
2041
	}
2042 2043 2044 2045
	/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
	 * accidentally reference it later.
	 */
	bf->bf_mpdu = NULL;
S
Sujith 已提交
2046 2047 2048 2049 2050 2051 2052

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
2053 2054
}

F
Felix Fietkau 已提交
2055 2056 2057
/*
 * Translate a hardware tx status into the mac80211 rate-control fields
 * of the frame's ieee80211_tx_info (ack signal, A-MPDU ack counts,
 * per-rate retry counts).
 *
 * @nframes:   number of subframes in this transmit unit (1 for non-aggr)
 * @nbad:      number of those subframes that were not acknowledged
 * @txok:      hardware reported overall transmit success
 * @update_rc: false to skip updating rate-control statistics
 */
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok, bool update_rc)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	u8 i, tx_rateindex;

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	tx_rateindex = ts->ts_rateindex;
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

		BUG_ON(nbad > nframes);

		tx_info->status.ampdu_len = nframes;
		tx_info->status.ampdu_ack_len = nframes - nbad;
	}

	/* Only frames that expected an ACK and were not filtered feed RC. */
	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
		/*
		 * If an underrun error is seen assume it as an excessive
		 * retry only if max frame trigger level has been reached
		 * (2 KB for single stream, and 4 KB for dual stream).
		 * Adjust the long retry as if the frame was tried
		 * hw->max_rate_tries times to affect how rate control updates
		 * PER for the failed rate.
		 * In case of congestion on the bus penalizing this type of
		 * underruns should help hardware actually transmit new frames
		 * successfully by eventually preferring slower rates.
		 * This itself should also alleviate congestion on the bus.
		 */
		if (ieee80211_is_data(hdr->frame_control) &&
		    (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
		                     ATH9K_TX_DELIM_UNDERRUN)) &&
		    ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
			tx_info->status.rates[tx_rateindex].count =
				hw->max_rate_tries;
	}

	/* Invalidate all rate-table entries after the one actually used. */
	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	/* Report tries (retries + 1) at the final rate. */
	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}

2113 2114 2115
/*
 * Complete one transmit unit (single frame or A-MPDU) the hardware has
 * finished with: update queue-depth accounting, then hand the buffers
 * to the appropriate completion path.
 *
 * Called with txq->axq_lock held. The lock is dropped across the
 * completion calls and re-taken before returning, as the sparse
 * annotations below document.
 */
static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_tx_status *ts, struct ath_buf *bf,
				  struct list_head *bf_head)
	__releases(txq->axq_lock)
	__acquires(txq->axq_lock)
{
	int txok;

	txq->axq_depth--;
	/* Success means none of the error bits in the mask are set. */
	txok = !(ts->ts_status & ATH9K_TXERR_MASK);
	txq->axq_tx_inprogress = false;
	if (bf_is_ampdu_not_probing(bf))
		txq->axq_ampdu_depth--;

	spin_unlock_bh(&txq->axq_lock);

	if (!bf_isampdu(bf)) {
		/* Single frame: nframes = 1, nbad = 1 iff the tx failed. */
		ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok, true);
		ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
	} else
		ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);

	spin_lock_bh(&txq->axq_lock);

	/* Give aggregation a chance to queue more frames on this txq. */
	if (sc->sc_flags & SC_OP_TXAGGR)
		ath_txq_schedule(sc, txq);
}

S
Sujith 已提交
2141
/*
 * Reap completed frames from a legacy (non-EDMA) hardware tx queue.
 * Walks the software queue from the head, asks the hardware for the
 * status of each transmit unit's last descriptor, and stops at the
 * first unit still in progress.
 */
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int status;

	ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	spin_lock_bh(&txq->axq_lock);
	for (;;) {
		/* Bail out early if a chip reset has been scheduled. */
		if (work_pending(&sc->hw_reset_work))
			break;

		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			if (sc->sc_flags & SC_OP_TXAGGR)
				ath_txq_schedule(sc, txq);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-load the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_stale) {
			bf_held = bf;
			/* A lone stale buffer is the holding descriptor. */
			if (list_is_last(&bf_held->list, &txq->axq_q))
				break;

			bf = list_entry(bf_held->list.next, struct ath_buf,
					list);
		}

		/* Completion status lives in the unit's last descriptor. */
		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		/* Hardware has not finished this unit yet; stop reaping. */
		if (status == -EINPROGRESS)
			break;

		TX_STAT_INC(txq->axq_qnum, txprocdesc);

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_stale = true;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
				&txq->axq_q, lastbf->list.prev);

		/* The previous holding descriptor can now be recycled. */
		if (bf_held) {
			list_del(&bf_held->list);
			ath_tx_return_buffer(sc, bf_held);
		}

		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
	}
	spin_unlock_bh(&txq->axq_lock);
}

S
Sujith 已提交
2217
static void ath_tx_complete_poll_work(struct work_struct *work)
2218 2219 2220 2221 2222 2223
{
	struct ath_softc *sc = container_of(work, struct ath_softc,
			tx_complete_work.work);
	struct ath_txq *txq;
	int i;
	bool needreset = false;
2224 2225 2226
#ifdef CONFIG_ATH9K_DEBUGFS
	sc->tx_complete_poll_work_seen++;
#endif
2227 2228 2229 2230 2231 2232 2233 2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			spin_lock_bh(&txq->axq_lock);
			if (txq->axq_depth) {
				if (txq->axq_tx_inprogress) {
					needreset = true;
					spin_unlock_bh(&txq->axq_lock);
					break;
				} else {
					txq->axq_tx_inprogress = true;
				}
			}
			spin_unlock_bh(&txq->axq_lock);
		}

	if (needreset) {
J
Joe Perches 已提交
2245 2246
		ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
			"tx hung, resetting the chip\n");
2247
		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
2248 2249
	}

2250
	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
2251 2252 2253 2254
			msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
}


2255

S
Sujith 已提交
2256
void ath_tx_tasklet(struct ath_softc *sc)
2257
{
S
Sujith 已提交
2258 2259
	int i;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
2260

S
Sujith 已提交
2261
	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
2262

S
Sujith 已提交
2263 2264 2265
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
2266 2267 2268
	}
}

2269 2270
/*
 * Completion handler for EDMA (tx-status-ring) hardware: status entries
 * are read from a dedicated ring via ath9k_hw_txprocdesc(NULL) rather
 * than from the frame descriptors. Drains the ring until it is empty or
 * an error is reported.
 */
void ath_tx_edma_tasklet(struct ath_softc *sc)
{
	struct ath_tx_status ts;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	int status;

	for (;;) {
		/* Bail out early if a chip reset has been scheduled. */
		if (work_pending(&sc->hw_reset_work))
			break;

		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
		/* Ring drained for now. */
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
			ath_dbg(common, ATH_DBG_XMIT,
				"Error processing tx status\n");
			break;
		}

		/* Skip beacon completions */
		if (ts.qid == sc->beacon.beaconq)
			continue;

		txq = &sc->tx.txq[ts.qid];

		spin_lock_bh(&txq->axq_lock);

		/* Status with no buffered frame in the tail FIFO slot:
		 * nothing to complete, stop processing entirely. */
		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
			spin_unlock_bh(&txq->axq_lock);
			return;
		}

		bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
				      struct ath_buf, list);
		lastbf = bf->bf_lastbf;

		/* Detach the whole transmit unit (bf .. lastbf). */
		INIT_LIST_HEAD(&bf_head);
		list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
				  &lastbf->list);

		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
			/* FIFO slot fully drained: advance the tail and
			 * push any software-queued frames to hardware. */
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);

			if (!list_empty(&txq->axq_q)) {
				struct list_head bf_q;

				INIT_LIST_HEAD(&bf_q);
				txq->axq_link = NULL;
				list_splice_tail_init(&txq->axq_q, &bf_q);
				ath_tx_txqaddbuf(sc, txq, &bf_q, true);
			}
		}

		/* Drops and re-takes axq_lock internally. */
		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
		spin_unlock_bh(&txq->axq_lock);
	}
}

S
Sujith 已提交
2331 2332 2333
/*****************/
/* Init, Cleanup */
/*****************/
2334

2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369
/*
 * Allocate the coherent DMA ring the hardware writes tx status entries
 * into. Returns 0 on success or -ENOMEM on allocation failure.
 */
static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
	struct ath_descdma *dd = &sc->txsdma;

	dd->dd_desc_len = size * sc->sc_ah->caps.txs_len;
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);

	return dd->dd_desc ? 0 : -ENOMEM;
}

/*
 * Set up the EDMA tx status ring and hand its address to the hardware.
 * Returns 0 on success or the ath_txstatus_setup() error code.
 */
static int ath_tx_edma_init(struct ath_softc *sc)
{
	int ret = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);

	if (ret)
		return ret;

	ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
				  sc->txsdma.dd_desc_paddr,
				  ATH_TXSTATUS_RING_SIZE);
	return 0;
}

/* Free the coherent DMA memory backing the EDMA tx status ring. */
static void ath_tx_edma_cleanup(struct ath_softc *sc)
{
	dma_free_coherent(sc->dev, sc->txsdma.dd_desc_len,
			  sc->txsdma.dd_desc,
			  sc->txsdma.dd_desc_paddr);
}

S
Sujith 已提交
2370
int ath_tx_init(struct ath_softc *sc, int nbufs)
2371
{
2372
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
S
Sujith 已提交
2373
	int error = 0;
2374

2375
	spin_lock_init(&sc->tx.txbuflock);
2376

2377
	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
2378
				  "tx", nbufs, 1, 1);
2379
	if (error != 0) {
2380 2381
		ath_err(common,
			"Failed to allocate tx descriptors: %d\n", error);
2382 2383
		goto err;
	}
2384

2385
	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
2386
				  "beacon", ATH_BCBUF, 1, 1);
2387
	if (error != 0) {
2388 2389
		ath_err(common,
			"Failed to allocate beacon descriptors: %d\n", error);
2390 2391
		goto err;
	}
2392

2393 2394
	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

2395 2396 2397 2398 2399 2400
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		error = ath_tx_edma_init(sc);
		if (error)
			goto err;
	}

2401
err:
S
Sujith 已提交
2402 2403
	if (error != 0)
		ath_tx_cleanup(sc);
2404

S
Sujith 已提交
2405
	return error;
2406 2407
}

2408
void ath_tx_cleanup(struct ath_softc *sc)
S
Sujith 已提交
2409 2410 2411 2412 2413 2414
{
	if (sc->beacon.bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

	if (sc->tx.txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
2415 2416 2417

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_tx_edma_cleanup(sc);
S
Sujith 已提交
2418
}
2419 2420 2421

void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
2422 2423 2424
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;
2425

2426
	for (tidno = 0, tid = &an->tid[tidno];
2427 2428 2429 2430 2431 2432 2433 2434
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an        = an;
		tid->tidno     = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size  = WME_MAX_BA;
		tid->baw_head  = tid->baw_tail = 0;
		tid->sched     = false;
S
Sujith 已提交
2435
		tid->paused    = false;
2436
		tid->state &= ~AGGR_CLEANUP;
2437
		__skb_queue_head_init(&tid->buf_q);
2438
		acno = TID_TO_WME_AC(tidno);
2439
		tid->ac = &an->ac[acno];
2440 2441
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
2442
	}
2443

2444
	for (acno = 0, ac = &an->ac[acno];
2445 2446
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched    = false;
2447
		ac->txq = sc->tx.txq_map[acno];
2448
		INIT_LIST_HEAD(&ac->tid_q);
2449 2450 2451
	}
}

S
Sujith 已提交
2452
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
2453
{
2454 2455
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
2456
	struct ath_txq *txq;
2457
	int tidno;
S
Sujith 已提交
2458

2459 2460
	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {
2461

2462
		ac = tid->ac;
2463
		txq = ac->txq;
2464

2465 2466 2467 2468 2469 2470 2471 2472 2473 2474
		spin_lock_bh(&txq->axq_lock);

		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		if (ac->sched) {
			list_del(&ac->list);
			tid->ac->sched = false;
2475
		}
2476 2477 2478 2479 2480 2481

		ath_tid_drain(sc, txq, tid);
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;

		spin_unlock_bh(&txq->axq_lock);
2482 2483
	}
}