1
/*
2
 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 4 5 6 7 8 9 10 11 12 13 14 15 16
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

17
#include <linux/dma-mapping.h>
S
Sujith 已提交
18
#include "ath9k.h"
19
#include "ar9003_mac.h"
20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)


36
/*
 * Data bits carried per OFDM symbol for the eight single-stream HT
 * modulation/coding combinations, indexed as [mcs % 8][0 = 20MHz,
 * 1 = 40MHz].  Multi-stream rates multiply this by the stream count
 * (see HT_RC_2_STREAMS()).
 */
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

F
Felix Fietkau 已提交
50
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
51 52 53
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
S
Sujith 已提交
54
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
55 56
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
57
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
58
			     struct list_head *head, bool internal);
F
Felix Fietkau 已提交
59 60
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
61
			     int txok);
62 63
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
64 65 66 67
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb);
68

69
/* Row indices into ath_max_4ms_framelen[] below. */
enum {
	MCS_HT20,	/* 20 MHz, long guard interval */
	MCS_HT20_SGI,	/* 20 MHz, short guard interval */
	MCS_HT40,	/* 40 MHz, long guard interval */
	MCS_HT40_SGI,	/* 40 MHz, short guard interval */
};

76 77 78 79 80 81 82 83 84 85 86 87
/*
 * Largest frame length (bytes) that still fits in a 4ms transmit
 * opportunity, per channel mode and MCS index (0-31).  Values are
 * capped at 65532 since the hardware length field is 16 bits.
 * Used by ath_lookup_rate() to bound A-MPDU aggregate size.
 */
static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864,  19300,  25736,  28952,  32172,
		6424,  12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628,  19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720,  14296,  21444,  28596,  32172,  35744,
		7140,  14284, 21428,  28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112,  42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780,  57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680,  13360,  20044,  26724,  40092,  53456,  60140,  65532,
		13348, 26700,  40052,  53400,  65532,  65532,  65532,  65532,
		20004, 40008,  60016,  65532,  65532,  65532,  65532,  65532,
		26644, 53292,  65532,  65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844,  22272,  29696,  44544,  59396,  65532,  65532,
		14832, 29668,  44504,  59340,  65532,  65532,  65532,  65532,
		22232, 44464,  65532,  65532,  65532,  65532,  65532,  65532,
		29616, 59232,  65532,  65532,  65532,  65532,  65532,  65532,
	}
};

S
Sujith 已提交
103 104 105
/*********************/
/* Aggregation logic */
/*********************/
106

S
Sujith 已提交
107
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
S
Sujith 已提交
108
{
S
Sujith 已提交
109
	struct ath_atx_ac *ac = tid->ac;
S
Sujith 已提交
110

S
Sujith 已提交
111 112
	if (tid->paused)
		return;
S
Sujith 已提交
113

S
Sujith 已提交
114 115
	if (tid->sched)
		return;
S
Sujith 已提交
116

S
Sujith 已提交
117 118
	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);
S
Sujith 已提交
119

S
Sujith 已提交
120 121
	if (ac->sched)
		return;
122

S
Sujith 已提交
123 124 125
	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}
126

S
Sujith 已提交
127
static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
128
{
129
	struct ath_txq *txq = tid->ac->txq;
130

131
	WARN_ON(!tid->paused);
132

133 134
	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;
135

136
	if (skb_queue_empty(&tid->buf_q))
S
Sujith 已提交
137
		goto unlock;
138

S
Sujith 已提交
139 140 141 142
	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
S
Sujith 已提交
143
}
144

145
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
146 147
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
148 149 150
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
151 152
}

S
Sujith 已提交
153
/*
 * Drain all software-queued frames of a TID while aggregation teardown
 * is in progress.  Frames that were already retried (and thus occupy a
 * slot in the block-ack window) are failed and removed from the BAW;
 * everything else is sent out as a normal (non-aggregated) frame.
 *
 * Takes txq->axq_lock; the lock is dropped around the completion /
 * send calls, which may sleep or re-enter queue processing.
 */
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		spin_unlock_bh(&txq->axq_lock);
		if (bf && fi->retries) {
			/* retried frame: release its BAW slot and fail it */
			list_add_tail(&bf->list, &bf_head);
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
		} else {
			/* never retried: transmit as a plain frame */
			ath_tx_send_normal(sc, txq, NULL, skb);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	/* window empty: aggregation session is fully torn down */
	if (tid->baw_head == tid->baw_tail) {
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;
	}

	spin_unlock_bh(&txq->axq_lock);
}
189

S
Sujith 已提交
190 191
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
S
Sujith 已提交
192
{
S
Sujith 已提交
193
	int index, cindex;
194

S
Sujith 已提交
195 196
	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
197

198
	__clear_bit(cindex, tid->tx_buf);
S
Sujith 已提交
199

200
	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
S
Sujith 已提交
201 202 203
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
S
Sujith 已提交
204
}
205

S
Sujith 已提交
206
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
207
			     u16 seqno)
S
Sujith 已提交
208
{
S
Sujith 已提交
209
	int index, cindex;
S
Sujith 已提交
210

211
	index  = ATH_BA_INDEX(tid->seq_start, seqno);
S
Sujith 已提交
212
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
213
	__set_bit(cindex, tid->tx_buf);
214

S
Sujith 已提交
215 216 217 218
	if (index >= ((tid->baw_tail - tid->baw_head) &
		(ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
219 220 221 222
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
/*
 * Drop every software-queued frame of a TID with error status and
 * reset the TID's block-ack window to empty.
 *
 * Caller holds txq->axq_lock (non-bh variant); the lock is dropped and
 * reacquired around each completion callback.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		/* no descriptor was ever attached: complete the skb directly */
		if (!bf) {
			spin_unlock(&txq->axq_lock);
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			spin_lock(&txq->axq_lock);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);

		/* retried frames hold a BAW slot that must be released */
		if (fi->retries)
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	/* collapse the block-ack window to empty */
	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

S
Sujith 已提交
266
static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
267
			     struct sk_buff *skb, int count)
268
{
269
	struct ath_frame_info *fi = get_frame_info(skb);
270
	struct ath_buf *bf = fi->bf;
S
Sujith 已提交
271
	struct ieee80211_hdr *hdr;
272
	int prev = fi->retries;
273

S
Sujith 已提交
274
	TX_STAT_INC(txq->axq_qnum, a_retries);
275 276 277
	fi->retries += count;

	if (prev > 0)
278
		return;
279

S
Sujith 已提交
280 281
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
282 283
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
		sizeof(*hdr), DMA_TO_DEVICE);
284 285
}

286
static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
S
Sujith 已提交
287
{
288
	struct ath_buf *bf = NULL;
S
Sujith 已提交
289 290

	spin_lock_bh(&sc->tx.txbuflock);
291 292

	if (unlikely(list_empty(&sc->tx.txbuf))) {
293 294 295
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}
296 297 298 299

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

S
Sujith 已提交
300 301
	spin_unlock_bh(&sc->tx.txbuflock);

302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319
	return bf;
}

/* Give an ath_buf back to the free pool (counterpart of ath_tx_get_buffer). */
static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

S
Sujith 已提交
320 321 322 323
	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
324
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
S
Sujith 已提交
325 326 327 328 329
	tbf->bf_state = bf->bf_state;

	return tbf;
}

330 331 332 333
static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
			        struct ath_tx_status *ts, int txok,
			        int *nframes, int *nbad)
{
334
	struct ath_frame_info *fi;
335 336 337 338 339 340 341 342 343 344 345 346 347 348 349
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
350
		fi = get_frame_info(bf->bf_mpdu);
351
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);
352 353 354 355 356 357 358 359 360 361

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}


S
Sujith 已提交
362 363
/*
 * Completion handler for an A-MPDU (or a single frame sent under an
 * aggregation session).  Walks the subframe chain, classifies each
 * subframe as acked / to-be-retried / permanently failed based on the
 * reported block-ack bitmap and software retry count, updates the
 * block-ack window and rate-control statistics, requeues pending
 * subframes on the TID (preserving order), and schedules a chip reset
 * when an AR5416 BA deafness condition is detected.
 *
 * @txok:  overall hardware transmit status for the aggregate
 * @retry: false when the TID is being torn down and subframes must not
 *         be retransmitted
 */
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
	int i, retries;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	/* snapshot the rate table before rc_status may rewrite it */
	memcpy(rates, tx_info->control.rates, sizeof(rates));

	/* total attempts: long retries of the final rate plus all
	 * attempts made on earlier rate series */
	retries = ts->ts_longretry + 1;
	for (i = 0; i < ts->ts_rateindex; i++)
		retries += rates[i].count;

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		/* station vanished: fail every subframe and bail out */
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			/* leave a stale tail descriptor for the holding
			 * logic unless more subframes follow */
			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when BA
			 * issue happens. Chip needs to be reset.
			 * But AP code may have synchronization issues
			 * when perform internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if ((tid->state & AGGR_CLEANUP) || !retry) {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			} else if (flush) {
				txpending = 1;
			} else if (fi->retries < ATH_MAX_SW_RETRIES) {
				/* skip retry accounting for frames dropped
				 * while the station slept */
				if (txok || !an->sleeping)
					ath_tx_set_retry(sc, txq, bf->bf_mpdu,
							 retries);

				txpending = 1;
			} else {
				/* retry budget exhausted: fail and send a
				 * BAR to move the receiver's window */
				txfail = 1;
				sendbar = 1;
				txfail_cnt++;
			}
		}

		/*
		 * Make sure the last desc is reclaimed if it
		 * not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
		    bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, seqno);
			spin_unlock_bh(&txq->axq_lock);

			/* feed rate control exactly once per aggregate */
			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
				rc_update = false;
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				!txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, seqno);
						spin_unlock_bh(&txq->axq_lock);

						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0,
								    !flush);
						break;
					}

					fi->bf = tbf;
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_buffered(sta, tid->tidno, true);

		spin_lock_bh(&txq->axq_lock);
		skb_queue_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping) {
			ath_tx_queue_tid(txq, tid);

			if (ts->ts_status & ATH9K_TXERR_FILT)
				tid->ac->clear_ps_filter = true;
		}
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP)
		ath_tx_flush_tid(sc, tid);

	rcu_read_unlock();

	if (needreset) {
		RESET_STAT_INC(sc, RESET_TYPE_TX_ERROR);
		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
	}
}
583

584 585 586 587 588 589 590 591 592 593 594
static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

595 596 597 598
	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

599 600 601 602 603 604 605
		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

S
Sujith 已提交
606 607
/*
 * Compute the maximum A-MPDU aggregate length (bytes) for a frame's
 * rate series, bounded by the 4ms airtime table, bluetooth-coex
 * restrictions, the 16-bit hardware length limit, and the peer's
 * advertised max A-MPDU size.  Returns 0 when aggregation must not be
 * used (probe rate or a legacy rate in the series).
 */
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	struct ath_mci_profile *mci = &sc->btcoex.mci;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			/* SGI variants are the odd rows of the table */
			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate, if rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) && mci->aggr_limit)
		aggr_limit = (max_4ms_framelen * mci->aggr_limit) >> 4;
	else if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}
676

S
Sujith 已提交
677
/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption enabled, hardware requires some more padding between
	 * subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *      The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add delimiter when using RTS/CTS with aggregation
	 * and non enterprise AR9003 card
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40Mhz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	/* density below one symbol time still needs one symbol's worth */
	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	/* pad the subframe up to the density-derived minimum length */
	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

S
Sujith 已提交
753
/*
 * Build one A-MPDU from the head of a TID's software queue: link
 * buffers into *bf_q, assign per-subframe delimiter counts, add each
 * new (non-retry) subframe to the block-ack window, and stop when the
 * BAW, the rate-derived aggregate limit, or the subframe limit is hit.
 * The total aggregate length (excluding the last subframe's padding)
 * is returned through *aggr_len.
 *
 * Caller holds txq->axq_lock and guarantees tid->buf_q is non-empty.
 */
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
		al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	u16 seqno;

	do {
		skb = skb_peek(&tid->buf_q);
		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);

		/*
		 * NOTE(review): if ath_tx_setup_buffer() fails, the skb
		 * stays at the head of buf_q and this loop retries the
		 * same frame — confirm against newer upstream handling.
		 */
		if (!bf)
			continue;

		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
		seqno = bf->bf_state.seqno;
		if (!bf_first)
			bf_first = bf;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		/* rate lookup is done once, for the first subframe */
		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
		     ath_lookup_legacy(bf))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, seqno);
		bf->bf_state.ndelim = ndelim;

		__skb_unlink(skb, &tid->buf_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;

		bf_prev = bf;

	} while (!skb_queue_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}
849

850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878
/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width  - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4us v/s 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 bits, symbol_bits, symbols, dur;
	int nss;

	/* number of symbols: PLCP overhead + payload, rounded up */
	nss = HT_RC_2_STREAMS(rix);
	bits = (pktlen << 3) + OFDM_PLCP_BITS;
	symbol_bits = bits_per_symbol[rix % 8][width] * nss;
	symbols = (bits + symbol_bits - 1) / symbol_bits;

	dur = half_gi ? SYMBOL_TIME_HALFGI(symbols) : SYMBOL_TIME(symbols);

	/* add duration of legacy/ht training and signal fields */
	dur += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(nss);

	return dur;
}

879 880
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_info *info, int len)
881 882 883 884 885 886 887
{
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
888 889
	int i;
	u8 rix = 0;
890 891 892 893 894

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;
895 896 897

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);
898 899 900 901 902 903 904

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
905
	info->rtscts_rate = rate->hw_value;
906
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
907
		info->rtscts_rate |= rate->hw_value_short;
908 909 910 911 912 913 914 915 916

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
917
		info->rates[i].Tries = rates[i].count;
918 919

		    if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
920 921
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_RTSENA;
922
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
923 924
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_CTSENA;
925 926 927
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
928
			info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
929
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
930
			info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
931 932 933 934 935 936 937

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
938 939 940 941
			info->rates[i].Rate = rix | 0x80;
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);
			info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
942 943
				 is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
944
				info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
945 946 947 948 949 950 951 952 953 954 955
			continue;
		}

		/* legacy rates */
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
956
		info->rates[i].Rate = rate->hw_value;
957 958
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
959
				info->rates[i].Rate |= rate->hw_value_short;
960 961 962 963 964
		} else {
			is_sp = false;
		}

		if (bf->bf_state.bfs_paprd)
965
			info->rates[i].ChSel = ah->txchainmask;
966
		else
967 968
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);
969

970
		info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
971 972 973 974 975
			phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
976
		info->flags &= ~ATH9K_TXDESC_RTSENA;
977 978

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
979 980 981
	if (info->flags & ATH9K_TXDESC_RTSENA)
		info->flags &= ~ATH9K_TXDESC_CTSENA;
}
982

983 984 985 986 987 988 989 990
static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
991

992 993 994 995 996 997 998 999 1000 1001 1002 1003
	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
1004 1005
}

1006 1007
/*
 * Fill the hardware tx descriptors for a frame or an A-MPDU chain.
 *
 * Rate series, flags and tx power are taken from the first buffer and
 * shared across the chain; per-subframe fields (buffer address, length,
 * key, aggregate position/delimiters) are written for each ath_buf by
 * walking the bf_next list.
 */
static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_txq *txq, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
	struct ath_buf *bf_first = bf;
	struct ath_tx_info info;
	bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

	memset(&info, 0, sizeof(info));
	info.is_first = true;
	info.is_last = true;
	info.txpower = MAX_RATE_POWER;
	info.qcu = txq->axq_qnum;

	/* Always request a tx interrupt for this descriptor chain. */
	info.flags = ATH9K_TXDESC_INTREQ;
	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		info.flags |= ATH9K_TXDESC_NOACK;
	if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
		info.flags |= ATH9K_TXDESC_LDPC;

	/* Fill in the rate series (may also set RTS/CTS flags). */
	ath_buf_set_rate(sc, bf, &info, len);

	if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
		info.flags |= ATH9K_TXDESC_CLRDMASK;

	/* Pass the PAPRD (power amplifier pre-distortion) chain number. */
	if (bf->bf_state.bfs_paprd)
		info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;


	/* Per-subframe descriptor fields. */
	while (bf) {
		struct sk_buff *skb = bf->bf_mpdu;
		struct ath_frame_info *fi = get_frame_info(skb);

		info.type = get_hw_packet_type(skb);
		/* Link descriptors of the chain together; 0 ends the list. */
		if (bf->bf_next)
			info.link = bf->bf_next->bf_daddr;
		else
			info.link = 0;

		info.buf_addr[0] = bf->bf_buf_addr;
		info.buf_len[0] = skb->len;
		info.pkt_len = fi->framelen;
		info.keyix = fi->keyix;
		info.keytype = fi->keytype;

		if (aggr) {
			/* Mark the subframe's position within the A-MPDU. */
			if (bf == bf_first)
				info.aggr = AGGR_BUF_FIRST;
			else if (!bf->bf_next)
				info.aggr = AGGR_BUF_LAST;
			else
				info.aggr = AGGR_BUF_MIDDLE;

			info.ndelim = bf->bf_state.ndelim;
			info.aggr_len = len;
		}

		ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
		bf = bf->bf_next;
	}
}

S
Sujith 已提交
1069 1070 1071
/*
 * Form aggregates from a TID's software queue and hand them to the
 * hardware queue, until the queue is deep enough or the block-ack
 * window closes.  Called with txq->axq_lock held.
 */
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ieee80211_tx_info *tx_info;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (skb_queue_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);

		/* One-shot: ask hw to clear the PS filter for this station. */
		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
			tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
		} else {
			tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
		}

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
			bf->bf_state.bf_type = BUF_AMPDU;
		} else {
			TX_STAT_INC(txq->axq_qnum, a_aggr);
		}

		ath_tx_fill_desc(sc, bf, txq, aggr_len);
		ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

1118 1119
/*
 * Begin an ADDBA (tx aggregation) session for the given station/TID.
 * Reports the starting sequence number through @ssn and resets the
 * block-ack window state.  Returns -EAGAIN while a previous session is
 * still being torn down or is already established.
 */
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	/* Suspend tx for this TID until the ADDBA handshake completes. */
	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;

	/* Reset block-ack window bookkeeping. */
	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = 0;
	txtid->baw_tail = 0;

	return 0;
}
1139

S
Sujith 已提交
1140
/*
 * Tear down the tx aggregation session for a station/TID.  If subframes
 * are still in flight, defer the final cleanup to tx completion by
 * setting AGGR_CLEANUP; otherwise drop the ADDBA state immediately.
 */
void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	/* Cleanup already pending; nothing more to do. */
	if (txtid->state & AGGR_CLEANUP)
		return;

	/* Session never completed: just cancel the in-progress ADDBA. */
	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}
1171

1172 1173
/*
 * Station entered power save: unschedule all of its TIDs and tell
 * mac80211 which TIDs still have frames buffered in the driver.
 */
void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
		       struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	bool buffered;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		/* Skip TIDs that are not on a schedule list. */
		if (!tid->sched)
			continue;

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		buffered = !skb_queue_empty(&tid->buf_q);

		/* Remove the TID (and its AC, if scheduled) from scheduling. */
		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

		spin_unlock_bh(&txq->axq_lock);

		/* Report buffered state outside the queue lock. */
		ieee80211_sta_set_buffered(sta, tidno, buffered);
	}
}

/*
 * Station woke up from power save: re-queue all TIDs that have pending
 * frames and kick the queue scheduler.  Also arm the PS-filter clear
 * flag so the next frame resets the hardware's PS filter.
 */
void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);
		ac->clear_ps_filter = true;

		/* Reschedule only TIDs that have work and are not paused. */
		if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
			ath_tx_queue_tid(txq, tid);
			ath_txq_schedule(sc, txq);
		}

		spin_unlock_bh(&txq->axq_lock);
	}
}

S
Sujith 已提交
1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247
/*
 * ADDBA handshake finished: size the block-ack window from the peer's
 * HT capabilities, mark the session established and resume the TID.
 * No-op when tx aggregation is disabled.
 */
void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid;

	if (!(sc->sc_flags & SC_OP_TXAGGR))
		return;

	txtid = ATH_AN_2_TID(an, tid);
	txtid->baw_size =
		IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
	txtid->state |= AGGR_ADDBA_COMPLETE;
	txtid->state &= ~AGGR_ADDBA_PROGRESS;
	ath_tx_resume_tid(sc, txtid);
}

S
Sujith 已提交
1250 1251 1252
/********************/
/* Queue Management */
/********************/
1253

S
Sujith 已提交
1254 1255
/*
 * Drop every software-queued frame on a tx queue: walk all scheduled
 * access categories and their TIDs, unschedule them and drain each
 * TID's pending frames.
 */
static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

S
Sujith 已提交
1271
/*
 * Allocate and configure a hardware tx queue of the given type/subtype
 * and initialize the corresponding software ath_txq state (first use
 * only).  Returns NULL when the hardware has no queue to spare, which
 * is normal on parts with few tx queues.
 */
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_tx_queue_info qi;
	/* Map WMM access categories to hardware queue subtypes. */
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	/* First-time software initialization of this queue. */
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		/* EDMA FIFO lists (unused on legacy DMA chips). */
		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
	}
	return &sc->tx.txq[axq_qnum];
}

S
Sujith 已提交
1344 1345 1346
/*
 * Update the EDCA parameters (AIFS, CWmin/max, burst/ready time) of a
 * hardware tx queue and reset it.  The beacon queue is special-cased:
 * parameters are only cached for later use by ath_beaconq_config.
 * Returns 0 on success, -EIO when the hardware rejects the update.
 */
int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	/* Read current properties and overlay the caller's values. */
	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		/* Apply the new parameters to the hardware. */
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

/*
 * Reconfigure the CAB (content-after-beacon) queue's ready time as a
 * clamped percentage of the beacon interval.  Always returns 0.
 *
 * NOTE(review): the return value of ath_txq_update() is ignored here —
 * a failed hardware update is not reported to the caller; verify this
 * is intentional.
 */
int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (cur_conf->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

1403 1404 1405 1406 1407 1408
/*
 * True for A-MPDU buffers that are not rate-control probes.  Only these
 * frames are counted in txq->axq_ampdu_depth.
 *
 * (Reformatted: the original used 4-space indentation, inconsistent
 * with the kernel tab style used throughout this file.)
 */
static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);

	return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

1409 1410
/*
 * Complete (flush) every buffer on the given descriptor list with a
 * synthetic ATH9K_TX_FLUSH status.  Called with txq->axq_lock held;
 * the lock is dropped around the completion callbacks, as the sparse
 * annotations below document.
 */
static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *list, bool retry_tx)
	__releases(txq->axq_lock)
	__acquires(txq->axq_lock)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	ts.ts_status = ATH9K_TX_FLUSH;
	INIT_LIST_HEAD(&bf_head);

	while (!list_empty(list)) {
		bf = list_first_entry(list, struct ath_buf, list);

		/* Stale buffers have already completed; just recycle them. */
		if (bf->bf_stale) {
			list_del(&bf->list);

			ath_tx_return_buffer(sc, bf);
			continue;
		}

		/* Detach the whole frame (possibly an A-MPDU chain). */
		lastbf = bf->bf_lastbf;
		list_cut_position(&bf_head, list, &lastbf->list);

		txq->axq_depth--;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;

		/* Completion handlers may sleep-ish work; drop the lock. */
		spin_unlock_bh(&txq->axq_lock);
		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
					     retry_tx);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock_bh(&txq->axq_lock);
	}
}
1448

1449 1450 1451 1452 1453 1454 1455 1456
/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
1457
	spin_lock_bh(&txq->axq_lock);
1458
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1459
		int idx = txq->txq_tailidx;
1460

1461 1462 1463 1464 1465
		while (!list_empty(&txq->txq_fifo[idx])) {
			ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
					   retry_tx);

			INCR(idx, ATH_TXFIFO_DEPTH);
1466
		}
1467
		txq->txq_tailidx = idx;
1468
	}
1469

1470 1471 1472 1473
	txq->axq_link = NULL;
	txq->axq_tx_inprogress = false;
	ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);

1474
	/* flush any pending frames if aggregation is enabled */
1475 1476 1477 1478
	if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
		ath_txq_drain_pending_buffers(sc, txq);

	spin_unlock_bh(&txq->axq_lock);
1479 1480
}

1481
/*
 * Abort TX DMA and drain every configured tx queue.  Returns true when
 * the hardware reports all queues idle, false if any queue still had
 * pending DMA after the abort (logged as an error).
 */
bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i;
	u32 npend = 0;

	/* Device is gone/invalid; report success so callers proceed. */
	if (sc->sc_flags & SC_OP_INVALID)
		return true;

	ath9k_hw_abort_tx_dma(ah);

	/* Check if any queue remains active */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
			npend |= BIT(i);
	}

	if (npend)
		ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		/*
		 * The caller will resume queues with ieee80211_wake_queues.
		 * Mark the queue as not stopped to prevent ath_tx_complete
		 * from waking the queue too early.
		 */
		txq = &sc->tx.txq[i];
		txq->stopped = false;
		ath_draintxq(sc, txq, retry_tx);
	}

	return !npend;
}
1522

S
Sujith 已提交
1523
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
S
Sujith 已提交
1524
{
S
Sujith 已提交
1525 1526
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
S
Sujith 已提交
1527
}
1528

1529 1530 1531
/* For each axq_acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 */
S
Sujith 已提交
1532 1533
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
1534 1535
	struct ath_atx_ac *ac, *ac_tmp, *last_ac;
	struct ath_atx_tid *tid, *last_tid;
1536

1537
	if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
1538
	    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
S
Sujith 已提交
1539
		return;
1540

S
Sujith 已提交
1541
	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
1542
	last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
1543

1544 1545 1546 1547
	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
		list_del(&ac->list);
		ac->sched = false;
1548

1549 1550 1551 1552 1553
		while (!list_empty(&ac->tid_q)) {
			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
					       list);
			list_del(&tid->list);
			tid->sched = false;
1554

1555 1556
			if (tid->paused)
				continue;
1557

1558
			ath_tx_sched_aggr(sc, txq, tid);
1559

1560 1561 1562 1563
			/*
			 * add tid to round-robin queue if more frames
			 * are pending for the tid
			 */
1564
			if (!skb_queue_empty(&tid->buf_q))
1565
				ath_tx_queue_tid(txq, tid);
1566

1567 1568 1569 1570
			if (tid == last_tid ||
			    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
				break;
		}
1571

1572 1573 1574 1575 1576
		if (!list_empty(&ac->tid_q)) {
			if (!ac->sched) {
				ac->sched = true;
				list_add_tail(&ac->list, &txq->axq_acq);
			}
1577
		}
1578 1579 1580 1581

		if (ac == last_ac ||
		    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
			return;
S
Sujith 已提交
1582 1583
	}
}
1584

S
Sujith 已提交
1585 1586 1587 1588
/***********/
/* TX, DMA */
/***********/

1589
/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *bf_last;
	bool puttxbuf = false;
	bool edma;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	bf = list_first_entry(head, struct ath_buf, list);
	bf_last = list_entry(head->prev, struct ath_buf, list);

	ath_dbg(common, ATH_DBG_QUEUE,
		"qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
		/* EDMA with a free FIFO slot: push directly to the FIFO. */
		list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		puttxbuf = true;
	} else {
		/* Legacy DMA (or full FIFO): append to the descriptor list. */
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link) {
			/* Chain onto the previous tail descriptor. */
			ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
			ath_dbg(common, ATH_DBG_XMIT,
				"link[%u] (%p)=%llx (%p)\n",
				txq->axq_qnum, txq->axq_link,
				ito64(bf->bf_daddr), bf->bf_desc);
		} else if (!edma)
			/* Queue was idle: hardware needs the head pointer. */
			puttxbuf = true;

		txq->axq_link = bf_last->bf_desc;
	}

	if (puttxbuf) {
		TX_STAT_INC(txq->axq_qnum, puttxbuf);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	}

	if (!edma) {
		TX_STAT_INC(txq->axq_qnum, txstart);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}

	/* Internal requeues must not be counted twice in the depths. */
	if (!internal) {
		txq->axq_depth++;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth++;
	}
}
1654

S
Sujith 已提交
1655
/*
 * Transmit a frame belonging to an aggregation-enabled TID: either
 * queue it in software for later aggregation, or — when the software
 * queue is empty and conditions allow — send it immediately as a
 * single-frame A-MPDU.  Called with txq->axq_lock held.
 */
static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct sk_buff *skb, struct ath_tx_control *txctl)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct list_head bf_head;
	struct ath_buf *bf;

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
	    txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
		__skb_queue_tail(&tid->buf_q, skb);
		/* Do not schedule sleeping stations' TIDs. */
		if (!txctl->an || !txctl->an->sleeping)
			ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

	/* Note: ath_tx_setup_buffer() frees the skb on failure. */
	bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
	if (!bf)
		return;

	bf->bf_state.bf_type = BUF_AMPDU;
	INIT_LIST_HEAD(&bf_head);
	list_add(&bf->list, &bf_head);

	/* Add sub-frame to BAW */
	ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);

	/* Queue to h/w without aggregation */
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
	bf->bf_lastbf = bf;
	ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
	ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
}

F
Felix Fietkau 已提交
1701
/*
 * Send a single, non-aggregated frame directly to the hardware queue.
 * Reuses a pre-assigned ath_buf if the frame already has one, otherwise
 * sets one up (which frees the skb on failure).
 */
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct list_head bf_head;
	struct ath_buf *bf;

	bf = fi->bf;
	if (!bf)
		bf = ath_tx_setup_buffer(sc, txq, tid, skb);

	if (!bf)
		return;

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);
	bf->bf_state.bf_type = 0;

	/* update starting sequence number for subsequent ADDBA request */
	if (tid)
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	bf->bf_lastbf = bf;
	ath_tx_fill_desc(sc, bf, txq, fi->framelen);
	ath_tx_txqaddbuf(sc, txq, &bf_head, false);
	TX_STAT_INC(txq->axq_qnum, queued);
}

1729 1730
/*
 * Initialize the per-frame ath_frame_info stored in the skb's control
 * block: hardware key index/type and total frame length.  This
 * overwrites the mac80211 control pointers (vif/sta/hw_key), so they
 * must not be used after this call.
 */
static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
			     int framelen)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = tx_info->control.sta;
	struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_node *an = NULL;
	enum ath9k_key_type keytype;

	keytype = ath9k_cmn_get_hw_crypto_keytype(skb);

	if (sta)
		an = (struct ath_node *) sta->drv_priv;

	memset(fi, 0, sizeof(*fi));
	if (hw_key)
		fi->keyix = hw_key->hw_key_idx;
	else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
		/* Fall back to the station's power-save key for data frames. */
		fi->keyix = an->ps_key;
	else
		fi->keyix = ATH9K_TXKEYIX_INVALID;
	fi->keytype = keytype;
	fi->framelen = framelen;
}

1756 1757 1758 1759
/*
 * On APM-capable hardware, reduce a three-chain (0x7) tx mask to two
 * chains (0x3) for rates below 0x90 on 5 GHz channels; otherwise return
 * the chainmask unchanged.
 */
u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_channel *curchan = ah->curchan;
	bool apm = !!(ah->caps.hw_caps & ATH9K_HW_CAP_APM);
	bool is_5ghz = !!(curchan->channelFlags & CHANNEL_5GHZ);

	if (apm && is_5ghz && chainmask == 0x7 && rate < 0x90)
		return 0x3;

	return chainmask;
}

1768 1769 1770 1771
/*
 * Assign a descriptor (and sequence number if necessary,
 * and map buffer for DMA. Frees skb on error
 */
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_buf *bf;
	u16 seqno;

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
		goto error;
	}

	ATH_TXBUF_RESET(bf);

	/* For aggregation TIDs, stamp the next BAW sequence number into
	 * the 802.11 header and remember it in the buffer state.
	 */
	if (tid) {
		seqno = tid->seq_next;
		hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
		INCR(tid->seq_next, IEEE80211_SEQ_MAX);
		bf->bf_state.seqno = seqno;
	}

	bf->bf_mpdu = skb;

	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
		/* Undo partial setup before recycling the buffer. */
		bf->bf_mpdu = NULL;
		bf->bf_buf_addr = 0;
		ath_err(ath9k_hw_common(sc->sc_ah),
			"dma_mapping_error() on TX\n");
		ath_tx_return_buffer(sc, bf);
		goto error;
	}

	fi->bf = bf;

	return bf;

error:
	dev_kfree_skb_any(skb);
	return NULL;
}

/* FIXME: tx power */
/*
 * Dispatch a frame to the tx path: route QoS data frames of
 * aggregation-enabled stations through the A-MPDU path, everything else
 * through the normal single-frame path.  Takes txq->axq_lock.
 */
static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
			     struct ath_tx_control *txctl)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf;
	u8 tidno;

	spin_lock_bh(&txctl->txq->axq_lock);
	/* Resolve the TID for QoS data frames when aggregation is on. */
	if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
		ieee80211_is_data_qos(hdr->frame_control)) {
		tidno = ieee80211_get_qos_ctl(hdr)[0] &
			IEEE80211_QOS_CTL_TID_MASK;
		tid = ATH_AN_2_TID(txctl->an, tidno);

		WARN_ON(tid->ac->txq != txctl->txq);
	}

	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
		/*
		 * Try aggregation if it's a unicast data frame
		 * and the destination is HT capable.
		 */
		ath_tx_send_ampdu(sc, tid, skb, txctl);
	} else {
		/* Note: ath_tx_setup_buffer() frees the skb on failure. */
		bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
		if (!bf)
			goto out;

		bf->bf_state.bfs_paprd = txctl->paprd;

		if (txctl->paprd)
			bf->bf_state.bfs_paprd_timestamp = jiffies;

		ath_tx_send_normal(sc, txctl->txq, tid, skb);
	}

out:
	spin_unlock_bh(&txctl->txq->axq_lock);
}

1863
/* Upon failure caller should free skb */
/*
 * mac80211 tx entry point: assign sequence numbers when requested, add
 * 4-byte header padding, set up per-frame driver state, apply queue
 * flow control, and hand the frame to the DMA path.  Returns 0 on
 * success or -ENOMEM when the skb lacks headroom for padding.
 */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = info->control.sta;
	struct ieee80211_vif *vif = info->control.vif;
	struct ath_softc *sc = hw->priv;
	struct ath_txq *txq = txctl->txq;
	int padpos, padsize;
	int frmlen = skb->len + FCS_LEN;
	int q;

	/* NOTE:  sta can be NULL according to net/mac80211.h */
	if (sta)
		txctl->an = (struct ath_node *)sta->drv_priv;

	/* Account for the ICV the hardware appends when encrypting. */
	if (info->control.hw_key)
		frmlen += info->control.hw_key->icv_len;

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	/* Add the padding after the header if this is not already done */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize)
			return -ENOMEM;

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
		hdr = (struct ieee80211_hdr *) skb->data;
	}

	/* Non-AP interfaces and non-data frames must clear the PS filter. */
	if ((vif && vif->type != NL80211_IFTYPE_AP &&
	            vif->type != NL80211_IFTYPE_AP_VLAN) ||
	    !ieee80211_is_data(hdr->frame_control))
		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;

	setup_frame_info(hw, skb, frmlen);

	/*
	 * At this point, the vif, hw_key and sta pointers in the tx control
	 * info are no longer valid (overwritten by the ath_frame_info data.
	 */

	/* Flow control: stop the mac80211 queue when ours gets too deep. */
	q = skb_get_queue_mapping(skb);
	spin_lock_bh(&txq->axq_lock);
	if (txq == sc->tx.txq_map[q] &&
	    ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
		ieee80211_stop_queue(sc->hw, q);
		txq->stopped = 1;
	}
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_start_dma(sc, skb, txctl);
	return 0;
}

S
Sujith 已提交
1933 1934 1935
/*****************/
/* TX Completion */
/*****************/
S
Sujith 已提交
1936

S
Sujith 已提交
1937
/*
 * Hand a completed frame back to mac80211.
 *
 * Translates the driver-internal tx_flags into mac80211 status flags,
 * strips the 4-byte alignment padding that the TX path inserted after
 * the 802.11 header, clears the powersave TX-ACK wait flag once the
 * hardware queue has drained, and wakes the matching mac80211 queue if
 * it was stopped for back-pressure.  Takes txq->axq_lock itself, so the
 * caller (see ath_tx_process_buffer) must not hold it.
 */
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
	int q, padpos, padsize;

	ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);

	/* Block-ack session torn down: tell mac80211 no BA was received. */
	if (tx_flags & ATH_TX_BAR)
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

	if (!(tx_flags & ATH_TX_ERROR))
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;

	/*
	 * The TX path pads the 802.11 header out to a 4-byte boundary;
	 * undo that (header bytes move forward over the pad) before the
	 * skb is returned to mac80211.
	 */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len>padpos+padsize) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	/* Last pending frame ACKed: allow the chip to go back to sleep. */
	if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
		ath_dbg(common, ATH_DBG_PS,
			"Going back to sleep after having received TX status (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}

	/*
	 * Only frames sent on the per-AC mapped queue count toward the
	 * pending_frames back-pressure accounting; wake the mac80211
	 * queue once we drop back below the high-water mark.
	 */
	q = skb_get_queue_mapping(skb);
	if (txq == sc->tx.txq_map[q]) {
		spin_lock_bh(&txq->axq_lock);
		if (WARN_ON(--txq->pending_frames < 0))
			txq->pending_frames = 0;

		if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
			ieee80211_wake_queue(sc->hw, q);
			txq->stopped = 0;
		}
		spin_unlock_bh(&txq->axq_lock);
	}

	/* Ownership of skb passes back to mac80211 here. */
	ieee80211_tx_status(hw, skb);
}
1991

S
Sujith 已提交
1992
/*
 * Complete a single ath_buf chain: unmap its DMA mapping, deliver (or
 * free) the skb, and return the descriptors to the free pool.
 *
 * @bf_q:    list of ath_buf's making up this MPDU; spliced back onto
 *           sc->tx.txbuf before returning.
 * @txok:    true if the hardware reported successful transmission.
 * @sendbar: true if a BAR needs to be indicated (sets ATH_TX_BAR).
 */
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	unsigned long flags;
	int tx_flags = 0;

	if (sendbar)
		tx_flags = ATH_TX_BAR;

	if (!txok)
		tx_flags |= ATH_TX_ERROR;

	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;

	/* Unmap before anything may touch or free the skb data. */
	dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
	bf->bf_buf_addr = 0;

	if (bf->bf_state.bfs_paprd) {
		/*
		 * PAPRD calibration frame (presumably; inferred from the
		 * bfs_paprd naming).  If the waiter already timed out,
		 * just drop the skb; otherwise signal the completion so
		 * the calibration code can proceed.
		 */
		if (time_after(jiffies,
				bf->bf_state.bfs_paprd_timestamp +
				msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
			dev_kfree_skb_any(skb);
		else
			complete(&sc->paprd_complete);
	} else {
		ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
		ath_tx_complete(sc, skb, tx_flags, txq);
	}
	/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
	 * accidentally reference it later.
	 */
	bf->bf_mpdu = NULL;

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}

F
Felix Fietkau 已提交
2037 2038
/*
 * Fill in the mac80211 rate-control status fields for a completed frame
 * (or aggregate of @nframes frames, @nbad of which failed).
 *
 * Records the ACK RSSI on success, the A-MPDU length/ack-length pair,
 * invalidates all rate table entries past the one actually used, and
 * stores the real long-retry count for the final rate.
 */
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	u8 i, tx_rateindex;

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	tx_rateindex = ts->ts_rateindex;
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

		BUG_ON(nbad > nframes);
	}
	tx_info->status.ampdu_len = nframes;
	tx_info->status.ampdu_ack_len = nframes - nbad;

	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
		/*
		 * If an underrun error is seen assume it as an excessive
		 * retry only if max frame trigger level has been reached
		 * (2 KB for single stream, and 4 KB for dual stream).
		 * Adjust the long retry as if the frame was tried
		 * hw->max_rate_tries times to affect how rate control updates
		 * PER for the failed rate.
		 * In case of congestion on the bus penalizing this type of
		 * underruns should help hardware actually transmit new frames
		 * successfully by eventually preferring slower rates.
		 * This itself should also alleviate congestion on the bus.
		 */
		if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
		                             ATH9K_TX_DELIM_UNDERRUN)) &&
		    ieee80211_is_data(hdr->frame_control) &&
		    ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
			tx_info->status.rates[tx_rateindex].count =
				hw->max_rate_tries;
	}

	/* Rates after the one actually used were never tried. */
	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	/* +1: count includes the successful/last attempt, not just retries.
	 * Note this overwrites the max_rate_tries penalty applied above
	 * for underruns. */
	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}

2092 2093 2094
/*
 * Common completion step shared by the legacy and EDMA tasklets:
 * update queue depth accounting, then complete the buffer (single frame
 * or aggregate) and reschedule the queue.
 *
 * Called with txq->axq_lock held; DROPS the lock around the completion
 * calls (which take it themselves, see ath_tx_complete) and re-acquires
 * it before returning — hence the sparse __releases/__acquires
 * annotations.
 */
static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_tx_status *ts, struct ath_buf *bf,
				  struct list_head *bf_head)
	__releases(txq->axq_lock)
	__acquires(txq->axq_lock)
{
	int txok;

	txq->axq_depth--;
	txok = !(ts->ts_status & ATH9K_TXERR_MASK);
	txq->axq_tx_inprogress = false;	/* feeds the TX-hang watchdog */
	if (bf_is_ampdu_not_probing(bf))
		txq->axq_ampdu_depth--;

	spin_unlock_bh(&txq->axq_lock);

	if (!bf_isampdu(bf)) {
		/* Single frame: nframes=1, nbad is 1 iff the frame failed. */
		ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
		ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
	} else
		ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);

	spin_lock_bh(&txq->axq_lock);

	if (sc->sc_flags & SC_OP_TXAGGR)
		ath_txq_schedule(sc, txq);
}

S
Sujith 已提交
2120
/*
 * Legacy (non-EDMA) TX completion: reap finished descriptor chains from
 * @txq until the hardware reports one still in progress.
 *
 * Runs under txq->axq_lock for the whole loop (ath_tx_process_buffer
 * drops/re-takes it internally).  Bails out early if a chip reset is
 * already queued, since the reset path will flush the queues itself.
 */
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int status;

	ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	spin_lock_bh(&txq->axq_lock);
	for (;;) {
		/* Reset pending: leave the queue for the reset handler. */
		if (work_pending(&sc->hw_reset_work))
			break;

		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			if (sc->sc_flags & SC_OP_TXAGGR)
				ath_txq_schedule(sc, txq);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-load the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_stale) {
			bf_held = bf;
			/* Only the stale holding descriptor left: done. */
			if (list_is_last(&bf_held->list, &txq->axq_q))
				break;

			bf = list_entry(bf_held->list.next, struct ath_buf,
					list);
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		/* Status of the last descriptor covers the whole chain. */
		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		if (status == -EINPROGRESS)
			break;

		TX_STAT_INC(txq->axq_qnum, txprocdesc);

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_stale = true;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
				&txq->axq_q, lastbf->list.prev);

		/* The previous holding descriptor is finally free. */
		if (bf_held) {
			list_del(&bf_held->list);
			ath_tx_return_buffer(sc, bf_held);
		}

		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
	}
	spin_unlock_bh(&txq->axq_lock);
}

S
Sujith 已提交
2196
static void ath_tx_complete_poll_work(struct work_struct *work)
2197 2198 2199 2200 2201 2202
{
	struct ath_softc *sc = container_of(work, struct ath_softc,
			tx_complete_work.work);
	struct ath_txq *txq;
	int i;
	bool needreset = false;
2203 2204 2205
#ifdef CONFIG_ATH9K_DEBUGFS
	sc->tx_complete_poll_work_seen++;
#endif
2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			spin_lock_bh(&txq->axq_lock);
			if (txq->axq_depth) {
				if (txq->axq_tx_inprogress) {
					needreset = true;
					spin_unlock_bh(&txq->axq_lock);
					break;
				} else {
					txq->axq_tx_inprogress = true;
				}
			}
			spin_unlock_bh(&txq->axq_lock);
		}

	if (needreset) {
J
Joe Perches 已提交
2224 2225
		ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
			"tx hung, resetting the chip\n");
2226
		RESET_STAT_INC(sc, RESET_TYPE_TX_HANG);
2227
		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
2228 2229
	}

2230
	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
2231 2232 2233 2234
			msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
}


2235

S
Sujith 已提交
2236
void ath_tx_tasklet(struct ath_softc *sc)
2237
{
S
Sujith 已提交
2238 2239
	int i;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
2240

S
Sujith 已提交
2241
	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
2242

S
Sujith 已提交
2243 2244 2245
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
2246 2247 2248
	}
}

2249 2250
/*
 * EDMA (AR9003+) TX completion: drain the global TX status ring.  Each
 * status entry names its queue (ts.qid); completed buffers are cut from
 * that queue's current FIFO slot.  When a FIFO slot empties, any frames
 * still pending on the software axq_q are pushed down to the hardware.
 */
void ath_tx_edma_tasklet(struct ath_softc *sc)
{
	struct ath_tx_status ts;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	int status;

	for (;;) {
		/* Reset pending: the reset path will flush the queues. */
		if (work_pending(&sc->hw_reset_work))
			break;

		/* NULL descriptor: EDMA reads from the status ring. */
		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
			ath_dbg(common, ATH_DBG_XMIT,
				"Error processing tx status\n");
			break;
		}

		/* Skip beacon completions */
		if (ts.qid == sc->beacon.beaconq)
			continue;

		txq = &sc->tx.txq[ts.qid];

		spin_lock_bh(&txq->axq_lock);

		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
			spin_unlock_bh(&txq->axq_lock);
			return;
		}

		bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
				      struct ath_buf, list);
		lastbf = bf->bf_lastbf;

		/* Cut the completed chain [bf..lastbf] out of the FIFO. */
		INIT_LIST_HEAD(&bf_head);
		list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
				  &lastbf->list);

		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
			/* Slot drained: advance tail and refill hardware
			 * from the software-pending queue, if any. */
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);

			if (!list_empty(&txq->axq_q)) {
				struct list_head bf_q;

				INIT_LIST_HEAD(&bf_q);
				txq->axq_link = NULL;
				list_splice_tail_init(&txq->axq_q, &bf_q);
				ath_tx_txqaddbuf(sc, txq, &bf_q, true);
			}
		}

		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
		spin_unlock_bh(&txq->axq_lock);
	}
}

S
Sujith 已提交
2311 2312 2313
/*****************/
/* Init, Cleanup */
/*****************/
2314

2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349
/*
 * Allocate the DMA-coherent TX status ring (@size entries of the
 * hardware-defined status length).  Returns 0 or -ENOMEM.
 */
static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
	struct ath_descdma *dd = &sc->txsdma;

	/* One fixed-size status record per ring entry. */
	dd->dd_desc_len = size * sc->sc_ah->caps.txs_len;
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);

	return dd->dd_desc ? 0 : -ENOMEM;
}

/*
 * Set up EDMA TX: allocate the status ring and hand it to the hardware.
 * Returns 0 on success or the allocation error.
 */
static int ath_tx_edma_init(struct ath_softc *sc)
{
	int err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);

	if (err)
		return err;

	ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
				  sc->txsdma.dd_desc_paddr,
				  ATH_TXSTATUS_RING_SIZE);
	return 0;
}

/* Free the DMA-coherent TX status ring allocated by ath_txstatus_setup(). */
static void ath_tx_edma_cleanup(struct ath_softc *sc)
{
	dma_free_coherent(sc->dev, sc->txsdma.dd_desc_len,
			  sc->txsdma.dd_desc, sc->txsdma.dd_desc_paddr);
}

S
Sujith 已提交
2350
/*
 * One-time TX-side initialization: descriptor DMA pools for data and
 * beacon frames, the TX-hang watchdog, and (on EDMA chips) the status
 * ring.  Returns 0 on success or a negative errno.
 *
 * Note the success path also falls through the err: label — with
 * error == 0 the cleanup call is skipped, so this is intentional.
 */
int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int error = 0;

	spin_lock_init(&sc->tx.txbuflock);

	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
				  "tx", nbufs, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate tx descriptors: %d\n", error);
		goto err;
	}

	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
				  "beacon", ATH_BCBUF, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate beacon descriptors: %d\n", error);
		goto err;
	}

	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		error = ath_tx_edma_init(sc);
		if (error)
			goto err;
	}

err:
	/* ath_tx_cleanup() checks what was actually allocated. */
	if (error != 0)
		ath_tx_cleanup(sc);

	return error;
}

2388
void ath_tx_cleanup(struct ath_softc *sc)
S
Sujith 已提交
2389 2390 2391 2392 2393 2394
{
	if (sc->beacon.bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

	if (sc->tx.txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
2395 2396 2397

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_tx_edma_cleanup(sc);
S
Sujith 已提交
2398
}
2399 2400 2401

void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
2402 2403 2404
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;
2405

2406
	for (tidno = 0, tid = &an->tid[tidno];
2407 2408 2409 2410 2411 2412 2413 2414
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an        = an;
		tid->tidno     = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size  = WME_MAX_BA;
		tid->baw_head  = tid->baw_tail = 0;
		tid->sched     = false;
S
Sujith 已提交
2415
		tid->paused    = false;
2416
		tid->state &= ~AGGR_CLEANUP;
2417
		__skb_queue_head_init(&tid->buf_q);
2418
		acno = TID_TO_WME_AC(tidno);
2419
		tid->ac = &an->ac[acno];
2420 2421
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
2422
	}
2423

2424
	for (acno = 0, ac = &an->ac[acno];
2425 2426
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched    = false;
2427
		ac->txq = sc->tx.txq_map[acno];
2428
		INIT_LIST_HEAD(&ac->tid_q);
2429 2430 2431
	}
}

S
Sujith 已提交
2432
/*
 * Tear down a station's aggregation state when it goes away: for each
 * TID, unschedule it (and its AC) from the queue's scheduling lists,
 * then drain any frames still buffered for it.  All per-TID work is
 * done under that TID's queue lock.
 */
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		if (ac->sched) {
			list_del(&ac->list);
			/* tid->ac == ac here, so this clears ac->sched. */
			tid->ac->sched = false;
		}

		ath_tid_drain(sc, txq, tid);
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;

		spin_unlock_bh(&txq->axq_lock);
	}
}