/******************************************************************************
 *
 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-agn-hw.h"
#include "iwl-trans-pcie-int.h"

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/**
 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
					   struct iwl_tx_queue *txq,
					   u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(struct iwl_tx_cmd *) txq->cmd[txq->q.write_ptr]->payload;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	sta_id = tx_cmd->sta_id;
	sec_ctl = tx_cmd->sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += WEP_IV_LEN + WEP_ICV_LEN;
		break;
	}

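	/*
	 * Byte-count entry: the 12-bit length goes in the low bits and the
	 * station id in the top nibble.  The first TFD_QUEUE_SIZE_BC_DUP
	 * entries are also written past the end of the array, presumably so
	 * the scheduler can read a contiguous window across the wrap point.
	 */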
	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}

/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	if (hw_params(trans).shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(bus(trans), HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		/* if we're trying to save power */
		if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
			/* wake up nic if it's powered down ...
			 * uCode will wake up, and interrupt us again, so next
			 * time we'll skip this part. */
			reg = iwl_read32(bus(trans), CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Tx queue %d requesting wakeup,"
					" GP1 = 0x%x\n", txq_id, reg);
				iwl_set_bit(bus(trans), CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				return;
			}

			iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
				     txq->q.write_ptr | (txq_id << 8));

		/*
		 * else not in power-save mode,
		 * uCode will never sleep when we're
		 * trying to tx (during RFKILL, we're not trying to tx).
		 */
		} else
			iwl_write32(bus(trans), HBUS_TARG_WRPTR,
				    txq->q.write_ptr | (txq_id << 8));
	}
	txq->need_update = 0;
}

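/*
 * The helpers below pack and unpack one transfer buffer (TB) entry of a TFD:
 * tb->lo holds the low 32 bits of the DMA address, the low nibble of
 * tb->hi_n_len holds address bits 32-35 (when dma_addr_t is 64-bit), and the
 * upper 12 bits of tb->hi_n_len hold the buffer length.
 */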
static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
		     struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		dma_unmap_single(bus(trans)->dev,
				dma_unmap_addr(meta, mapping),
				dma_unmap_len(meta, len),
				DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(bus(trans)->dev, iwl_tfd_tb_get_addr(tfd, i),
				iwl_tfd_tb_get_len(tfd, i), dma_dir);
}

/**
 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 * @index - the index of the TFD to be freed
 * @dma_dir - the direction of the DMA mapping
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
	int index, enum dma_data_direction dma_dir)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index], dma_dir);

	/* free SKB */
	if (txq->skbs) {
		struct sk_buff *skb;

		skb = txq->skbs[index];

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_free_skb(priv(trans), skb);
			txq->skbs[index] = NULL;
		}
	}
}

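/*
 * Append one buffer (DMA address/length pair) to the TFD at the queue's
 * current write pointer; when @reset is set, the TFD is zeroed first.
 */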
int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum of 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			  IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
		return -EINVAL;

	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(trans, "Unaligned address = %llx\n",
			  (unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and consists of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  The driver and the device exchange the
 * status of each queue via "read" and "write" pointers.  The driver keeps a
 * minimum of 2 empty entries in each circular buffer, to protect against
 * confusing empty and full queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For the Tx queue, there are low mark and high mark limits.  If, after
 * queuing a packet for Tx, the free space becomes < low mark, the Tx queue is
 * stopped.  When reclaiming packets (on the 'tx done' IRQ), if the free space
 * becomes > high mark, the Tx queue is resumed.
 *
 ***************************************************/

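/*
 * iwl_queue_space - return how many TFD slots are still free in @q, keeping
 * a small reserve so that a full queue is never mistaken for an empty one.
 */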
int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}

/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	if (WARN_ON(!is_power_of_2(count)))
		return -EINVAL;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}

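/*
 * Invalidate the byte-count table entry of the TFD being reclaimed by
 * writing a length of 1 (plus the station id), mirroring it into the
 * duplicate region just like the update path does.
 */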
static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					  struct iwl_tx_queue *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(struct iwl_tx_cmd *) txq->cmd[txq->q.read_ptr]->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans->shrd->cmd_queue)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

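/*
 * Map a receiver-address/TID pair to a HW queue.  Each 32-bit word of the
 * scheduler's translation table holds the mapping for two queues (16 bits
 * each); odd-numbered queues use the upper half-word.
 */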
static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
					u16 txq_id)
{
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_read_targ_mem(bus(trans), tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_write_targ_mem(bus(trans), tbl_dw_addr, tbl_dw);

	return 0;
}

static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(bus(trans),
		SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
		(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}

void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
				int txq_id, u32 index)
{
	iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
			(index & 0xff) | (txq_id << 8));
	iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(txq_id), index);
}

void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
					struct iwl_tx_queue *txq,
					int tx_fifo_id, int scd_retry)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id = txq->q.id;
	int active =
		test_bit(txq_id, &trans_pcie->txq_ctx_active_msk) ? 1 : 0;

	iwl_write_prph(bus(trans), SCD_QUEUE_STATUS_BITS(txq_id),
			(active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			(tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
			(1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	IWL_DEBUG_TX_QUEUES(trans, "%s %s Queue %d on FIFO %d\n",
		       active ? "Activate" : "Deactivate",
		       scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
}

static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie,
				    u8 ctx, u16 tid)
{
	const u8 *ac_to_fifo = trans_pcie->ac_to_fifo[ctx];
	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
		return ac_to_fifo[tid_to_ac[tid]];

	/* no support for TIDs 8-15 yet */
	return -EINVAL;
}

void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
				 enum iwl_rxon_context_id ctx, int sta_id,
				 int tid, int frame_limit)
{
	int tx_fifo, txq_id, ssn_idx;
	u16 ra_tid;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	if (WARN_ON(sta_id == IWL_INVALID_STATION))
		return;
	if (WARN_ON(tid >= IWL_MAX_TID_COUNT))
		return;

	tx_fifo = get_fifo_from_tid(trans_pcie, ctx, tid);
	if (WARN_ON(tx_fifo < 0)) {
		IWL_ERR(trans, "txq_agg_setup, bad fifo: %d\n", tx_fifo);
		return;
	}

	spin_lock_irqsave(&trans->shrd->sta_lock, flags);
	tid_data = &trans->shrd->tid_data[sta_id][tid];
	ssn_idx = SEQ_TO_SN(tid_data->seq_number);
	txq_id = tid_data->agg.txq_id;
	spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);

	ra_tid = BUILD_RAxTID(sta_id, tid);

	spin_lock_irqsave(&trans->shrd->lock, flags);

	/* Stop this Tx queue before configuring it */
	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	iwl_set_bits_prph(bus(trans), SCD_QUEUECHAIN_SEL, (1<<txq_id));

	/* enable aggregations for the queue */
	iwl_set_bits_prph(bus(trans), SCD_AGGR_SEL, (1<<txq_id));

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	trans_pcie->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	trans_pcie->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	iwl_trans_set_wr_ptrs(trans, txq_id, ssn_idx);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
			sizeof(u32),
			((frame_limit <<
			SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
			SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit <<
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	iwl_set_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
					tx_fifo, 1);

	trans_pcie->txq[txq_id].sta_id = sta_id;
	trans_pcie->txq[txq_id].tid = tid;

	spin_unlock_irqrestore(&trans->shrd->lock, flags);
}

/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
 */
static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id,
					&trans_pcie->txq_ctx_active_msk))
			return txq_id;
	return -1;
}

int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
				enum iwl_rxon_context_id ctx, int sta_id,
				int tid, u16 *ssn)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tid_data *tid_data;
	unsigned long flags;
	int txq_id;

	txq_id = iwlagn_txq_ctx_activate_free(trans);
	if (txq_id == -1) {
		IWL_ERR(trans, "No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&trans->shrd->sta_lock, flags);
	tid_data = &trans->shrd->tid_data[sta_id][tid];
	tid_data->agg.txq_id = txq_id;
	tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);

	*ssn = tid_data->agg.ssn;
	iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);

	if (*ssn == tid_data->next_reclaimed) {
		IWL_DEBUG_TX_QUEUES(trans, "Proceed: ssn = next_recl = %d",
				    tid_data->agg.ssn);
		tid_data->agg.state = IWL_AGG_ON;
		iwl_start_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
	} else {
		IWL_DEBUG_TX_QUEUES(trans, "Can't proceed: ssn %d, "
				    "next_recl = %d",
				    tid_data->agg.ssn,
				    tid_data->next_reclaimed);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);

	return 0;
}

int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	/* TODO: the transport layer shouldn't access the tid_data */
	int txq_id = trans->shrd->tid_data[sta_id][tid].agg.txq_id;

	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWLAGN_FIRST_AMPDU_QUEUE +
		hw_params(trans).num_ampdu_queues <= txq_id)) {
		IWL_ERR(trans,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			hw_params(trans).num_ampdu_queues - 1);
		return -EINVAL;
	}

	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id));

	trans_pcie->txq[txq_id].q.read_ptr = 0;
	trans_pcie->txq[txq_id].q.write_ptr = 0;
	/* supposes that ssn_idx is valid (!= 0xFFF) */
	iwl_trans_set_wr_ptrs(trans, txq_id, 0);

	iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_txq_ctx_deactivate(trans_pcie, txq_id);
	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);
	return 0;
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @trans: the transport layer private data
 * @cmd: a pointer to the uCode command structure
 *
 * The function returns < 0 values to indicate that the operation
 * failed. On success, it returns the index (> 0) of the command in the
 * command queue.
 */
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	u32 idx;
	u16 copy_size, cmd_size;
	bool is_ct_kill = false;
	bool had_nocopy = false;
	int i;
	u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_idx;
#endif

	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_WARN(trans, "fw recovery, no hcmd send\n");
		return -EIO;
	}

	if ((trans->shrd->ucode_owner == IWL_OWNERSHIP_TM) &&
	    !(cmd->flags & CMD_ON_DEMAND)) {
		IWL_DEBUG_HC(trans, "tm own the uCode, no regular hcmd send\n");
		return -EIO;
	}

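	/* copy_size counts only the chunks that are copied into the command
	 * buffer itself; cmd_size is the full logical command length,
	 * including NOCOPY chunks that are mapped as separate TBs below. */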
	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;

	if (iwl_is_rfkill(trans->shrd) || iwl_is_ctkill(trans->shrd)) {
		IWL_WARN(trans, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(trans->shrd) ? "RF" : "CT");
		return -EIO;
	}

	spin_lock_irqsave(&trans->hcmd_lock, flags);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_irqrestore(&trans->hcmd_lock, flags);

		IWL_ERR(trans, "No space in command queue\n");
		is_ct_kill = iwl_check_for_ct_kill(priv(trans));
		if (!is_ct_kill) {
			IWL_ERR(trans, "Restarting adapter queue is full\n");
			iwlagn_fw_error(priv(trans), false);
		}
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans->shrd->cmd_queue) |
					 INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */

	cmd_dest = out_cmd->payload;
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
		cmd_dest += cmd->len[i];
	}

	IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, "
			"%d bytes at %d[%d]:%d\n",
			get_cmd_string(out_cmd->hdr.cmd),
			out_cmd->hdr.cmd,
			le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
			q->write_ptr, idx, trans->shrd->cmd_queue);

	phys_addr = dma_map_single(bus(trans)->dev, &out_cmd->hdr, copy_size,
				DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(bus(trans)->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(trans, txq,
					phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = dma_map_single(bus(trans)->dev,
					   (void *)cmd->data[i],
					   cmd->len[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(bus(trans)->dev, phys_addr)) {
			iwlagn_unmap_tfd(trans, out_meta,
					 &txq->tfds[q->write_ptr],
					 DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmd->data[i];
		trace_lens[trace_idx] = cmd->len[i];
		trace_idx++;
#endif
	}

	out_meta->flags = cmd->flags;

	txq->need_update = 1;

	/* check that tracing gets all possible blocks */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(priv(trans), cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

 out:
	spin_unlock_irqrestore(&trans->hcmd_lock, flags);
	return idx;
}

/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms.  If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
				   int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
			  "index %d is out of range [0-%d] %d %d.\n", __func__,
			  txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", idx,
					q->write_ptr, q->read_ptr);
			iwlagn_fw_error(priv(trans), false);
		}

	}
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
842 843
 * @handler_status: return value of the handler of the command
 *	(put in setup_rx_handlers)
844 845 846 847 848
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed.  The attached skb (if present) will only be freed
 * if the callback returns 1
 */
849 850
void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb,
			 int handler_status)
851
{
Z
853 854 855 856
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
J
	struct iwl_cmd_meta *meta;
859 860
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
861
	unsigned long flags;
862 863 864 865

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then there a command routing bug has been introduced
	 * in the queue management code. */
866
	if (WARN(txq_id != trans->shrd->cmd_queue,
867
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
868
		  txq_id, trans->shrd->cmd_queue, sequence,
869 870
		  trans_pcie->txq[trans->shrd->cmd_queue].q.read_ptr,
		  trans_pcie->txq[trans->shrd->cmd_queue].q.write_ptr)) {
871
		iwl_print_hex_error(trans, pkt, 32);
872
		return;
873
	}
874

875
	cmd_index = get_cmd_index(&txq->q, index);
Z
	meta = &txq->meta[cmd_index];
878

879 880
	txq->time_stamp = jiffies;

881 882
	iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
			 DMA_BIDIRECTIONAL);
R
884
	/* Input error checking is done when commands are added to queue. */
J
Z
887
		meta->source->handler_status = handler_status;
Z
889
	}
890

891
	spin_lock_irqsave(&trans->hcmd_lock, flags);
892

893
	iwl_hcmd_queue_reclaim(trans, txq_id, index);
894

J
896 897 898 899 900
		if (!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 get_cmd_string(cmd->hdr.cmd));
		}
901 902
		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
903
			       get_cmd_string(cmd->hdr.cmd));
904
		wake_up(&trans->shrd->wait_command_queue);
905
	}
906

Z
908

909
	spin_unlock_irqrestore(&trans->hcmd_lock, flags);
910
}
911 912 913

#define HOST_COMPLETE_TIMEOUT (2 * HZ)

static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	int ret;

	/* An asynchronous command can not expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;


	if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
		return -EBUSY;

	ret = iwl_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_DEBUG_QUIET_RFKILL(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			  get_cmd_string(cmd->id), ret);
		return ret;
	}
	return 0;
}

static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int cmd_idx;
	int ret;

	lockdep_assert_held(&trans->shrd->mutex);

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
			get_cmd_string(cmd->id));

	if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
		return -EBUSY;


	if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
		IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
			       get_cmd_string(cmd->id));
		return -ECANCELED;
	}
	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_ERR(trans, "Command %s failed: FW Error\n",
			       get_cmd_string(cmd->id));
		return -EIO;
	}
961 962
	set_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
963 964
			get_cmd_string(cmd->id));

965
	cmd_idx = iwl_enqueue_hcmd(trans, cmd);
966 967
	if (cmd_idx < 0) {
		ret = cmd_idx;
968
		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
969 970
		IWL_DEBUG_QUIET_RFKILL(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
971 972 973 974
			  get_cmd_string(cmd->id), ret);
		return ret;
	}

975
	ret = wait_event_timeout(trans->shrd->wait_command_queue,
976
			!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status),
977 978
			HOST_COMPLETE_TIMEOUT);
	if (!ret) {
979
		if (test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
980
			struct iwl_tx_queue *txq =
981
				&trans_pcie->txq[trans->shrd->cmd_queue];
982 983
			struct iwl_queue *q = &txq->q;

984
			IWL_DEBUG_QUIET_RFKILL(trans,
985 986 987 988
				"Error sending %s: time out after %dms.\n",
				get_cmd_string(cmd->id),
				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

989
			IWL_DEBUG_QUIET_RFKILL(trans,
990 991 992
				"Current CMD queue read_ptr %d write_ptr %d\n",
				q->read_ptr, q->write_ptr);

993 994
			clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
			IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command"
995 996 997 998 999 1000 1001
				 "%s\n", get_cmd_string(cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			  get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		trans_pcie->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &=
							~CMD_WANT_SKB;
	}

	if (cmd->reply_page) {
		iwl_free_pages(trans->shrd, cmd->reply_page);
		cmd->reply_page = 0;
	}

	return ret;
}

int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	if (cmd->flags & CMD_ASYNC)
		return iwl_send_cmd_async(trans, cmd);

	return iwl_send_cmd_sync(trans, cmd);
}

/* Frees buffers until index _not_ inclusive */
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
			 struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int last_to_free;
	int freed = 0;

	/* This function is not meant to release the cmd queue */
	if (WARN_ON(txq_id == trans->shrd->cmd_queue))
		return 0;

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used */
	last_to_free = iwl_queue_dec_wrap(index, q->n_bd);

	if ((index >= q->n_bd) ||
	   (iwl_queue_used(q, last_to_free) == 0)) {
		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
			  "last_to_free %d is out of range [0-%d] %d %d.\n",
			  __func__, txq_id, last_to_free, q->n_bd,
			  q->write_ptr, q->read_ptr);
		return 0;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		return 0;

	for (;
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (WARN_ON_ONCE(txq->skbs[txq->q.read_ptr] == NULL))
			continue;

		__skb_queue_tail(skbs, txq->skbs[txq->q.read_ptr]);

		txq->skbs[txq->q.read_ptr] = NULL;

		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);

		iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr, DMA_TO_DEVICE);
		freed++;
	}
	return freed;
}