/******************************************************************************
 *
 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>

/* TODO: remove include to iwl-dev.h */
#include "iwl-dev.h"
#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-agn-hw.h"
#include "iwl-helpers.h"
#include "iwl-trans-pcie-int.h"

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/**
 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
					   struct iwl_tx_queue *txq,
					   u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
	sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += WEP_IV_LEN + WEP_ICV_LEN;
		break;
	}

	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
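
/*
 * Illustrative sketch (not driver code): how a byte-count entry packs
 * the 12-bit length and the 4-bit station id, mirroring the code
 * above (the numbers are made up):
 *
 *	u16 len    = 76 + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; // 84
 *	u8  sta_id = 3;
 *	__le16 bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
 *	// bits 0-11 carry the length (0x054), bits 12-15 the sta_id
 */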

/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	if (hw_params(trans).shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(bus(trans), HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		/* if we're trying to save power */
		if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
			/* wake up nic if it's powered down ...
			 * uCode will wake up, and interrupt us again, so next
			 * time we'll skip this part. */
			reg = iwl_read32(bus(trans), CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Tx queue %d requesting wakeup,"
					" GP1 = 0x%x\n", txq_id, reg);
				iwl_set_bit(bus(trans), CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				return;
			}

			iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
				     txq->q.write_ptr | (txq_id << 8));

		/*
		 * else not in power-save mode,
		 * uCode will never sleep when we're
		 * trying to tx (during RFKILL, we're not trying to tx).
		 */
		} else
			iwl_write32(bus(trans), HBUS_TARG_WRPTR,
				    txq->q.write_ptr | (txq_id << 8));
	}
	txq->need_update = 0;
}
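
/*
 * Illustrative usage sketch: callers mark the queue dirty and then
 * kick the write pointer, as the command-enqueue path below does:
 *
 *	txq->need_update = 1;
 *	iwl_txq_update_write_ptr(trans, txq);
 */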

static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}
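
/*
 * Illustrative sketch: how a 36-bit DMA address and a 12-bit length
 * share one buffer descriptor, per iwl_tfd_set_tb() above (the values
 * are made up):
 *
 *	dma_addr_t addr = 0x9ABCD1234ULL;	// 36-bit address
 *	iwl_tfd_set_tb(tfd, 0, addr, 0x100);
 *	// tb->lo       = 0xABCD1234 (low 32 bits of addr)
 *	// tb->hi_n_len = (0x100 << 4) | 0x9 = 0x1009
 */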

static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
		     struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, this is quite a serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		dma_unmap_single(bus(trans)->dev,
				dma_unmap_addr(meta, mapping),
				dma_unmap_len(meta, len),
				DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(bus(trans)->dev, iwl_tfd_tb_get_addr(tfd, i),
				iwl_tfd_tb_get_len(tfd, i), dma_dir);
}

/**
 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 * @index - the index of the TFD to be freed
 * @dma_dir - the direction of the DMA mapping
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
	int index, enum dma_data_direction dma_dir)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index], dma_dir);

	/* free SKB */
	if (txq->skbs) {
		struct sk_buff *skb;

		skb = txq->skbs[index];

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_free_skb(priv(trans), skb);
			txq->skbs[index] = NULL;
		}
	}
}

int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum of 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			  IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
		return -EINVAL;

	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(trans, "Unaligned address = %llx\n",
			  (unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}
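
/*
 * Illustrative usage sketch, mirroring the command-enqueue path below:
 * the first chunk attached to a TFD resets it, subsequent chunks are
 * appended (phys_addr/phys2 and the lengths are placeholders):
 *
 *	iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, len, 1);
 *	iwlagn_txq_attach_buf_to_tfd(trans, txq, phys2, len2, 0);
 */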

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For the Tx queue, there are low mark and high mark limits. If, after queuing
 * a packet for Tx, free space becomes < low mark, the Tx queue is stopped. When
 * reclaiming packets (on the 'tx done' IRQ), if free space becomes > high mark,
 * the Tx queue is resumed.
 *
 ***************************************************/

int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
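
/*
 * Illustrative worked example (made-up numbers): with n_window = 64,
 * read_ptr = 8 and write_ptr = 16, eight entries are in flight, so
 * s = 8 - 16 = -8, then s += 64 gives 56, and subtracting the reserve
 * of 2 leaves 54 usable slots.
 */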

/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	if (WARN_ON(!is_power_of_2(count)))
		return -EINVAL;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}
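
/*
 * Illustrative usage sketch: a transport would size the ring with
 * TFD_QUEUE_SIZE_MAX entries and a smaller window (the slot count
 * shown is an assumption, not taken from this file):
 *
 *	ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, 32, txq_id);
 *	if (ret)
 *		return ret;
 */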

static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					  struct iwl_tx_queue *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans->shrd->cmd_queue)
		sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
					u16 txq_id)
{
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_read_targ_mem(bus(trans), tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_write_targ_mem(bus(trans), tbl_dw_addr, tbl_dw);

	return 0;
}
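
/*
 * Illustrative sketch: two adjacent queues share one translation-table
 * dword; an even txq_id lands in the low 16 bits, an odd one in the
 * high 16 bits (made-up values):
 *
 *	txq_id = 10 (even) -> tbl_dw = (old & 0xFFFF0000) | scd_q2ratid
 *	txq_id = 11 (odd)  -> tbl_dw = (scd_q2ratid << 16) | (old & 0xFFFF)
 */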

static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(bus(trans),
		SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
		(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}

void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
				int txq_id, u32 index)
{
	iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
			(index & 0xff) | (txq_id << 8));
	iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(txq_id), index);
}

void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
					struct iwl_tx_queue *txq,
					int tx_fifo_id, int scd_retry)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id = txq->q.id;
	int active =
		test_bit(txq_id, &trans_pcie->txq_ctx_active_msk) ? 1 : 0;

	iwl_write_prph(bus(trans), SCD_QUEUE_STATUS_BITS(txq_id),
			(active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			(tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
			(1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	IWL_DEBUG_INFO(trans, "%s %s Queue %d on FIFO %d\n",
		       active ? "Activate" : "Deactivate",
		       scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
}

static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie,
				    u8 ctx, u16 tid)
{
	const u8 *ac_to_fifo = trans_pcie->ac_to_fifo[ctx];
	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
		return ac_to_fifo[tid_to_ac[tid]];

	/* no support for TIDs 8-15 yet */
	return -EINVAL;
}

void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
				 enum iwl_rxon_context_id ctx, int sta_id,
				 int tid, int frame_limit)
{
	int tx_fifo, txq_id, ssn_idx;
	u16 ra_tid;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	if (WARN_ON(sta_id == IWL_INVALID_STATION))
		return;
	if (WARN_ON(tid >= IWL_MAX_TID_COUNT))
		return;

	tx_fifo = get_fifo_from_tid(trans_pcie, ctx, tid);
	if (WARN_ON(tx_fifo < 0)) {
		IWL_ERR(trans, "txq_agg_setup, bad fifo: %d\n", tx_fifo);
		return;
	}

	spin_lock_irqsave(&trans->shrd->sta_lock, flags);
	tid_data = &trans->shrd->tid_data[sta_id][tid];
	ssn_idx = SEQ_TO_SN(tid_data->seq_number);
	txq_id = tid_data->agg.txq_id;
	spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);

	ra_tid = BUILD_RAxTID(sta_id, tid);

	spin_lock_irqsave(&trans->shrd->lock, flags);

	/* Stop this Tx queue before configuring it */
	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	iwl_set_bits_prph(bus(trans), SCD_QUEUECHAIN_SEL, (1<<txq_id));

	/* enable aggregations for the queue */
	iwl_set_bits_prph(bus(trans), SCD_AGGR_SEL, (1<<txq_id));

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	trans_pcie->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	trans_pcie->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	iwl_trans_set_wr_ptrs(trans, txq_id, ssn_idx);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
			sizeof(u32),
			((frame_limit <<
			SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
			SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit <<
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	iwl_set_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
					tx_fifo, 1);

	trans_pcie->txq[txq_id].sta_id = sta_id;
	trans_pcie->txq[txq_id].tid = tid;

	spin_unlock_irqrestore(&trans->shrd->lock, flags);
}

/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
 */
static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id,
					&trans_pcie->txq_ctx_active_msk))
			return txq_id;
	return -1;
}

int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
				enum iwl_rxon_context_id ctx, int sta_id,
				int tid, u16 *ssn)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tid_data *tid_data;
	unsigned long flags;
	int txq_id;

	txq_id = iwlagn_txq_ctx_activate_free(trans);
	if (txq_id == -1) {
		IWL_ERR(trans, "No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&trans->shrd->sta_lock, flags);
	tid_data = &trans->shrd->tid_data[sta_id][tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);

	tid_data = &trans->shrd->tid_data[sta_id][tid];
	if (tid_data->tfds_in_queue == 0) {
		IWL_DEBUG_HT(trans, "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		iwl_start_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
	} else {
		IWL_DEBUG_HT(trans, "HW queue is NOT empty: %d packets in HW "
			     "queue\n", tid_data->tfds_in_queue);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);

	return 0;
}

void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id));

	trans_pcie->txq[txq_id].q.read_ptr = 0;
	trans_pcie->txq[txq_id].q.write_ptr = 0;
	/* assumes that ssn_idx is valid (!= 0xFFF) */
	iwl_trans_set_wr_ptrs(trans, txq_id, 0);

	iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_txq_ctx_deactivate(trans_pcie, txq_id);
	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);
}

int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
				  enum iwl_rxon_context_id ctx, int sta_id,
				  int tid)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;
	int read_ptr, write_ptr;
	struct iwl_tid_data *tid_data;
	int txq_id;

	spin_lock_irqsave(&trans->shrd->sta_lock, flags);

	tid_data = &trans->shrd->tid_data[sta_id][tid];
	txq_id = tid_data->agg.txq_id;

	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWLAGN_FIRST_AMPDU_QUEUE +
		hw_params(trans).num_ampdu_queues <= txq_id)) {
		IWL_ERR(trans,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			hw_params(trans).num_ampdu_queues - 1);
		spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
		return -EINVAL;
	}

	switch (trans->shrd->tid_data[sta_id][tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		* This can happen if the peer stops aggregation
		* again before we've had a chance to drain the
		* queue we selected previously, i.e. before the
		* session was really started completely.
		*/
		IWL_DEBUG_HT(trans, "AGG stop before setup done\n");
		goto turn_off;
	case IWL_AGG_ON:
		break;
	default:
		IWL_WARN(trans, "Stopping AGG while state not ON "
				"or starting\n");
	}

	write_ptr = trans_pcie->txq[txq_id].q.write_ptr;
	read_ptr = trans_pcie->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		IWL_DEBUG_HT(trans, "Stopping a non empty AGG HW QUEUE\n");
		trans->shrd->tid_data[sta_id][tid].agg.state =
			IWL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
		return 0;
	}

	IWL_DEBUG_HT(trans, "HW queue is empty\n");
turn_off:
	trans->shrd->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;

	/* do not restore/save irqs */
	spin_unlock(&trans->shrd->sta_lock);
	spin_lock(&trans->shrd->lock);

	iwl_trans_pcie_txq_agg_disable(trans, txq_id);

	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	iwl_stop_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);

	return 0;
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate that the operation
 * failed. On success, it returns the index (>= 0) of the command in the
 * command queue.
 */
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	u32 idx;
	u16 copy_size, cmd_size;
	bool is_ct_kill = false;
	bool had_nocopy = false;
	int i;
	u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_idx;
#endif

	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_WARN(trans, "fw recovery, no hcmd send\n");
		return -EIO;
	}

	if ((trans->shrd->ucode_owner == IWL_OWNERSHIP_TM) &&
	    !(cmd->flags & CMD_ON_DEMAND)) {
		IWL_DEBUG_HC(trans, "tm own the uCode, no regular hcmd send\n");
		return -EIO;
	}

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;

	if (iwl_is_rfkill(trans->shrd) || iwl_is_ctkill(trans->shrd)) {
		IWL_WARN(trans, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(trans->shrd) ? "RF" : "CT");
		return -EIO;
	}

	spin_lock_irqsave(&trans->hcmd_lock, flags);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_irqrestore(&trans->hcmd_lock, flags);

		IWL_ERR(trans, "No space in command queue\n");
		is_ct_kill = iwl_check_for_ct_kill(priv(trans));
		if (!is_ct_kill) {
			IWL_ERR(trans, "Restarting adapter queue is full\n");
			iwlagn_fw_error(priv(trans), false);
		}
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans->shrd->cmd_queue) |
					 INDEX_TO_SEQ(q->write_ptr));
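
	/*
	 * Note: SEQ_TO_QUEUE() and SEQ_TO_INDEX() in iwl_tx_cmd_complete()
	 * below undo this packing to route the response back to this
	 * command slot.
	 */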

	/* and copy the data that needs to be copied */

	cmd_dest = &out_cmd->cmd.payload[0];
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
		cmd_dest += cmd->len[i];
	}

	IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, "
			"%d bytes at %d[%d]:%d\n",
			get_cmd_string(out_cmd->hdr.cmd),
			out_cmd->hdr.cmd,
			le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
			q->write_ptr, idx, trans->shrd->cmd_queue);

	phys_addr = dma_map_single(bus(trans)->dev, &out_cmd->hdr, copy_size,
				DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(bus(trans)->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(trans, txq,
					phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = dma_map_single(bus(trans)->dev,
					   (void *)cmd->data[i],
					   cmd->len[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(bus(trans)->dev, phys_addr)) {
			iwlagn_unmap_tfd(trans, out_meta,
					 &txq->tfds[q->write_ptr],
					 DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmd->data[i];
		trace_lens[trace_idx] = cmd->len[i];
		trace_idx++;
#endif
	}

	out_meta->flags = cmd->flags;

	txq->need_update = 1;

	/* check that tracing gets all possible blocks */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(priv(trans), cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

 out:
	spin_unlock_irqrestore(&trans->hcmd_lock, flags);
	return idx;
}

/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
				   int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
			  "index %d is out of range [0-%d] %d %d.\n", __func__,
			  txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", idx,
					q->write_ptr, q->read_ptr);
			iwlagn_fw_error(priv(trans), false);
		}

	}
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 * @handler_status: return value of the handler of the command
 *	(put in setup_rx_handlers)
 *
 * If an Rx buffer has an async callback associated with it, the callback
 * will be executed.  The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb,
			 int handler_status)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
	unsigned long flags;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans->shrd->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		  txq_id, trans->shrd->cmd_queue, sequence,
		  trans_pcie->txq[trans->shrd->cmd_queue].q.read_ptr,
		  trans_pcie->txq[trans->shrd->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->cmd[cmd_index];
	meta = &txq->meta[cmd_index];

	txq->time_stamp = jiffies;

	iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
			 DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		meta->source->handler_status = handler_status;
		rxb->page = NULL;
	}

	spin_lock_irqsave(&trans->hcmd_lock, flags);

	iwl_hcmd_queue_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(cmd->hdr.cmd));
		wake_up(&trans->shrd->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock_irqrestore(&trans->hcmd_lock, flags);
}

#define HOST_COMPLETE_TIMEOUT (2 * HZ)

static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	int ret;

	/* An asynchronous command can not expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;

	if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
		return -EBUSY;

	ret = iwl_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
			  get_cmd_string(cmd->id), ret);
		return ret;
	}
	return 0;
}

static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int cmd_idx;
	int ret;

	lockdep_assert_held(&trans->shrd->mutex);

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
			get_cmd_string(cmd->id));

	set_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
			get_cmd_string(cmd->id));

	cmd_idx = iwl_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
			  get_cmd_string(cmd->id), ret);
		return ret;
	}

	ret = wait_event_timeout(trans->shrd->wait_command_queue,
			!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status),
			HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
			IWL_ERR(trans,
				"Error sending %s: time out after %dms.\n",
				get_cmd_string(cmd->id),
				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
			IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command "
				 "%s\n", get_cmd_string(cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
		IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
			       get_cmd_string(cmd->id));
		ret = -ECANCELED;
		goto fail;
	}
	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_ERR(trans, "Command %s failed: FW Error\n",
			       get_cmd_string(cmd->id));
		ret = -EIO;
		goto fail;
	}
	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			  get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		trans_pcie->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &=
							~CMD_WANT_SKB;
	}
fail:
	if (cmd->reply_page) {
		iwl_free_pages(trans->shrd, cmd->reply_page);
		cmd->reply_page = 0;
	}

	return ret;
}

int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	if (cmd->flags & CMD_ASYNC)
		return iwl_send_cmd_async(trans, cmd);

	return iwl_send_cmd_sync(trans, cmd);
}
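
/*
 * Illustrative usage sketch: building a two-fragment host command for
 * iwl_trans_pcie_send_cmd(). REPLY_FOO, fixed_part, big_buf and
 * big_len are placeholders, not names from this driver; flags = 0
 * makes the call synchronous since CMD_ASYNC is not set:
 *
 *	struct iwl_host_cmd cmd = {
 *		.id = REPLY_FOO,
 *		.flags = 0,
 *		.data = { &fixed_part, big_buf, },
 *		.len = { sizeof(fixed_part), big_len, },
 *		.dataflags = { 0, IWL_HCMD_DFL_NOCOPY, },
 *	};
 *	ret = iwl_trans_pcie_send_cmd(trans, &cmd);
 */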

/* Frees buffers until index _not_ inclusive */
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
			 struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int last_to_free;
	int freed = 0;

	/* This function is not meant to release the cmd queue */
	if (WARN_ON(txq_id == trans->shrd->cmd_queue))
		return 0;

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used */
	last_to_free = iwl_queue_dec_wrap(index, q->n_bd);

	if ((index >= q->n_bd) ||
	   (iwl_queue_used(q, last_to_free) == 0)) {
		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
			  "last_to_free %d is out of range [0-%d] %d %d.\n",
			  __func__, txq_id, last_to_free, q->n_bd,
			  q->write_ptr, q->read_ptr);
		return 0;
	}

	IWL_DEBUG_TX_REPLY(trans, "reclaim: [%d, %d, %d]\n", txq_id,
			   q->read_ptr, index);

	if (WARN_ON(!skb_queue_empty(skbs)))
		return 0;

	for (;
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (WARN_ON_ONCE(txq->skbs[txq->q.read_ptr] == NULL))
			continue;

		__skb_queue_tail(skbs, txq->skbs[txq->q.read_ptr]);

		txq->skbs[txq->q.read_ptr] = NULL;

		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);

		iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr, DMA_TO_DEVICE);
		freed++;
	}
	return freed;
}
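
/*
 * Illustrative usage sketch: a reclaim-path caller frees TFDs up to
 * (not including) the firmware-reported index and hands the returned
 * skbs back to the stack. scd_ssn, hw and the mac80211 call are
 * assumptions, not taken from this file:
 *
 *	struct sk_buff_head skbs;
 *	struct sk_buff *skb;
 *
 *	__skb_queue_head_init(&skbs);
 *	iwl_tx_queue_reclaim(trans, txq_id, scd_ssn, &skbs);
 *	while ((skb = __skb_dequeue(&skbs)))
 *		ieee80211_tx_status(hw, skb);
 */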