/******************************************************************************
 *
 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <net/mac80211.h>

#include "iwl-agn.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-trans-int-pcie.h"

/**
 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
					   struct iwl_tx_queue *txq,
					   u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
	sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += WEP_IV_LEN + WEP_ICV_LEN;
		break;
	}

	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}

/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	if (hw_params(trans).shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(bus(trans), HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		/* if we're trying to save power */
		if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
			/* wake up nic if it's powered down ...
			 * uCode will wake up, and interrupt us again, so next
			 * time we'll skip this part. */
			reg = iwl_read32(bus(trans), CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Tx queue %d requesting wakeup,"
					" GP1 = 0x%x\n", txq_id, reg);
				iwl_set_bit(bus(trans), CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				return;
			}

			iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
				     txq->q.write_ptr | (txq_id << 8));

		/*
		 * else not in power-save mode,
		 * uCode will never sleep when we're
		 * trying to tx (during RFKILL, we're not trying to tx).
		 */
		} else
			iwl_write32(bus(trans), HBUS_TARG_WRPTR,
				    txq->q.write_ptr | (txq_id << 8));
	}
	txq->need_update = 0;
}

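/*
 * Each TFD holds up to IWL_NUM_OF_TBS transmit buffer (TB) descriptors.
 * A TB packs a 36-bit DMA address into the 32-bit 'lo' word plus the
 * low nibble of 'hi_n_len'; the upper 12 bits of 'hi_n_len' carry the
 * buffer length. The helpers below encode and decode this layout.
 */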
static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
		     struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite a serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		dma_unmap_single(bus(trans)->dev,
				dma_unmap_addr(meta, mapping),
				dma_unmap_len(meta, len),
				DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(bus(trans)->dev, iwl_tfd_tb_get_addr(tfd, i),
				iwl_tfd_tb_get_len(tfd, i), dma_dir);
}

/**
 * iwlagn_txq_free_tfd - Free all chunks referenced by the TFD at @index
 * @trans - transport private data
 * @txq - tx queue
 * @index - the index of the TFD to be freed
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
	int index)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index],
			 DMA_TO_DEVICE);

	/* free SKB */
	if (txq->skbs) {
		struct sk_buff *skb;

		skb = txq->skbs[index];

		/* can be called from irqs-disabled context */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->skbs[index] = NULL;
		}
	}
}

int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum of 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			  IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
		return -EINVAL;

	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(trans, "Unaligned address = %llx\n",
			  (unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For Tx queues, there are low mark and high mark limits. If, after queuing
 * a packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
 *
 ***************************************************/

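/**
 * iwl_queue_space - return the number of TFD slots still free for Tx
 *
 * Worked example (illustrative values, not taken from any hardware):
 * with n_bd = 256, n_window = 64, read_ptr = 250 and write_ptr = 10,
 * s starts at 250 - 10 = 240, drops to -16 after the n_bd correction
 * (read > write means the window wrapped), rises to 48 after adding
 * n_window, and the 2-entry reserve leaves 46 usable slots.
 */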
int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}

/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	if (WARN_ON(!is_power_of_2(count)))
		return -EINVAL;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}

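/**
 * iwlagn_txq_inval_byte_cnt_tbl - Invalidate the byte-count entry at read_ptr
 *
 * Writes a length of 1 back into the scheduler byte-count table (and
 * its duplicate region) so the slot being reclaimed no longer
 * advertises a stale frame length.
 */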
static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					  struct iwl_tx_queue *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans->shrd->cmd_queue)
		sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

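/**
 * iwlagn_tx_queue_set_q2ratid - Map a receiver-address/TID to a Tx queue
 *
 * Each 32-bit word of the scheduler's translation table in SRAM holds
 * the RA/TID mapping for two queues; odd-numbered queues use the upper
 * 16 bits of the word.
 */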
static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
					u16 txq_id)
{
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_read_targ_mem(bus(trans), tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_write_targ_mem(bus(trans), tbl_dw_addr, tbl_dw);

	return 0;
}

static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(bus(trans),
		SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
		(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}

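/**
 * iwl_trans_set_wr_ptrs - Point driver and scheduler indexes at @index
 *
 * Updates the hardware write pointer and the scheduler read pointer of
 * @txq_id so both sides agree on where the queue starts.
 */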
void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
				int txq_id, u32 index)
{
	iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
			(index & 0xff) | (txq_id << 8));
	iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(txq_id), index);
}

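/**
 * iwl_trans_tx_queue_set_status - (De)activate a Tx queue in the scheduler
 *
 * Writes the queue's FIFO mapping and active bit into the scheduler
 * status register and records the aggregation mode (scd_retry) on the
 * queue.
 */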
void iwl_trans_tx_queue_set_status(struct iwl_priv *priv,
					struct iwl_tx_queue *txq,
					int tx_fifo_id, int scd_retry)
{
	int txq_id = txq->q.id;
	int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;

	iwl_write_prph(bus(priv), SCD_QUEUE_STATUS_BITS(txq_id),
			(active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			(tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
			(1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	IWL_DEBUG_INFO(priv, "%s %s Queue %d on FIFO %d\n",
		       active ? "Activate" : "Deactivate",
		       scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
}

static inline int get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
{
	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
		return ctx->ac_to_fifo[tid_to_ac[tid]];

	/* no support for TIDs 8-15 yet */
	return -EINVAL;
}

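/**
 * iwl_trans_pcie_txq_agg_setup - Configure a Tx queue for BA aggregation
 *
 * Looks up the aggregation queue chosen for this station/TID, maps the
 * RA/TID pair to it in the scheduler, enables aggregation and chain
 * building, and starts the queue at the BA starting sequence number.
 */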
void iwl_trans_pcie_txq_agg_setup(struct iwl_priv *priv,
				  enum iwl_rxon_context_id ctx, int sta_id,
				  int tid, int frame_limit)
{
	int tx_fifo, txq_id, ssn_idx;
	u16 ra_tid;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	struct iwl_trans *trans = trans(priv);
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	if (WARN_ON(sta_id == IWL_INVALID_STATION))
		return;
	if (WARN_ON(tid >= IWL_MAX_TID_COUNT))
		return;

	tx_fifo = get_fifo_from_tid(&priv->contexts[ctx], tid);
	if (WARN_ON(tx_fifo < 0)) {
		IWL_ERR(trans, "txq_agg_setup, bad fifo: %d\n", tx_fifo);
		return;
	}

	spin_lock_irqsave(&priv->shrd->sta_lock, flags);
	tid_data = &priv->shrd->tid_data[sta_id][tid];
	ssn_idx = SEQ_TO_SN(tid_data->seq_number);
	txq_id = tid_data->agg.txq_id;
	spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);

	ra_tid = BUILD_RAxTID(sta_id, tid);

	spin_lock_irqsave(&priv->shrd->lock, flags);

	/* Stop this Tx queue before configuring it */
	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	iwl_set_bits_prph(bus(priv), SCD_QUEUECHAIN_SEL, (1<<txq_id));

	/* enable aggregations for the queue */
	iwl_set_bits_prph(bus(priv), SCD_AGGR_SEL, (1<<txq_id));

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	iwl_trans_set_wr_ptrs(trans, txq_id, ssn_idx);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(bus(priv), trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
			sizeof(u32),
			((frame_limit <<
			SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
			SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit <<
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	iwl_set_bits_prph(bus(priv), SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_trans_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);

	priv->txq[txq_id].sta_id = sta_id;
	priv->txq[txq_id].tid = tid;

	spin_unlock_irqrestore(&priv->shrd->lock, flags);
}

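/**
 * iwl_trans_pcie_txq_agg_disable - Stop aggregation on a Tx queue
 *
 * Reverses iwl_trans_pcie_txq_agg_setup(): stops the scheduler for the
 * queue, clears its aggregation and interrupt bits, resets both
 * pointers to 0 and deactivates the queue.
 */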
int iwl_trans_pcie_txq_agg_disable(struct iwl_priv *priv, u16 txq_id)
{
	struct iwl_trans *trans = trans(priv);
	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWLAGN_FIRST_AMPDU_QUEUE +
		hw_params(priv).num_ampdu_queues <= txq_id)) {
		IWL_ERR(priv,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			hw_params(priv).num_ampdu_queues - 1);
		return -EINVAL;
	}

	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	iwl_clear_bits_prph(bus(priv), SCD_AGGR_SEL, (1 << txq_id));

	priv->txq[txq_id].q.read_ptr = 0;
	priv->txq[txq_id].q.write_ptr = 0;
	/* assumes that ssn_idx is valid (!= 0xFFF) */
	iwl_trans_set_wr_ptrs(trans, txq_id, 0);

	iwl_clear_bits_prph(bus(priv), SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_txq_ctx_deactivate(priv, txq_id);
	iwl_trans_tx_queue_set_status(priv, &priv->txq[txq_id], 0, 0);

	return 0;
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @trans: transport context
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate that the operation
 * failed. On success, it returns the index (>= 0) of the command in
 * the command queue.
 */
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv(trans)->txq[trans->shrd->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	u32 idx;
	u16 copy_size, cmd_size;
	bool is_ct_kill = false;
	bool had_nocopy = false;
	int i;
	u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_idx;
#endif

	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_WARN(trans, "fw recovery, no hcmd send\n");
		return -EIO;
	}

	if ((trans->shrd->ucode_owner == IWL_OWNERSHIP_TM) &&
	    !(cmd->flags & CMD_ON_DEMAND)) {
		IWL_DEBUG_HC(trans, "tm own the uCode, no regular hcmd send\n");
		return -EIO;
	}

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;

	if (iwl_is_rfkill(trans->shrd) || iwl_is_ctkill(trans->shrd)) {
		IWL_WARN(trans, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(trans->shrd) ? "RF" : "CT");
		return -EIO;
	}

	spin_lock_irqsave(&trans->hcmd_lock, flags);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_irqrestore(&trans->hcmd_lock, flags);

		IWL_ERR(trans, "No space in command queue\n");
		is_ct_kill = iwl_check_for_ct_kill(priv(trans));
		if (!is_ct_kill) {
			IWL_ERR(trans, "Restarting adapter due to queue full\n");
			iwlagn_fw_error(priv(trans), false);
		}
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans->shrd->cmd_queue) |
					 INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */

	cmd_dest = &out_cmd->cmd.payload[0];
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
		cmd_dest += cmd->len[i];
	}

	IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, "
			"%d bytes at %d[%d]:%d\n",
			get_cmd_string(out_cmd->hdr.cmd),
			out_cmd->hdr.cmd,
			le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
			q->write_ptr, idx, trans->shrd->cmd_queue);

	phys_addr = dma_map_single(bus(trans)->dev, &out_cmd->hdr, copy_size,
				DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(bus(trans)->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(trans, txq,
					phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = dma_map_single(bus(trans)->dev,
					   (void *)cmd->data[i],
					   cmd->len[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(bus(trans)->dev, phys_addr)) {
			iwlagn_unmap_tfd(trans, out_meta,
					 &txq->tfds[q->write_ptr],
					 DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmd->data[i];
		trace_lens[trace_idx] = cmd->len[i];
		trace_idx++;
#endif
	}

	out_meta->flags = cmd->flags;

	txq->need_update = 1;

	/* check that tracing gets all possible blocks */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(priv(trans), cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

 out:
	spin_unlock_irqrestore(&trans->hcmd_lock, flags);
	return idx;
}

/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(priv, "%s: Read index for DMA queue txq id (%d), "
			  "index %d is out of range [0-%d] %d %d.\n", __func__,
			  txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
					q->write_ptr, q->read_ptr);
			iwlagn_fw_error(priv, false);
		}

	}
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it, the callback
 * will be executed.  The attached skb (if present) will only be freed
 * if the callback returns 1.
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans *trans = trans(priv);
	struct iwl_tx_queue *txq = &priv->txq[trans->shrd->cmd_queue];
	unsigned long flags;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans->shrd->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		  txq_id, trans->shrd->cmd_queue, sequence,
		  priv->txq[trans->shrd->cmd_queue].q.read_ptr,
		  priv->txq[trans->shrd->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(priv, pkt, 32);
		return;
	}

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->cmd[cmd_index];
	meta = &txq->meta[cmd_index];

	iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
			 DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		rxb->page = NULL;
	} else if (meta->callback)
		meta->callback(priv, cmd, pkt);

	spin_lock_irqsave(&trans->hcmd_lock, flags);

	iwl_hcmd_queue_reclaim(priv, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(cmd->hdr.cmd));
		wake_up_interruptible(&priv->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock_irqrestore(&trans->hcmd_lock, flags);
}

const char *get_cmd_string(u8 cmd)
{
	switch (cmd) {
		IWL_CMD(REPLY_ALIVE);
		IWL_CMD(REPLY_ERROR);
		IWL_CMD(REPLY_RXON);
		IWL_CMD(REPLY_RXON_ASSOC);
		IWL_CMD(REPLY_QOS_PARAM);
		IWL_CMD(REPLY_RXON_TIMING);
		IWL_CMD(REPLY_ADD_STA);
		IWL_CMD(REPLY_REMOVE_STA);
		IWL_CMD(REPLY_REMOVE_ALL_STA);
		IWL_CMD(REPLY_TXFIFO_FLUSH);
		IWL_CMD(REPLY_WEPKEY);
		IWL_CMD(REPLY_TX);
		IWL_CMD(REPLY_LEDS_CMD);
		IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
		IWL_CMD(COEX_PRIORITY_TABLE_CMD);
		IWL_CMD(COEX_MEDIUM_NOTIFICATION);
		IWL_CMD(COEX_EVENT_CMD);
		IWL_CMD(REPLY_QUIET_CMD);
		IWL_CMD(REPLY_CHANNEL_SWITCH);
		IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
		IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
		IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
		IWL_CMD(POWER_TABLE_CMD);
		IWL_CMD(PM_SLEEP_NOTIFICATION);
		IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
		IWL_CMD(REPLY_SCAN_CMD);
		IWL_CMD(REPLY_SCAN_ABORT_CMD);
		IWL_CMD(SCAN_START_NOTIFICATION);
		IWL_CMD(SCAN_RESULTS_NOTIFICATION);
		IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
		IWL_CMD(BEACON_NOTIFICATION);
		IWL_CMD(REPLY_TX_BEACON);
		IWL_CMD(WHO_IS_AWAKE_NOTIFICATION);
		IWL_CMD(QUIET_NOTIFICATION);
		IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
		IWL_CMD(MEASURE_ABORT_NOTIFICATION);
		IWL_CMD(REPLY_BT_CONFIG);
		IWL_CMD(REPLY_STATISTICS_CMD);
		IWL_CMD(STATISTICS_NOTIFICATION);
		IWL_CMD(REPLY_CARD_STATE_CMD);
		IWL_CMD(CARD_STATE_NOTIFICATION);
		IWL_CMD(MISSED_BEACONS_NOTIFICATION);
		IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
		IWL_CMD(SENSITIVITY_CMD);
		IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
		IWL_CMD(REPLY_RX_PHY_CMD);
		IWL_CMD(REPLY_RX_MPDU_CMD);
		IWL_CMD(REPLY_RX);
		IWL_CMD(REPLY_COMPRESSED_BA);
		IWL_CMD(CALIBRATION_CFG_CMD);
		IWL_CMD(CALIBRATION_RES_NOTIFICATION);
		IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION);
		IWL_CMD(REPLY_TX_POWER_DBM_CMD);
		IWL_CMD(TEMPERATURE_NOTIFICATION);
		IWL_CMD(TX_ANT_CONFIGURATION_CMD);
		IWL_CMD(REPLY_BT_COEX_PROFILE_NOTIF);
		IWL_CMD(REPLY_BT_COEX_PRIO_TABLE);
		IWL_CMD(REPLY_BT_COEX_PROT_ENV);
		IWL_CMD(REPLY_WIPAN_PARAMS);
		IWL_CMD(REPLY_WIPAN_RXON);
		IWL_CMD(REPLY_WIPAN_RXON_TIMING);
		IWL_CMD(REPLY_WIPAN_RXON_ASSOC);
		IWL_CMD(REPLY_WIPAN_QOS_PARAM);
		IWL_CMD(REPLY_WIPAN_WEPKEY);
		IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH);
		IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION);
		IWL_CMD(REPLY_WIPAN_DEACTIVATION_COMPLETE);
		IWL_CMD(REPLY_WOWLAN_PATTERNS);
		IWL_CMD(REPLY_WOWLAN_WAKEUP_FILTER);
		IWL_CMD(REPLY_WOWLAN_TSC_RSC_PARAMS);
		IWL_CMD(REPLY_WOWLAN_TKIP_PARAMS);
		IWL_CMD(REPLY_WOWLAN_KEK_KCK_MATERIAL);
		IWL_CMD(REPLY_WOWLAN_GET_STATUS);
	default:
		return "UNKNOWN";

	}
}

#define HOST_COMPLETE_TIMEOUT (2 * HZ)

static void iwl_generic_cmd_callback(struct iwl_priv *priv,
				     struct iwl_device_cmd *cmd,
				     struct iwl_rx_packet *pkt)
{
	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
			get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		return;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	switch (cmd->hdr.cmd) {
	case REPLY_TX_LINK_QUALITY_CMD:
	case SENSITIVITY_CMD:
		IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n",
				get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		break;
	default:
		IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n",
				get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
	}
#endif
}

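/**
 * iwl_send_cmd_async - Fire-and-forget variant of the host command path
 *
 * Enqueues the command and returns immediately; completion is reported
 * through the (possibly generic) callback from the Rx path.
 */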
static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	int ret;

	/* An asynchronous command can not expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;

	/* Assign a generic callback if one is not provided */
	if (!cmd->callback)
		cmd->callback = iwl_generic_cmd_callback;

	if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
		return -EBUSY;

	ret = iwl_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
			  get_cmd_string(cmd->id), ret);
		return ret;
	}
	return 0;
}

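/**
 * iwl_send_cmd_sync - Send a host command and block until it completes
 *
 * Sets HCMD_ACTIVE, enqueues the command and sleeps up to
 * HOST_COMPLETE_TIMEOUT jiffies for iwl_tx_cmd_complete() to clear the
 * bit, then translates RF-kill, FW-error and timeout conditions into
 * error codes.
 */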
static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	int cmd_idx;
	int ret;

	lockdep_assert_held(&trans->shrd->mutex);

	/* A synchronous command can not have a callback set. */
	if (WARN_ON(cmd->callback))
		return -EINVAL;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
			get_cmd_string(cmd->id));

	set_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
			get_cmd_string(cmd->id));

	cmd_idx = iwl_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
			  get_cmd_string(cmd->id), ret);
		return ret;
	}

	ret = wait_event_interruptible_timeout(priv(trans)->wait_command_queue,
			!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status),
			HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
			IWL_ERR(trans,
				"Error sending %s: time out after %dms.\n",
				get_cmd_string(cmd->id),
				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
			IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command"
				 " %s\n", get_cmd_string(cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
		IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
			       get_cmd_string(cmd->id));
		ret = -ECANCELED;
		goto fail;
	}
	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_ERR(trans, "Command %s failed: FW Error\n",
			       get_cmd_string(cmd->id));
		ret = -EIO;
		goto fail;
	}
	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			  get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		priv(trans)->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &=
							~CMD_WANT_SKB;
	}
fail:
	if (cmd->reply_page) {
		iwl_free_pages(trans->shrd, cmd->reply_page);
		cmd->reply_page = 0;
	}

	return ret;
}

int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	if (cmd->flags & CMD_ASYNC)
		return iwl_send_cmd_async(trans, cmd);

	return iwl_send_cmd_sync(trans, cmd);
}

int iwl_trans_pcie_send_cmd_pdu(struct iwl_trans *trans, u8 id, u32 flags,
		u16 len, const void *data)
{
	struct iwl_host_cmd cmd = {
		.id = id,
		.len = { len, },
		.data = { data, },
		.flags = flags,
	};

	return iwl_trans_pcie_send_cmd(trans, &cmd);
}
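
/*
 * Usage sketch (illustrative only, mirroring how callers such as the
 * BT-config path use the PDU helper for fixed-length commands):
 *
 *	struct iwl_bt_cmd bt_cmd = { ... };
 *
 *	ret = iwl_trans_pcie_send_cmd_pdu(trans, REPLY_BT_CONFIG,
 *					  CMD_SYNC, sizeof(bt_cmd), &bt_cmd);
 */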

/* Frees buffers until index _not_ inclusive */
void iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
			    struct sk_buff_head *skbs)
{
	struct iwl_tx_queue *txq = &priv(trans)->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int last_to_free;

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used. */
	last_to_free = iwl_queue_dec_wrap(index, q->n_bd);

	if ((index >= q->n_bd) ||
	   (iwl_queue_used(q, last_to_free) == 0)) {
		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
			  "last_to_free %d is out of range [0-%d] %d %d.\n",
			  __func__, txq_id, last_to_free, q->n_bd,
			  q->write_ptr, q->read_ptr);
		return;
	}

	IWL_DEBUG_TX_REPLY(trans, "reclaim: [%d, %d, %d]\n", txq_id,
			   q->read_ptr, index);

	if (WARN_ON(!skb_queue_empty(skbs)))
		return;

	for (;
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (WARN_ON_ONCE(txq->skbs[txq->q.read_ptr] == NULL))
			continue;

		__skb_queue_tail(skbs, txq->skbs[txq->q.read_ptr]);

		txq->skbs[txq->q.read_ptr] = NULL;

		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);

		iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr);
	}
}