/******************************************************************************
 *
 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-agn-hw.h"
#include "iwl-op-mode.h"
#include "iwl-trans-pcie-int.h"

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/**
 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
					   struct iwl_tx_queue *txq,
					   u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(struct iwl_tx_cmd *) txq->cmd[txq->q.write_ptr]->payload;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	sta_id = tx_cmd->sta_id;
	sec_ctl = tx_cmd->sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += WEP_IV_LEN + WEP_ICV_LEN;
		break;
	}

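	/* 12-bit byte count in the low bits, station id in the top nibble */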
	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

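	/*
	 * Entries in the first TFD_QUEUE_SIZE_BC_DUP slots are mirrored past
	 * TFD_QUEUE_SIZE_MAX so the scheduler can keep reading byte counts
	 * sequentially when the circular buffer wraps.
	 */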
	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}

/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	if (cfg(trans)->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		struct iwl_trans_pcie *trans_pcie =
			IWL_TRANS_GET_PCIE_TRANS(trans);
		/* if we're trying to save power */
		if (test_bit(STATUS_POWER_PMI, &trans_pcie->status)) {
			/* wake up nic if it's powered down ...
			 * uCode will wake up, and interrupt us again, so next
			 * time we'll skip this part. */
			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Tx queue %d requesting wakeup,"
					" GP1 = 0x%x\n", txq_id, reg);
				iwl_set_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				return;
			}

			iwl_write_direct32(trans, HBUS_TARG_WRPTR,
				     txq->q.write_ptr | (txq_id << 8));

		/*
		 * else not in power-save mode,
		 * uCode will never sleep when we're
		 * trying to tx (during RFKILL, we're not trying to tx).
		 */
		} else
			iwl_write32(trans, HBUS_TARG_WRPTR,
				    txq->q.write_ptr | (txq_id << 8));
	}
	txq->need_update = 0;
}

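/*
 * Accessors for the Tx buffer (TB) entries inside a TFD: each TB packs a
 * 36-bit DMA address (32 bits in 'lo' plus the low nibble of 'hi_n_len')
 * together with a 12-bit length in the upper bits of 'hi_n_len'.
 */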
static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
		     struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite a serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		dma_unmap_single(trans->dev,
				dma_unmap_addr(meta, mapping),
				dma_unmap_len(meta, len),
				DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i),
				iwl_tfd_tb_get_len(tfd, i), dma_dir);
}

/**
 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 * @index - the index of the TFD to be freed
 * @dma_dir - the direction of the DMA mapping
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
	int index, enum dma_data_direction dma_dir)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	lockdep_assert_held(&txq->lock);

	iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index], dma_dir);

	/* free SKB */
	if (txq->skbs) {
		struct sk_buff *skb;

		skb = txq->skbs[index];

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->skbs[index] = NULL;
		}
	}
}

int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum of 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			  IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
		return -EINVAL;

	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(trans, "Unaligned address = %llx\n",
			  (unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For Tx queues, there are low mark and high mark limits. If, after queuing
 * a packet for Tx, the free space becomes less than the low mark, the Tx
 * queue is stopped. When reclaiming packets (on the 'tx done' IRQ), if the
 * free space becomes greater than the high mark, the Tx queue is resumed.
 *
 ***************************************************/

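/*
 * iwl_queue_space - return how many slots are still free in the queue,
 * keeping a reserve of two entries so that a full queue can always be
 * distinguished from an empty one.
 */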
int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}

/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	if (WARN_ON(!is_power_of_2(count)))
		return -EINVAL;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}

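/*
 * iwlagn_txq_inval_byte_cnt_tbl - reset the byte-count entry of the TFD at
 * the current read pointer (and its duplicate, if any) once that TFD has
 * been reclaimed.
 */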
static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					  struct iwl_tx_queue *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(struct iwl_tx_cmd *) txq->cmd[txq->q.read_ptr]->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans_pcie->cmd_queue)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

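/*
 * iwlagn_tx_queue_set_q2ratid - map a receiver-address/TID pair to a queue.
 * The scheduler keeps one translation dword per pair of queues; even queues
 * use the low 16 bits and odd queues the high 16 bits, so the other half is
 * preserved on update.
 */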
static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
					u16 txq_id)
{
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_read_targ_mem(trans, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_write_targ_mem(trans, tbl_dw_addr, tbl_dw);

	return 0;
}

static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(trans,
		SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
		(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}
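
/*
 * iwl_trans_set_wr_ptrs - point both the driver write pointer
 * (HBUS_TARG_WRPTR) and the scheduler read pointer (SCD_QUEUE_RDPTR) of a
 * Tx queue at the given index.
 */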

413
void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
414 415
				int txq_id, u32 index)
{
	IWL_DEBUG_TX_QUEUES(trans, "Q %d  WrPtr: %d\n", txq_id, index & 0xff);
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			(index & 0xff) | (txq_id << 8));
	iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), index);
}

void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
				   struct iwl_tx_queue *txq,
				   int tx_fifo_id, bool active)
{
	int txq_id = txq->q.id;

	iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
			(active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			(tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
			(1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			SCD_QUEUE_STTS_REG_MSK);

	if (active)
		IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d\n",
				    txq_id, tx_fifo_id);
	else
		IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}

void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int txq_id, int fifo,
				 int sta_id, int tid, int frame_limit, u16 ssn)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;
	u16 ra_tid = BUILD_RAxTID(sta_id, tid);

	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Stop this Tx queue before configuring it */
	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));

	/* enable aggregations for the queue */
	iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
	trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);
	iwl_trans_set_wr_ptrs(trans, txq_id, ssn);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	iwl_set_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
				      fifo, true);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

void iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
		WARN_ONCE(1, "queue %d not used", txq_id);
		return;
	}

	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));

	trans_pcie->txq[txq_id].q.read_ptr = 0;
	trans_pcie->txq[txq_id].q.write_ptr = 0;
	iwl_trans_set_wr_ptrs(trans, txq_id, 0);

	iwl_clear_bits_prph(trans, SCD_INTERRUPT_MASK, BIT(txq_id));

	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
				      0, false);
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns a value < 0 to indicate that the operation
 * failed. On success, it returns the index (>= 0) of the command in the
 * command queue.
 */
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	u32 idx;
	u16 copy_size, cmd_size;
	bool had_nocopy = false;
	int i;
	u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_idx;
#endif

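	/*
	 * Compute two sizes: copy_size is what gets copied into the
	 * pre-allocated command buffer (header plus all non-NOCOPY chunks),
	 * while cmd_size is the full logical command length including
	 * NOCOPY fragments that are mapped directly from the caller.
	 */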
	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;

	spin_lock_bh(&txq->lock);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
					 INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */

	cmd_dest = out_cmd->payload;
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
		cmd_dest += cmd->len[i];
	}

	IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, "
			"%d bytes at %d[%d]:%d\n",
			get_cmd_string(out_cmd->hdr.cmd),
			out_cmd->hdr.cmd,
			le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
			q->write_ptr, idx, trans_pcie->cmd_queue);

	phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
				DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(trans, txq,
					phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = dma_map_single(trans->dev,
					   (void *)cmd->data[i],
					   cmd->len[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwlagn_unmap_tfd(trans, out_meta,
					 &txq->tfds[q->write_ptr],
					 DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmd->data[i];
		trace_lens[trace_idx] = cmd->len[i];
		trace_idx++;
#endif
	}

	out_meta->flags = cmd->flags;

	txq->need_update = 1;

	/* check that tracing gets all possible blocks */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(trans->dev, cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif

	/* start timer if queue currently empty */
	if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

 out:
	spin_unlock_bh(&txq->lock);
	return idx;
}

static inline void iwl_queue_progress(struct iwl_trans_pcie *trans_pcie,
				      struct iwl_tx_queue *txq)
{
	if (!trans_pcie->wd_timeout)
		return;

	/*
	 * if empty delete timer, otherwise move timer forward
	 * since we're making progress on this queue
	 */
	if (txq->q.read_ptr == txq->q.write_ptr)
		del_timer(&txq->stuck_timer);
	else
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
}

/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
				   int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	lockdep_assert_held(&txq->lock);

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
			  "index %d is out of range [0-%d] %d %d.\n", __func__,
			  txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", idx,
					q->write_ptr, q->read_ptr);
			iwl_op_mode_nic_error(trans->op_mode);
		}

	}

	iwl_queue_progress(trans_pcie, txq);
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
741 742
 * @handler_status: return value of the handler of the command
 *	(put in setup_rx_handlers)
743 744 745 746 747
 *
 * If an Rx buffer has an async callback associated with it, the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1.
 */
void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
			 int handler_status)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue, then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans_pcie->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		  txq_id, trans_pcie->cmd_queue, sequence,
		  trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
		  trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	spin_lock(&txq->lock);

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->cmd[cmd_index];
	meta = &txq->meta[cmd_index];

	iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
			 DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
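	/*
	 * If the caller asked for the response (CMD_WANT_SKB), hand over the
	 * Rx page so that it outlives this handler; the waiter releases it
	 * later via iwl_free_resp().
	 */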
	if (meta->flags & CMD_WANT_SKB) {
		struct page *p = rxb_steal_page(rxb);

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
		meta->source->_rx_page_order = trans_pcie->rx_page_order;
		meta->source->handler_status = handler_status;
	}

	iwl_hcmd_queue_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 get_cmd_string(cmd->hdr.cmd));
		}
		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(cmd->hdr.cmd));
		wake_up(&trans->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock(&txq->lock);
}

#define HOST_COMPLETE_TIMEOUT (2 * HZ)

static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	int ret;

	/* An asynchronous command can not expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;


	ret = iwl_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			  get_cmd_string(cmd->id), ret);
		return ret;
	}
	return 0;
}

static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
			get_cmd_string(cmd->id));

	if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE,
				     &trans_pcie->status))) {
		IWL_ERR(trans, "Command %s: a command is already active!\n",
			get_cmd_string(cmd->id));
		return -EIO;
	}

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
			get_cmd_string(cmd->id));

	cmd_idx = iwl_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			  get_cmd_string(cmd->id), ret);
		return ret;
	}

	ret = wait_event_timeout(trans->wait_command_queue,
			!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status),
			HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
			struct iwl_tx_queue *txq =
				&trans_pcie->txq[trans_pcie->cmd_queue];
			struct iwl_queue *q = &txq->q;

			IWL_ERR(trans,
				"Error sending %s: time out after %dms.\n",
				get_cmd_string(cmd->id),
				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			IWL_ERR(trans,
				"Current CMD queue read_ptr %d write_ptr %d\n",
				q->read_ptr, q->write_ptr);

			clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
			IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command"
				 "%s\n", get_cmd_string(cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			  get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		trans_pcie->txq[trans_pcie->cmd_queue].meta[cmd_idx].flags &=
							~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}

int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	if (cmd->flags & CMD_ASYNC)
		return iwl_send_cmd_async(trans, cmd);

	return iwl_send_cmd_sync(trans, cmd);
}

/* Frees buffers until index _not_ inclusive */
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
			 struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int last_to_free;
	int freed = 0;

	/* This function is not meant to release the cmd queue */
	if (WARN_ON(txq_id == trans_pcie->cmd_queue))
		return 0;

	lockdep_assert_held(&txq->lock);

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. That entry must be in use. */
	last_to_free = iwl_queue_dec_wrap(index, q->n_bd);

	if ((index >= q->n_bd) ||
	   (iwl_queue_used(q, last_to_free) == 0)) {
		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
			  "last_to_free %d is out of range [0-%d] %d %d.\n",
			  __func__, txq_id, last_to_free, q->n_bd,
			  q->write_ptr, q->read_ptr);
		return 0;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		return 0;

	for (;
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

960
		if (WARN_ON_ONCE(txq->skbs[txq->q.read_ptr] == NULL))
961 962
			continue;

963
		__skb_queue_tail(skbs, txq->skbs[txq->q.read_ptr]);
964

965
		txq->skbs[txq->q.read_ptr] = NULL;
966

967
		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);
968

969
		iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr, DMA_TO_DEVICE);
970
		freed++;
971
	}
972 973 974

	iwl_queue_progress(trans_pcie, txq);

975
	return freed;
976
}