/******************************************************************************
 *
 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "internal.h"
/* FIXME: need to abstract out TX command (once we know what it looks like) */
#include "dvm/commands.h"

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/**
 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
				       struct iwl_tx_queue *txq,
				       u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *) txq->entries[txq->q.write_ptr].cmd->payload;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	sta_id = tx_cmd->sta_id;
	sec_ctl = tx_cmd->sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += WEP_IV_LEN + WEP_ICV_LEN;
		break;
	}

	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
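
/*
 * Worked example for the packing above (values are illustrative only):
 * for a CCMP-protected frame with byte_cnt = 200 from sta_id = 5,
 * len = 200 + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE + CCMP_MIC_LEN
 *     = 200 + 4 + 4 + 8 = 216 (0x0D8), so
 * bc_ent = cpu_to_le16(0x0D8 | (5 << 12)) = cpu_to_le16(0x50D8).
 * The duplicate write past TFD_QUEUE_SIZE_MAX lets the scheduler read
 * the start of the circular buffer linearly after the wrap point.
 */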

/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		struct iwl_trans_pcie *trans_pcie =
			IWL_TRANS_GET_PCIE_TRANS(trans);
		/* if we're trying to save power */
		if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
			/* wake up nic if it's powered down ...
			 * uCode will wake up, and interrupt us again, so next
			 * time we'll skip this part. */
			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Tx queue %d requesting wakeup,"
					" GP1 = 0x%x\n", txq_id, reg);
				iwl_set_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				return;
			}

			iwl_write_direct32(trans, HBUS_TARG_WRPTR,
				     txq->q.write_ptr | (txq_id << 8));

		/*
		 * else not in power-save mode,
		 * uCode will never sleep when we're
		 * trying to tx (during RFKILL, we're not trying to tx).
		 */
		} else
			iwl_write32(trans, HBUS_TARG_WRPTR,
				    txq->q.write_ptr | (txq_id << 8));
	}
	txq->need_update = 0;
}
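
/*
 * Worked example (illustrative only): the value written to HBUS_TARG_WRPTR
 * above encodes the queue id in bits 8 and up and the write pointer in
 * bits 0-7, e.g. txq_id = 4 with write_ptr = 12 yields 12 | (4 << 8) = 0x40c.
 */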

static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}
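
/*
 * Layout sketch for the TB helpers above, as implied by their packing:
 * tb->lo holds the low 32 bits of the DMA address, while tb->hi_n_len
 * packs the 12-bit length into bits 4-15 and address bits 32-35 into
 * bits 0-3.  E.g. (illustrative values) addr = 0x980000000, len = 0x100
 * gives lo = 0x80000000 and hi_n_len = (0x100 << 4) | 0x9 = 0x1009.
 */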

static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
			  struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, this is a serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		dma_unmap_single(trans->dev,
				dma_unmap_addr(meta, mapping),
				dma_unmap_len(meta, len),
				DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i),
				iwl_tfd_tb_get_len(tfd, i), dma_dir);

	tfd->num_tbs = 0;
}

/**
 * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans: transport private data
 * @txq: tx queue
 * @dma_dir: the direction of the DMA mapping
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within the circular buffer)
 */
void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
		      enum dma_data_direction dma_dir)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	/* rd_ptr is bounded by n_bd and idx is bounded by n_window */
	int rd_ptr = txq->q.read_ptr;
	int idx = get_cmd_index(&txq->q, rd_ptr);

	lockdep_assert_held(&txq->lock);

	/* We have only q->n_window txq->entries, but we use q->n_bd tfds */
	iwl_unmap_tfd(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr],
		      dma_dir);

	/* free SKB */
	if (txq->entries) {
		struct sk_buff *skb;

		skb = txq->entries[idx].skb;

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->entries[idx].skb = NULL;
		}
	}
}

int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum of 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
		return -EINVAL;

	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(trans, "Unaligned address = %llx\n",
			(unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}
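
/*
 * Usage sketch (mirrors iwl_enqueue_hcmd() below; the addr/len variables
 * are illustrative): the first buffer of a TFD is attached with reset = 1
 * to clear the descriptor, any further chunks with reset = 0:
 *
 *	iwlagn_txq_attach_buf_to_tfd(trans, txq, first_addr, first_len, 1);
 *	iwlagn_txq_attach_buf_to_tfd(trans, txq, chunk_addr, chunk_len, 0);
 */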

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and consists of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  The driver keeps a minimum of 2
 * empty entries in each circular buffer, to protect against confusing empty
 * and full queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * Tx queues have low-mark and high-mark limits. If, after queuing a packet
 * for Tx, the free space drops below the low mark, the Tx queue is stopped.
 * When packets are reclaimed (on the 'tx done' IRQ) and the free space rises
 * above the high mark, the Tx queue is resumed.
 *
 ***************************************************/

int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
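
/*
 * Worked example (assuming a data queue where n_bd == n_window == 256):
 * with read_ptr = 10 and write_ptr = 250, s = 10 - 250 = -240; read_ptr
 * is not > write_ptr, so n_bd is not subtracted; s <= 0, so s += 256
 * giving 16; minus the reserve of 2 this leaves 14 usable slots.
 */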

/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	if (WARN_ON(!is_power_of_2(count)))
		return -EINVAL;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}
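
/*
 * Example (illustrative): slots_num = 256 yields low_mark = 64 and
 * high_mark = 32, while a 32-slot window (as typically used for the
 * command queue) yields low_mark = 8 and high_mark = 4.
 */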

static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					  struct iwl_tx_queue *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *)txq->entries[txq->q.read_ptr].cmd->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans_pcie->cmd_queue)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

static int iwl_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
				 u16 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_read_targ_mem(trans, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_write_targ_mem(trans, tbl_dw_addr, tbl_dw);

	return 0;
}
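
/*
 * Layout note for the read-modify-write above: each 32-bit word of the
 * translation table holds the RA/TID mapping for two queues, the
 * even-numbered queue in the low 16 bits and the odd-numbered one in the
 * high 16 bits.  E.g. (illustrative) txq_id = 11 only replaces the upper
 * half-word of the dword shared by queues 10 and 11.
 */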

static inline void iwl_txq_set_inactive(struct iwl_trans *trans, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(trans,
		SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
		(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}

void __iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id,
					int fifo, int sta_id, int tid,
					int frame_limit, u16 ssn)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->irq_lock);

	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	/* Stop this Tx queue before configuring it */
	iwl_txq_set_inactive(trans, txq_id);

	/* Set this queue as a chain-building queue unless it is CMD queue */
	if (txq_id != trans_pcie->cmd_queue)
		iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));

	/* If this queue is mapped to a certain station: it is an AGG queue */
	if (sta_id != IWL_INVALID_STATION) {
		u16 ra_tid = BUILD_RAxTID(sta_id, tid);

		/* Map receiver-address / traffic-ID to this queue */
		iwl_txq_set_ratid_map(trans, ra_tid, txq_id);

		/* enable aggregations for the queue */
		iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
	} else {
		/*
		 * disable aggregations for the queue, this will also make the
		 * ra_tid mapping configuration irrelevant since it is now a
		 * non-AGG queue.
		 */
		iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
	}

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
	trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);

	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (ssn & 0xff) | (txq_id << 8));
	iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
		       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		       (fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
		       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
		       SCD_QUEUE_STTS_REG_MSK);
	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n",
			    txq_id, fifo, ssn & 0xff);
}

void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
			       int sta_id, int tid, int frame_limit, u16 ssn)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	__iwl_trans_pcie_txq_enable(trans, txq_id, fifo, sta_id,
				    tid, frame_limit, ssn);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 rd_ptr, wr_ptr;
	int n_bd = trans_pcie->txq[txq_id].q.n_bd;

	if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
		WARN_ONCE(1, "queue %d not used", txq_id);
		return;
	}

	rd_ptr = iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & (n_bd - 1);
	wr_ptr = iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id));

	WARN_ONCE(rd_ptr != wr_ptr, "queue %d isn't empty: [%d,%d]",
		  txq_id, rd_ptr, wr_ptr);

	iwl_txq_set_inactive(trans, txq_id);
	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data
 * @cmd: a pointer to the uCode command structure
 *
 * The function returns a value < 0 to indicate that the operation
 * failed. On success, it returns the index (>= 0) of the command in the
 * command queue.
 */
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	u32 idx;
	u16 copy_size, cmd_size;
	bool had_nocopy = false;
	int i;
	u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_idx;
#endif

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}
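
	/*
	 * Sizing example (illustrative): a command with len[0] = 16 copied
	 * normally and len[1] = 200 flagged IWL_HCMD_DFL_NOCOPY ends up
	 * with copy_size = sizeof(out_cmd->hdr) + 16 (what is copied into
	 * the command buffer) and cmd_size = sizeof(out_cmd->hdr) + 216
	 * (the total the hardware will transfer).
	 */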

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;

	spin_lock_bh(&txq->lock);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
					 INDEX_TO_SEQ(q->write_ptr));
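
	/*
	 * Example (illustrative): with cmd_queue = 4 and write_ptr = 7 the
	 * sequence field becomes (4 << 8) | 7 = 0x0407; the rx path in
	 * iwl_tx_cmd_complete() recovers both halves with SEQ_TO_QUEUE()
	 * and SEQ_TO_INDEX().
	 */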

	/* and copy the data that needs to be copied */

	cmd_dest = out_cmd->payload;
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
		cmd_dest += cmd->len[i];
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);

	phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
				   DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = dma_map_single(trans->dev, (void *)cmd->data[i],
					   cmd->len[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_unmap_tfd(trans, out_meta,
				      &txq->tfds[q->write_ptr],
				      DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmd->data[i];
		trace_lens[trace_idx] = cmd->len[i];
		trace_idx++;
#endif
	}

	out_meta->flags = cmd->flags;

	txq->need_update = 1;

	/* check that tracing gets all possible blocks */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(trans->dev, cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif

	/* start timer if queue currently empty */
	if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

 out:
	spin_unlock_bh(&txq->lock);
	return idx;
}

static inline void iwl_queue_progress(struct iwl_trans_pcie *trans_pcie,
				      struct iwl_tx_queue *txq)
{
	if (!trans_pcie->wd_timeout)
		return;

	/*
	 * if empty delete timer, otherwise move timer forward
	 * since we're making progress on this queue
	 */
	if (txq->q.read_ptr == txq->q.write_ptr)
		del_timer(&txq->stuck_timer);
	else
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
}

/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When the FW advances the 'R' index, all entries between the old and new
 * 'R' index need to be reclaimed. As a result, some free space becomes
 * available.  If there is enough free space (> low mark), wake the stack
 * that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
				   int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	lockdep_assert_held(&txq->lock);

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, idx, q->n_bd,
			q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
				idx, q->write_ptr, q->read_ptr);
			iwl_op_mode_nic_error(trans->op_mode);
		}
	}

	iwl_queue_progress(trans_pcie, txq);
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 * @handler_status: return value of the handler of the command
 *	(put in setup_rx_handlers)
 *
 * If an Rx buffer has an async callback associated with it, the callback
 * will be executed.  The attached skb (if present) will only be freed
 * if the callback returns 1.
 */
void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
			 int handler_status)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans_pcie->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, trans_pcie->cmd_queue, sequence,
		 trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
		 trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	spin_lock(&txq->lock);

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->entries[cmd_index].cmd;
	meta = &txq->entries[cmd_index].meta;

	iwl_unmap_tfd(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		struct page *p = rxb_steal_page(rxb);

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
		meta->source->_rx_page_order = trans_pcie->rx_page_order;
		meta->source->handler_status = handler_status;
	}

	iwl_hcmd_queue_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 trans_pcie_get_cmd_string(trans_pcie,
							   cmd->hdr.cmd));
		}
		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       trans_pcie_get_cmd_string(trans_pcie,
							 cmd->hdr.cmd));
		wake_up(&trans->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock(&txq->lock);
}

#define HOST_COMPLETE_TIMEOUT (2 * HZ)

static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	/* An asynchronous command cannot expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;

	ret = iwl_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret);
		return ret;
	}
	return 0;
}

static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
		       trans_pcie_get_cmd_string(trans_pcie, cmd->id));

	if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE,
				     &trans_pcie->status))) {
		IWL_ERR(trans, "Command %s: a command is already active!\n",
			trans_pcie_get_cmd_string(trans_pcie, cmd->id));
		return -EIO;
	}

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
		       trans_pcie_get_cmd_string(trans_pcie, cmd->id));

	cmd_idx = iwl_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret);
		return ret;
	}

	ret = wait_event_timeout(trans->wait_command_queue,
				 !test_bit(STATUS_HCMD_ACTIVE,
					   &trans_pcie->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
			struct iwl_tx_queue *txq =
				&trans_pcie->txq[trans_pcie->cmd_queue];
			struct iwl_queue *q = &txq->q;

			IWL_ERR(trans,
				"Error sending %s: time out after %dms.\n",
				trans_pcie_get_cmd_string(trans_pcie, cmd->id),
				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			IWL_ERR(trans,
				"Current CMD queue read_ptr %d write_ptr %d\n",
				q->read_ptr, q->write_ptr);

			clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
			IWL_DEBUG_INFO(trans,
				       "Clearing HCMD_ACTIVE for command %s\n",
				       trans_pcie_get_cmd_string(trans_pcie,
								 cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			trans_pcie_get_cmd_string(trans_pcie, cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		trans_pcie->txq[trans_pcie->cmd_queue].
			entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}

int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	if (cmd->flags & CMD_ASYNC)
		return iwl_send_cmd_async(trans, cmd);

	return iwl_send_cmd_sync(trans, cmd);
}
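
/*
 * Usage sketch (hypothetical caller, illustrative only): a synchronous
 * command that wants the response packet back could look like:
 *
 *	struct iwl_host_cmd cmd = {
 *		.id = REPLY_ECHO,
 *		.flags = CMD_WANT_SKB,
 *	};
 *	int ret = iwl_trans_pcie_send_cmd(trans, &cmd);
 *
 *	if (!ret)
 *		iwl_free_resp(&cmd);
 */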

/* Frees buffers until index _not_ inclusive */
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
			 struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int last_to_free;
	int freed = 0;

	/* This function is not meant to release the cmd queue */
	if (WARN_ON(txq_id == trans_pcie->cmd_queue))
		return 0;

	lockdep_assert_held(&txq->lock);

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used. */
	last_to_free = iwl_queue_dec_wrap(index, q->n_bd);

	if ((index >= q->n_bd) ||
	   (iwl_queue_used(q, last_to_free) == 0)) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free, q->n_bd,
			q->write_ptr, q->read_ptr);
		return 0;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		return 0;

	for (;
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
			continue;

		__skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb);

		txq->entries[txq->q.read_ptr].skb = NULL;

		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);

		iwl_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
		freed++;
	}

	iwl_queue_progress(trans_pcie, txq);

	return freed;
}