/******************************************************************************
 *
 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "internal.h"
/* FIXME: need to abstract out TX command (once we know what it looks like) */
#include "dvm/commands.h"

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/**
 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
				       struct iwl_tx_queue *txq,
				       u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *) txq->entries[txq->q.write_ptr].cmd->payload;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	sta_id = tx_cmd->sta_id;
	sec_ctl = tx_cmd->sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += WEP_IV_LEN + WEP_ICV_LEN;
		break;
	}

	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
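
/*
 * Worked example for the function above (illustrative values): for
 * byte_cnt = 100 from sta_id 5 with no encryption, len = 100 +
 * IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE = 108, so
 * bc_ent = cpu_to_le16((108 & 0xFFF) | (5 << 12)) = cpu_to_le16(0x506c).
 * Entries below TFD_QUEUE_SIZE_BC_DUP are mirrored at index
 * TFD_QUEUE_SIZE_MAX + write_ptr, presumably so the hardware scheduler
 * can read a contiguous window across the circular-buffer wrap.
 */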

/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		struct iwl_trans_pcie *trans_pcie =
			IWL_TRANS_GET_PCIE_TRANS(trans);
		/* if we're trying to save power */
		if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
			/* wake up nic if it's powered down ...
			 * uCode will wake up, and interrupt us again, so next
			 * time we'll skip this part. */
			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Tx queue %d requesting wakeup, GP1 = 0x%x\n",
					txq_id, reg);
				iwl_set_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				return;
			}

			iwl_write_direct32(trans, HBUS_TARG_WRPTR,
				     txq->q.write_ptr | (txq_id << 8));

		/*
		 * else not in power-save mode,
		 * uCode will never sleep when we're
		 * trying to tx (during RFKILL, we're not trying to tx).
		 */
		} else
			iwl_write32(trans, HBUS_TARG_WRPTR,
				    txq->q.write_ptr | (txq_id << 8));
	}
	txq->need_update = 0;
}
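
/*
 * Note on the encoding used above: the value written to HBUS_TARG_WRPTR
 * packs the write index into the low byte and the queue number into bits
 * 8 and up, so e.g. (illustrative) write_ptr 5 on queue 4 is written as
 * 0x405.
 */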

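/*
 * TFD buffer-descriptor (TB) helpers. Each TB packs a 36-bit DMA address
 * and a 12-bit length into six bytes: address bits 0-31 in 'lo', address
 * bits 32-35 in the low nibble of 'hi_n_len', and the length in the upper
 * 12 bits of 'hi_n_len'. Worked example (illustrative values):
 * addr = 0x312345678 and len = 0x100 give lo = 0x12345678 and
 * hi_n_len = (0x100 << 4) | 0x3 = 0x1003.
 */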
static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
			  struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		dma_unmap_single(trans->dev,
				dma_unmap_addr(meta, mapping),
				dma_unmap_len(meta, len),
				DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i),
				iwl_tfd_tb_get_len(tfd, i), dma_dir);

	tfd->num_tbs = 0;
}

/**
 * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans: transport private data
 * @txq: tx queue
 * @dma_dir: the direction of the DMA mapping
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
		      enum dma_data_direction dma_dir)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	/* rd_ptr is bounded by n_bd and idx is bounded by n_window */
	int rd_ptr = txq->q.read_ptr;
	int idx = get_cmd_index(&txq->q, rd_ptr);

	lockdep_assert_held(&txq->lock);

	/* We have only q->n_window txq->entries, but we use q->n_bd tfds */
	iwl_unmap_tfd(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr],
		      dma_dir);

	/* free SKB */
	if (txq->entries) {
		struct sk_buff *skb;

		skb = txq->entries[idx].skb;

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->entries[idx].skb = NULL;
		}
	}
}

int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
		return -EINVAL;

	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(trans, "Unaligned address = %llx\n",
			(unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For the Tx queue, there are low mark and high mark limits. If, after queuing
 * a packet for Tx, free space becomes less than the low mark, the Tx queue is
 * stopped. When reclaiming packets (on the 'tx done' IRQ), if free space
 * becomes greater than the high mark, the Tx queue is resumed.
 *
 ***************************************************/

int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
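
/*
 * Worked example for iwl_queue_space() above (illustrative): with
 * n_bd = 256, n_window = 64, read_ptr = 10 and write_ptr = 20,
 * s = 10 - 20 = -10, then s += 64 -> 54, and the 2-entry reserve
 * leaves 52 reported free slots.
 */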

/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	if (WARN_ON(!is_power_of_2(count)))
		return -EINVAL;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}
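
/*
 * Example (illustrative): iwl_queue_init(q, 256, 64, id) sets up a
 * 256-TFD circular buffer with a 64-entry command window, giving
 * low_mark = 16 and high_mark = 8.
 */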

static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					  struct iwl_tx_queue *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *)txq->entries[txq->q.read_ptr].cmd->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans_pcie->cmd_queue)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

static int iwl_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
				 u16 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_read_targ_mem(trans, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_write_targ_mem(trans, tbl_dw_addr, tbl_dw);

	return 0;
}
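
/*
 * Each 32-bit word of the scheduler translation table read-modify-written
 * above holds the RA/TID mapping for two adjacent queues: even-numbered
 * queues occupy the low 16 bits and odd-numbered queues the high 16 bits,
 * hence the txq_id & 0x1 test.
 */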

static inline void iwl_txq_set_inactive(struct iwl_trans *trans, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(trans,
		SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
		(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}

void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
			       int sta_id, int tid, int frame_limit, u16 ssn)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	/* Stop this Tx queue before configuring it */
	iwl_txq_set_inactive(trans, txq_id);

	/* Set this queue as a chain-building queue unless it is CMD queue */
	if (txq_id != trans_pcie->cmd_queue)
		iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));

	/* If this queue is mapped to a certain station: it is an AGG queue */
	if (sta_id != IWL_INVALID_STATION) {
		u16 ra_tid = BUILD_RAxTID(sta_id, tid);

		/* Map receiver-address / traffic-ID to this queue */
		iwl_txq_set_ratid_map(trans, ra_tid, txq_id);

		/* enable aggregations for the queue */
		iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
	} else {
		/*
		 * disable aggregations for the queue, this will also make the
		 * ra_tid mapping configuration irrelevant since it is now a
		 * non-AGG queue.
		 */
		iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
	}

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
	trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);

	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (ssn & 0xff) | (txq_id << 8));
	iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
		       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		       (fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
		       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
		       SCD_QUEUE_STTS_REG_MSK);
	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n",
			    txq_id, fifo, ssn & 0xff);
}

void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 rd_ptr, wr_ptr;
	int n_bd = trans_pcie->txq[txq_id].q.n_bd;

	if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
		WARN_ONCE(1, "queue %d not used", txq_id);
		return;
	}

	rd_ptr = iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & (n_bd - 1);
	wr_ptr = iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id));

	WARN_ONCE(rd_ptr != wr_ptr, "queue %d isn't empty: [%d,%d]",
		  txq_id, rd_ptr, wr_ptr);

	iwl_txq_set_inactive(trans, txq_id);
	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns a value < 0 to indicate that the operation
 * failed. On success, it returns the index (>= 0) of the command in the
 * command queue.
 */
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	u32 idx;
	u16 copy_size, cmd_size;
	bool had_nocopy = false;
	int i;
	u32 cmd_pos;

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}
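
	/*
	 * Example (illustrative): cmd->len = {16, 400} with the second
	 * fragment marked IWL_HCMD_DFL_NOCOPY gives copy_size = hdr + 16
	 * (copied below and mapped as one chunk) and cmd_size = hdr + 416;
	 * the NOCOPY fragment is mapped as its own TB further down.
	 */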

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;

	spin_lock_bh(&txq->lock);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
					 INDEX_TO_SEQ(q->write_ptr));
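
	/*
	 * The 16-bit sequence field just built packs the command queue
	 * number and the write index, so iwl_tx_cmd_complete() below can
	 * recover both from the response via SEQ_TO_QUEUE()/SEQ_TO_INDEX().
	 */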

	/* and copy the data that needs to be copied */
	cmd_pos = offsetof(struct iwl_device_cmd, payload);
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], cmd->len[i]);
		cmd_pos += cmd->len[i];
	}

	WARN_ON_ONCE(txq->entries[idx].copy_cmd);

	/*
	 * Since out_cmd will be the source address of the FH, it will write
	 * the retry count there. So when the user needs to receive the HCMD
	 * that corresponds to the response in the response handler, it needs
	 * to set CMD_WANT_HCMD.
	 */
	if (cmd->flags & CMD_WANT_HCMD) {
		txq->entries[idx].copy_cmd =
			kmemdup(out_cmd, cmd_pos, GFP_ATOMIC);
		if (unlikely(!txq->entries[idx].copy_cmd)) {
			idx = -ENOMEM;
			goto out;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);

	phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
				   DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = dma_map_single(trans->dev, (void *)cmd->data[i],
					   cmd->len[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_unmap_tfd(trans, out_meta,
				      &txq->tfds[q->write_ptr],
				      DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     cmd->len[i], 0);
	}

	out_meta->flags = cmd->flags;

	txq->need_update = 1;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size,
			       &out_cmd->hdr, copy_size);

	/* start timer if queue currently empty */
	if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

 out:
	spin_unlock_bh(&txq->lock);
	return idx;
}

static inline void iwl_queue_progress(struct iwl_trans_pcie *trans_pcie,
				      struct iwl_tx_queue *txq)
{
	if (!trans_pcie->wd_timeout)
		return;

	/*
	 * if empty delete timer, otherwise move timer forward
	 * since we're making progress on this queue
	 */
	if (txq->q.read_ptr == txq->q.write_ptr)
		del_timer(&txq->stuck_timer);
	else
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
}

/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms.  If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
				   int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	lockdep_assert_held(&txq->lock);

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, idx, q->n_bd,
			q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
				idx, q->write_ptr, q->read_ptr);
			iwl_op_mode_nic_error(trans->op_mode);
		}
	}

	iwl_queue_progress(trans_pcie, txq);
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 * @handler_status: return value of the handler of the command
 *	(put in setup_rx_handlers)
 *
 * If an Rx buffer has an async callback associated with it, the callback
 * will be executed.  The attached skb (if present) will only be freed
 * if the callback returns 1.
 */
void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
			 int handler_status)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans_pcie->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, trans_pcie->cmd_queue, sequence,
		 trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
		 trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	spin_lock(&txq->lock);

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->entries[cmd_index].cmd;
	meta = &txq->entries[cmd_index].meta;

	iwl_unmap_tfd(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		struct page *p = rxb_steal_page(rxb);

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
		meta->source->_rx_page_order = trans_pcie->rx_page_order;
		meta->source->handler_status = handler_status;
	}

	iwl_hcmd_queue_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 trans_pcie_get_cmd_string(trans_pcie,
							   cmd->hdr.cmd));
		}
		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       trans_pcie_get_cmd_string(trans_pcie,
							 cmd->hdr.cmd));
		wake_up(&trans->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock(&txq->lock);
}

#define HOST_COMPLETE_TIMEOUT (2 * HZ)
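/* i.e. a synchronous command waits at most two seconds for completion */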

static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	/* An asynchronous command can not expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;

	ret = iwl_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret);
		return ret;
	}
	return 0;
}

static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
		       trans_pcie_get_cmd_string(trans_pcie, cmd->id));

	if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE,
				     &trans_pcie->status))) {
		IWL_ERR(trans, "Command %s: a command is already active!\n",
			trans_pcie_get_cmd_string(trans_pcie, cmd->id));
		return -EIO;
	}

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
		       trans_pcie_get_cmd_string(trans_pcie, cmd->id));

	cmd_idx = iwl_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret);
		return ret;
	}

	ret = wait_event_timeout(trans->wait_command_queue,
				 !test_bit(STATUS_HCMD_ACTIVE,
					   &trans_pcie->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
			struct iwl_tx_queue *txq =
				&trans_pcie->txq[trans_pcie->cmd_queue];
			struct iwl_queue *q = &txq->q;

			IWL_ERR(trans,
				"Error sending %s: time out after %dms.\n",
				trans_pcie_get_cmd_string(trans_pcie, cmd->id),
				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			IWL_ERR(trans,
				"Current CMD queue read_ptr %d write_ptr %d\n",
				q->read_ptr, q->write_ptr);

			clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
			IWL_DEBUG_INFO(trans,
				       "Clearing HCMD_ACTIVE for command %s\n",
				       trans_pcie_get_cmd_string(trans_pcie,
								 cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			trans_pcie_get_cmd_string(trans_pcie, cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		trans_pcie->txq[trans_pcie->cmd_queue].
			entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}

int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	if (cmd->flags & CMD_ASYNC)
		return iwl_send_cmd_async(trans, cmd);

	return iwl_send_cmd_sync(trans, cmd);
}

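/*
 * Illustrative usage sketch (assumed caller code, not part of this file):
 * an op-mode would build an iwl_host_cmd and send it through the
 * transport, e.g.
 *
 *	struct iwl_host_cmd cmd = {
 *		.id = REPLY_ECHO,
 *		.flags = CMD_WANT_SKB,
 *	};
 *	int ret = iwl_trans_pcie_send_cmd(trans, &cmd);
 *	if (!ret)
 *		iwl_free_resp(&cmd);
 *
 * REPLY_ECHO is only an assumed example command id (from dvm/commands.h).
 */
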
/* Frees buffers until index _not_ inclusive */
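/*
 * Example (illustrative): with read_ptr = 3 and index = 7, entries 3-6
 * are moved onto 'skbs' and read_ptr ends up at 7; entry 7 itself is
 * left untouched.
 */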
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
			 struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int last_to_free;
	int freed = 0;

	/* This function is not meant to release the cmd queue */
	if (WARN_ON(txq_id == trans_pcie->cmd_queue))
		return 0;

	lockdep_assert_held(&txq->lock);

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used. */
	last_to_free = iwl_queue_dec_wrap(index, q->n_bd);

	if ((index >= q->n_bd) ||
	   (iwl_queue_used(q, last_to_free) == 0)) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free, q->n_bd,
			q->write_ptr, q->read_ptr);
		return 0;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		return 0;

	for (;
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
			continue;

		__skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb);

		txq->entries[txq->q.read_ptr].skb = NULL;

		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);

		iwl_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
		freed++;
	}

	iwl_queue_progress(trans_pcie, txq);

	return freed;
}