/******************************************************************************
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "internal.h"
/* FIXME: need to abstract out TX command (once we know what it looks like) */
#include "dvm/commands.h"

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For the Tx queue, there are low mark and high mark limits. If, after queuing
 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
 *
 ***************************************************/
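/*
 * iwl_queue_space - return the number of TFD slots still available in the
 * queue, keeping the reserved entries described above.
 */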
static int iwl_queue_space(const struct iwl_queue *q)
{
	unsigned int max;
	unsigned int used;

	/*
	 * To avoid ambiguity between empty and completely full queues, there
	 * should always be less than q->n_bd elements in the queue.
	 * If q->n_window is smaller than q->n_bd, there is no need to reserve
	 * any queue entries for this purpose.
	 */
	if (q->n_window < q->n_bd)
		max = q->n_window;
	else
		max = q->n_bd - 1;

	/*
	 * q->n_bd is a power of 2, so the following is equivalent to modulo by
	 * q->n_bd and is well defined for negative dividends.
	 */
	used = (q->write_ptr - q->read_ptr) & (q->n_bd - 1);

	if (WARN_ON(used > max))
		return 0;

	return max - used;
}

/*
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	if (WARN_ON(!is_power_of_2(count)))
		return -EINVAL;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = 0;
	q->read_ptr = 0;

	return 0;
}

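/* Allocate a coherent DMA buffer and record its size and DMA address in @ptr */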
static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
				  struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans,
				  struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

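/*
 * iwl_pcie_txq_stuck_timer - the queue watchdog expired; dump the SW pointers,
 * scheduler and DMA channel state, then trigger an NMI to get a FW error dump.
 */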
static void iwl_pcie_txq_stuck_timer(unsigned long data)
{
	struct iwl_txq *txq = (void *)data;
	struct iwl_queue *q = &txq->q;
	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
	u32 scd_sram_addr = trans_pcie->scd_base_addr +
				SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
	u8 buf[16];
	int i;

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->q.read_ptr == txq->q.write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
		jiffies_to_msecs(trans_pcie->wd_timeout));
	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->q.read_ptr, txq->q.write_ptr);

	iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

	iwl_print_hex_error(trans, buf, sizeof(buf));

	for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
			iwl_read_direct32(trans, FH_TX_TRB_REG(i)));

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
		u32 tbl_dw =
			iwl_trans_read_mem32(trans,
					     trans_pcie->scd_base_addr +
					     SCD_TRANS_TBL_OFFSET_QUEUE(i));

		if (i & 0x1)
			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
		else
			tbl_dw = tbl_dw & 0x0000FFFF;

		IWL_ERR(trans,
			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
			i, active ? "" : "in", fifo, tbl_dw,
			iwl_read_prph(trans,
				      SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1),
			iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
	}

	for (i = q->read_ptr; i != q->write_ptr;
	     i = iwl_queue_inc_wrap(i, q->n_bd))
		IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
			le32_to_cpu(txq->scratchbufs[i].scratch));

	iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1);
}

/*
 * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
					     struct iwl_txq *txq, u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *) txq->entries[txq->q.write_ptr].cmd->payload;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	sta_id = tx_cmd->sta_id;
	sec_ctl = tx_cmd->sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += IEEE80211_CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += IEEE80211_TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
		break;
	}

	if (trans_pcie->bc_table_dword)
		len = DIV_ROUND_UP(len, 4);

	bc_ent = cpu_to_le16(len | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}

static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					    struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *)txq->entries[txq->q.read_ptr].cmd->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans_pcie->cmd_queue)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

/*
 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 reg = 0;
	int txq_id = txq->q.id;

	lockdep_assert_held(&txq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. NIC is woken up for CMD regardless of shadow outside this function
	 * 3. there is a chance that the NIC is asleep
	 */
	if (!trans->cfg->base_params->shadow_reg_enable &&
	    txq_id != trans_pcie->cmd_queue &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		/*
		 * wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part.
		 */
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
				       txq_id, reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			txq->need_update = true;
			return;
		}
	}

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
	iwl_write32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
}

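/* Write out any write-pointer updates that were deferred while the NIC was asleep */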
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		struct iwl_txq *txq = &trans_pcie->txq[i];

		spin_lock(&txq->lock);
		if (trans_pcie->txq[i].need_update) {
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
			trans_pcie->txq[i].need_update = false;
		}
		spin_unlock(&txq->lock);
	}
}

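/* Accessors for the transfer buffer (TB) entries embedded in a TFD */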
static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				       dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
			       struct iwl_cmd_meta *meta,
			       struct iwl_tfd *tfd)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* first TB is never freed - it's the scratchbuf data */

	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
				 iwl_pcie_tfd_tb_get_len(tfd, i),
				 DMA_TO_DEVICE);

	tfd->num_tbs = 0;
}

/*
 * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	/* rd_ptr is bounded by n_bd and idx is bounded by n_window */
	int rd_ptr = txq->q.read_ptr;
	int idx = get_cmd_index(&txq->q, rd_ptr);

	lockdep_assert_held(&txq->lock);

	/* We have only q->n_window txq->entries, but we use q->n_bd tfds */
	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);

	/* free SKB */
	if (txq->entries) {
		struct sk_buff *skb;

		skb = txq->entries[idx].skb;

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->entries[idx].skb = NULL;
		}
	}
}

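/* Append one transfer buffer (addr/len) to the TFD at the queue's write pointer */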
static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
				  dma_addr_t addr, u16 len, u8 reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum of 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN(addr & ~IWL_TX_DMA_MASK,
		 "Unaligned address = %llx\n", (unsigned long long)addr))
		return -EINVAL;

	iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}

static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
			       struct iwl_txq *txq, int slots_num,
			       u32 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
	size_t scratchbuf_sz;
	int i;

	if (WARN_ON(txq->entries || txq->tfds))
		return -EINVAL;

	setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer,
		    (unsigned long)txq);
	txq->trans_pcie = trans_pcie;

	txq->q.n_window = slots_num;

	txq->entries = kcalloc(slots_num,
			       sizeof(struct iwl_pcie_txq_entry),
			       GFP_KERNEL);

	if (!txq->entries)
		goto error;

	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->entries[i].cmd =
				kmalloc(sizeof(struct iwl_device_cmd),
					GFP_KERNEL);
			if (!txq->entries[i].cmd)
				goto error;
		}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->q.dma_addr, GFP_KERNEL);
	if (!txq->tfds)
		goto error;

	BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs));
	BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) !=
			sizeof(struct iwl_cmd_header) +
			offsetof(struct iwl_tx_cmd, scratch));

	scratchbuf_sz = sizeof(*txq->scratchbufs) * slots_num;

	txq->scratchbufs = dma_alloc_coherent(trans->dev, scratchbuf_sz,
					      &txq->scratchbufs_dma,
					      GFP_KERNEL);
	if (!txq->scratchbufs)
		goto err_free_tfds;

	txq->q.id = txq_id;

	return 0;
err_free_tfds:
	dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr);
error:
	if (txq->entries && txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->entries[i].cmd);
	kfree(txq->entries);
	txq->entries = NULL;

	return -ENOMEM;

}

static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
			      int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = false;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
			txq_id);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}

/*
 * iwl_pcie_txq_unmap -  Unmap any remaining DMA mappings and free skb's
 */
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;

	if (!q->n_bd)
		return;

	spin_lock_bh(&txq->lock);
	while (q->write_ptr != q->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, q->read_ptr);
		iwl_pcie_txq_free_tfd(trans, txq);
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
	txq->active = false;
	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}

/*
 * iwl_pcie_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < txq->q.n_window; i++) {
			kfree(txq->entries[i].cmd);
			kfree(txq->entries[i].free_buf);
		}

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd) {
		dma_free_coherent(dev, sizeof(struct iwl_tfd) *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
		txq->q.dma_addr = 0;

		dma_free_coherent(dev,
				  sizeof(*txq->scratchbufs) * txq->q.n_window,
				  txq->scratchbufs, txq->scratchbufs_dma);
	}

	kfree(txq->entries);
	txq->entries = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
 */
static void iwl_pcie_txq_set_sched(struct iwl_trans *trans, u32 mask)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write_prph(trans, SCD_TXFACT, mask);
}

void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int nq = trans->cfg->base_params->num_of_queues;
	int chan;
	u32 reg_val;
	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
				SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);

	/* make sure all queue are not stopped/used */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);

	WARN_ON(scd_base_addr != 0 &&
		scd_base_addr != trans_pcie->scd_base_addr);

	/* reset context data, TX status and translation data */
	iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
				   SCD_CONTEXT_MEM_LOWER_BOUND,
			    NULL, clear_dwords);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* The chain extension of the SCD doesn't work well. This feature is
	 * enabled by default by the HW, so we need to disable it manually.
	 */
	iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
				trans_pcie->cmd_fifo);

	/* Activate all Tx DMA/FIFO channels */
	iwl_pcie_txq_set_sched(trans, IWL_MASK(0, 7));

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
		iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		struct iwl_txq *txq = &trans_pcie->txq[txq_id];

		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
				   txq->q.dma_addr >> 8);
		iwl_pcie_txq_unmap(trans, txq_id);
		txq->q.read_ptr = 0;
		txq->q.write_ptr = 0;
	}

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	iwl_pcie_tx_start(trans, trans_pcie->scd_base_addr);
}

/*
 * iwl_pcie_tx_stop - Stop all Tx DMA channels
 */
int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ch, txq_id, ret;

	/* Turn off all Tx DMA fifos */
	spin_lock(&trans_pcie->irq_lock);

	iwl_pcie_txq_set_sched(trans, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(trans,
				   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
			FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
		if (ret < 0)
			IWL_ERR(trans,
				"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
				ch,
				iwl_read_direct32(trans,
						  FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock(&trans_pcie->irq_lock);

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
	 */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	/* This can happen: start_hw, stop_device */
	if (!trans_pcie->txq)
		return 0;

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_pcie_txq_unmap(trans, txq_id);

	return 0;
}

/*
 * iwl_trans_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Tx queues */
	if (trans_pcie->txq) {
		for (txq_id = 0;
		     txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
			iwl_pcie_txq_free(trans, txq_id);
	}

	kfree(trans_pcie->txq);
	trans_pcie->txq = NULL;

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}

/*
 * iwl_pcie_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 */
static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
			sizeof(struct iwlagn_scd_bc_tbl);

	/*It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
				   scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
				  sizeof(struct iwl_txq), GFP_KERNEL);
	if (!trans_pcie->txq) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id],
					  slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	iwl_pcie_tx_free(trans);

	return ret;
}

int iwl_pcie_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	bool alloc = false;

	if (!trans_pcie->txq) {
		ret = iwl_pcie_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock(&trans_pcie->irq_lock);

	/* Turn off all Tx DMA fifos */
	iwl_write_prph(trans, SCD_TXFACT, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock(&trans_pcie->irq_lock);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id],
					 slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;
error:
	/*Upon error, free only if we allocated something */
	if (alloc)
		iwl_pcie_tx_free(trans);
	return ret;
}

static inline void iwl_pcie_txq_progress(struct iwl_trans_pcie *trans_pcie,
					   struct iwl_txq *txq)
{
	if (!trans_pcie->wd_timeout)
		return;

	/*
	 * if empty delete timer, otherwise move timer forward
	 * since we're making progress on this queue
	 */
	if (txq->q.read_ptr == txq->q.write_ptr)
		del_timer(&txq->stuck_timer);
	else
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
}

/* Frees buffers until index _not_ inclusive */
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	/* n_bd is usually 256 => n_bd - 1 = 0xff */
	int tfd_num = ssn & (txq->q.n_bd - 1);
	struct iwl_queue *q = &txq->q;
	int last_to_free;

	/* This function is not meant to release cmd queue*/
	if (WARN_ON(txq_id == trans_pcie->cmd_queue))
		return;

	spin_lock_bh(&txq->lock);

	if (!txq->active) {
		IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
				    txq_id, ssn);
		goto out;
	}

	if (txq->q.read_ptr == tfd_num)
		goto out;

	IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
			   txq_id, txq->q.read_ptr, tfd_num, ssn);

	/*Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used */
	last_to_free = iwl_queue_dec_wrap(tfd_num, q->n_bd);

	if (!iwl_queue_used(q, last_to_free)) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free, q->n_bd,
			q->write_ptr, q->read_ptr);
		goto out;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		goto out;

	for (;
	     q->read_ptr != tfd_num;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
			continue;

		__skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb);

		txq->entries[txq->q.read_ptr].skb = NULL;

		iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);

		iwl_pcie_txq_free_tfd(trans, txq);
	}

	iwl_pcie_txq_progress(trans_pcie, txq);

	if (iwl_queue_space(&txq->q) > txq->q.low_mark)
		iwl_wake_queue(trans, txq);
out:
	spin_unlock_bh(&txq->lock);
}

/*
 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As result, some free space forms.  If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	unsigned long flags;
	int nfreed = 0;

	lockdep_assert_held(&txq->lock);

	if ((idx >= q->n_bd) || (!iwl_queue_used(q, idx))) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, idx, q->n_bd,
			q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
				idx, q->write_ptr, q->read_ptr);
			iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1);
		}
	}

	if (trans->cfg->base_params->apmg_wake_up_wa &&
	    q->read_ptr == q->write_ptr) {
		spin_lock_irqsave(&trans_pcie->reg_lock, flags);
		WARN_ON(!trans_pcie->cmd_in_flight);
		trans_pcie->cmd_in_flight = false;
		__iwl_trans_pcie_clear_bit(trans,
					   CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
	}

	iwl_pcie_txq_progress(trans_pcie, txq);
}

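/* Map a RA/TID pair to @txq_id in the scheduler's translation table */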
static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
				 u16 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);

	return 0;
}

static inline void iwl_pcie_txq_set_inactive(struct iwl_trans *trans,
					     u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(trans,
		SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
		(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}

/* Receiver address (actually, Rx station's index into station table),
 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
#define BUILD_RAxTID(sta_id, tid)	(((sta_id) << 4) + (tid))

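/*
 * iwl_trans_pcie_txq_enable - configure a Tx queue in the scheduler: map it
 * to a FIFO, optionally set up the RA/TID aggregation mapping, program the
 * read/write pointers from @ssn and set the window size / frame limit.
 */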
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
			       int sta_id, int tid, int frame_limit, u16 ssn)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	/* Stop this Tx queue before configuring it */
	iwl_pcie_txq_set_inactive(trans, txq_id);

	/* Set this queue as a chain-building queue unless it is CMD queue */
	if (txq_id != trans_pcie->cmd_queue)
		iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));

	/* If this queue is mapped to a certain station: it is an AGG queue */
	if (sta_id >= 0) {
		u16 ra_tid = BUILD_RAxTID(sta_id, tid);

		/* Map receiver-address / traffic-ID to this queue */
		iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);

		/* enable aggregations for the queue */
		iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
		trans_pcie->txq[txq_id].ampdu = true;
	} else {
		/*
		 * disable aggregations for the queue, this will also make the
		 * ra_tid mapping configuration irrelevant since it is now a
		 * non-AGG queue.
		 */
		iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));

		ssn = trans_pcie->txq[txq_id].q.read_ptr;
	}

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
	trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);

	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (ssn & 0xff) | (txq_id << 8));
	iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

	/* Set up Tx window size and frame limit for this queue */
	iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
	iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
		       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		       (fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
		       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
		       SCD_QUEUE_STTS_REG_MSK);
	trans_pcie->txq[txq_id].active = true;
	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n",
			    txq_id, fifo, ssn & 0xff);
}

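/* Deactivate a Tx queue in the scheduler and free any frames still queued on it */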
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 stts_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
	static const u32 zero_val[4] = {};

	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
	 */
	if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", txq_id);
		return;
	}

	iwl_pcie_txq_set_inactive(trans, txq_id);

	iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
			    ARRAY_SIZE(zero_val));

	iwl_pcie_txq_unmap(trans, txq_id);
	trans_pcie->txq[txq_id].ampdu = false;

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/*
 * iwl_pcie_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of command in the
 * command queue.
 */
static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
				 struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	unsigned long flags;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int idx;
	u16 copy_size, cmd_size, scratch_size;
	bool had_nocopy = false;
	int i, ret;
	u32 cmd_pos;
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
		if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
			int copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 get_cmd_string(trans_pcie, cmd->id), cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

	spin_lock_bh(&txq->lock);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
					 INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */
	cmd_pos = offsetof(struct iwl_device_cmd, payload);
	copy_size = sizeof(out_cmd->hdr);
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy = 0;

		if (!cmd->len[i])
			continue;

		/* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
		if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
			copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
		}

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			copy = cmd->len[i];

		if (copy) {
			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);

	/* start the TFD with the scratchbuf */
	scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE);
	memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size);
	iwl_pcie_txq_build_tfd(trans, txq,
			       iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr),
			       scratch_size, 1);

	/* map first command fragment, if any remains */
	if (copy_size > scratch_size) {
		phys_addr = dma_map_single(trans->dev,
					   ((u8 *)&out_cmd->hdr) + scratch_size,
					   copy_size - scratch_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_pcie_tfd_unmap(trans, out_meta,
					   &txq->tfds[q->write_ptr]);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
				       copy_size - scratch_size, 0);
	}

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		const void *data = cmddata[i];

		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, (void *)data,
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_pcie_tfd_unmap(trans, out_meta,
					   &txq->tfds[q->write_ptr]);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], 0);
	}

	out_meta->flags = cmd->flags;
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kfree(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);

	/* start timer if queue currently empty */
	if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);

	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on NICs that have
	 * apmg_wake_up_wa set.
	 */
	if (trans->cfg->base_params->apmg_wake_up_wa &&
	    !trans_pcie->cmd_in_flight) {
		trans_pcie->cmd_in_flight = true;
		__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
					 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
				   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
				    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
				   15000);
		if (ret < 0) {
			__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
			trans_pcie->cmd_in_flight = false;
			idx = -EIO;
			goto out;
		}
	}

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_pcie_txq_inc_wr_ptr(trans, txq);

	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);

 out:
	spin_unlock_bh(&txq->lock);
 free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
	return idx;
}

/*
 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 * @handler_status: return value of the handler of the command
 *	(put in setup_rx_handlers)
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed.  The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb, int handler_status)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then there is a command routing bug that has been
	 * introduced in the queue management code. */
	if (WARN(txq_id != trans_pcie->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, trans_pcie->cmd_queue, sequence,
		 trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
		 trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	spin_lock_bh(&txq->lock);

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->entries[cmd_index].cmd;
	meta = &txq->entries[cmd_index].meta;

	iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		struct page *p = rxb_steal_page(rxb);

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
		meta->source->_rx_page_order = trans_pcie->rx_page_order;
		meta->source->handler_status = handler_status;
	}

	iwl_pcie_cmdq_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 get_cmd_string(trans_pcie, cmd->hdr.cmd));
		}
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(trans_pcie, cmd->hdr.cmd));
		wake_up(&trans_pcie->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock_bh(&txq->lock);
}

#define HOST_COMPLETE_TIMEOUT	(2 * HZ)

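/* Enqueue a host command without waiting for its completion */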
static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
				    struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	/* An asynchronous command can not expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;

	ret = iwl_pcie_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			get_cmd_string(trans_pcie, cmd->id), ret);
		return ret;
	}
	return 0;
}

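/* Enqueue a host command and wait (up to HOST_COMPLETE_TIMEOUT) for the firmware to complete it */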
static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
				   struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
		       get_cmd_string(trans_pcie, cmd->id));

	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
				  &trans->status),
		 "Command %s: a command is already active!\n",
		 get_cmd_string(trans_pcie, cmd->id)))
		return -EIO;

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
		       get_cmd_string(trans_pcie, cmd->id));

	cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			get_cmd_string(trans_pcie, cmd->id), ret);
		return ret;
	}

	ret = wait_event_timeout(trans_pcie->wait_command_queue,
				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
					   &trans->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
		struct iwl_queue *q = &txq->q;

		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
			get_cmd_string(trans_pcie, cmd->id),
			jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
			q->read_ptr, q->write_ptr);

		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(trans_pcie, cmd->id));
		ret = -ETIMEDOUT;

		iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1);
		iwl_trans_fw_error(trans);

		goto cancel;
	}

	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
		IWL_ERR(trans, "FW error in SYNC CMD %s\n",
			get_cmd_string(trans_pcie, cmd->id));
		dump_stack();
		ret = -EIO;
		goto cancel;
	}

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
		ret = -ERFKILL;
		goto cancel;
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			get_cmd_string(trans_pcie, cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		trans_pcie->txq[trans_pcie->cmd_queue].
			entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}

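/* Generic host command entry point: honour RF-kill, then use the sync or async path */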
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
				  cmd->id);
		return -ERFKILL;
	}

	if (cmd->flags & CMD_ASYNC)
		return iwl_pcie_send_hcmd_async(trans, cmd);

	/* We still can fail on RFKILL that can be asserted while we wait */
	return iwl_pcie_send_hcmd_sync(trans, cmd);
}

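/*
 * iwl_trans_pcie_tx - data path Tx: build the TFD for this frame (scratch
 * buffer, Tx command + 802.11 header, then the payload), fill the byte-count
 * table entry and advance the queue's write pointer.
 */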
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq;
	struct iwl_queue *q;
	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
	void *tb1_addr;
	u16 len, tb1_len, tb2_len;
	bool wait_write_ptr;
	__le16 fc = hdr->frame_control;
	u8 hdr_len = ieee80211_hdrlen(fc);
	u16 wifi_seq;

	txq = &trans_pcie->txq[txq_id];
	q = &txq->q;

	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	spin_lock(&txq->lock);

	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirement to help the SCD to parse
	 * the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE(txq->ampdu &&
		  (wifi_seq & 0xff) != q->write_ptr,
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, q->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[q->write_ptr].skb = skb;
	txq->entries[q->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(q->write_ptr)));

	tb0_phys = iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr);
	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
		       offsetof(struct iwl_tx_cmd, scratch);

	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[q->write_ptr].meta;

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
	      hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
	tb1_len = ALIGN(len, 4);

	/* Tell NIC about any 2-byte padding after MAC header */
	if (tb1_len != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* The first TB points to the scratchbuf data - min_copy bytes */
	memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
	       IWL_HCMD_SCRATCHBUF_SIZE);
	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
			       IWL_HCMD_SCRATCHBUF_SIZE, 1);

	/* there must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE);

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_HCMD_SCRATCHBUF_SIZE;
	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
		goto out_err;
	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, 0);

	/*
	 * Set up TFD's third entry to point directly to remainder
	 * of skb, if any (802.11 null frames have no payload).
	 */
	tb2_len = skb->len - hdr_len;
	if (tb2_len > 0) {
		dma_addr_t tb2_phys = dma_map_single(trans->dev,
						     skb->data + hdr_len,
						     tb2_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
			iwl_pcie_tfd_unmap(trans, out_meta,
					   &txq->tfds[q->write_ptr]);
			goto out_err;
		}
		iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, 0);
	}

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     &txq->tfds[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
			     skb->data + hdr_len, tb2_len);
	trace_iwlwifi_dev_tx_data(trans->dev, skb,
				  skb->data + hdr_len, tb2_len);

	wait_write_ptr = ieee80211_has_morefrags(fc);

	/* start timer if queue currently empty */
	if (txq->need_update && q->read_ptr == q->write_ptr &&
	    trans_pcie->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	if (!wait_write_ptr)
		iwl_pcie_txq_inc_wr_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	if (iwl_queue_space(q) < q->high_mark) {
		if (wait_write_ptr)
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
		else
			iwl_stop_queue(trans, txq);
	}
	spin_unlock(&txq->lock);
	return 0;
out_err:
	spin_unlock(&txq->lock);
	return -1;
}