/******************************************************************************
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "internal.h"
/* FIXME: need to abstract out TX command (once we know what it looks like) */
#include "dvm/commands.h"

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For Tx queue, there are low mark and high mark limits. If, after queuing
 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on 'tx done' IRQ), if free space becomes > high
 * mark, the Tx queue is resumed.
 *
 ***************************************************/
static int iwl_queue_space(const struct iwl_queue *q)
{
	unsigned int max;
	unsigned int used;

	/*
	 * To avoid ambiguity between empty and completely full queues, there
	 * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue.
	 * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need
	 * to reserve any queue entries for this purpose.
	 */
	if (q->n_window < TFD_QUEUE_SIZE_MAX)
		max = q->n_window;
	else
		max = TFD_QUEUE_SIZE_MAX - 1;

	/*
	 * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to
	 * modulo by TFD_QUEUE_SIZE_MAX and is well defined.
	 */
	used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);

	if (WARN_ON(used > max))
		return 0;

	return max - used;
}
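/*
 * Illustrative note (not from the original driver source): because
 * TFD_QUEUE_SIZE_MAX is a power of 2, the masked subtraction above stays
 * correct across the wrap point. Assuming TFD_QUEUE_SIZE_MAX == 256:
 *
 *	write_ptr = 3, read_ptr = 250
 *	used = (3 - 250) & (256 - 1) = 9
 *
 * i.e. nine entries are in use even though write_ptr < read_ptr.
 */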

/*
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_queue *q, int slots_num, u32 id)
{
	q->n_window = slots_num;
	q->id = id;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = 0;
	q->read_ptr = 0;

	return 0;
}
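/*
 * Illustrative note (not from the original driver source): for a queue
 * initialized with, say, slots_num = 256 the marks above come out as
 * low_mark = 64 and high_mark = 32, while a small 16-slot queue hits the
 * clamps and gets low_mark = 4, high_mark = 2.
 */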

static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
				  struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans,
				  struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

static void iwl_pcie_txq_stuck_timer(unsigned long data)
{
	struct iwl_txq *txq = (void *)data;
	struct iwl_queue *q = &txq->q;
	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
	u32 scd_sram_addr = trans_pcie->scd_base_addr +
				SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
	u8 buf[16];
	int i;

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->q.read_ptr == txq->q.write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
		jiffies_to_msecs(trans_pcie->wd_timeout));
	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->q.read_ptr, txq->q.write_ptr);

	iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

	iwl_print_hex_error(trans, buf, sizeof(buf));

	for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
			iwl_read_direct32(trans, FH_TX_TRB_REG(i)));

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
		u32 tbl_dw =
			iwl_trans_read_mem32(trans,
					     trans_pcie->scd_base_addr +
					     SCD_TRANS_TBL_OFFSET_QUEUE(i));

		if (i & 0x1)
			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
		else
			tbl_dw = tbl_dw & 0x0000FFFF;

		IWL_ERR(trans,
			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
			i, active ? "" : "in", fifo, tbl_dw,
			iwl_read_prph(trans, SCD_QUEUE_RDPTR(i)) &
				(TFD_QUEUE_SIZE_MAX - 1),
			iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
	}

	for (i = q->read_ptr; i != q->write_ptr;
	     i = iwl_queue_inc_wrap(i))
		IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
			le32_to_cpu(txq->scratchbufs[i].scratch));

	iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1);
}

/*
 * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
					     struct iwl_txq *txq, u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *) txq->entries[txq->q.write_ptr].cmd->payload;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	sta_id = tx_cmd->sta_id;
	sec_ctl = tx_cmd->sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += IEEE80211_CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += IEEE80211_TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
		break;
	}

	if (trans_pcie->bc_table_dword)
		len = DIV_ROUND_UP(len, 4);

	bc_ent = cpu_to_le16(len | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
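/*
 * Illustrative note (not from the original driver source): each byte-count
 * entry packs the adjusted length into the low 12 bits and the station id
 * into the top 4 bits, e.g. len = 100 with sta_id = 3 gives
 * bc_ent = cpu_to_le16(100 | (3 << 12)) = cpu_to_le16(0x3064).
 */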

static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					    struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *)txq->entries[txq->q.read_ptr].cmd->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans_pcie->cmd_queue)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

/*
 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 reg = 0;
	int txq_id = txq->q.id;

	lockdep_assert_held(&txq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. NIC is woken up for CMD regardless of shadow outside this function
	 * 3. there is a chance that the NIC is asleep
	 */
	if (!trans->cfg->base_params->shadow_reg_enable &&
	    txq_id != trans_pcie->cmd_queue &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		/*
		 * wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part.
		 */
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
				       txq_id, reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			txq->need_update = true;
			return;
		}
	}

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
	iwl_write32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
}

void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		struct iwl_txq *txq = &trans_pcie->txq[i];

		spin_lock(&txq->lock);
		if (trans_pcie->txq[i].need_update) {
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
			trans_pcie->txq[i].need_update = false;
		}
		spin_unlock(&txq->lock);
	}
}

static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				       dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}
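/*
 * Illustrative note (not from the original driver source): the helpers above
 * split an up-to-36-bit DMA address across tb->lo and the low nibble of
 * tb->hi_n_len, with the length in the upper 12 bits. For example,
 * addr = 0x123456780 and len = 48 are stored as
 *	tb->lo       = 0x23456780
 *	tb->hi_n_len = (48 << 4) | 0x1 = 0x301
 * and iwl_pcie_tfd_tb_get_addr() reassembles the same address.
 */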

static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
			       struct iwl_cmd_meta *meta,
			       struct iwl_tfd *tfd)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* first TB is never freed - it's the scratchbuf data */

	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
				 iwl_pcie_tfd_tb_get_len(tfd, i),
				 DMA_TO_DEVICE);

	tfd->num_tbs = 0;
}

/*
 * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int rd_ptr = txq->q.read_ptr;
	int idx = get_cmd_index(&txq->q, rd_ptr);

	lockdep_assert_held(&txq->lock);

	/* We have only q->n_window txq->entries, but we use
	 * TFD_QUEUE_SIZE_MAX tfds
	 */
	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);

	/* free SKB */
	if (txq->entries) {
		struct sk_buff *skb;

		skb = txq->entries[idx].skb;

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->entries[idx].skb = NULL;
		}
	}
}

static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
				  dma_addr_t addr, u16 len, bool reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum of 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN(addr & ~IWL_TX_DMA_MASK,
		 "Unaligned address = %llx\n", (unsigned long long)addr))
		return -EINVAL;

	iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}

static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
			       struct iwl_txq *txq, int slots_num,
			       u32 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
	size_t scratchbuf_sz;
	int i;

	if (WARN_ON(txq->entries || txq->tfds))
		return -EINVAL;

	setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer,
		    (unsigned long)txq);
	txq->trans_pcie = trans_pcie;

	txq->q.n_window = slots_num;

	txq->entries = kcalloc(slots_num,
			       sizeof(struct iwl_pcie_txq_entry),
			       GFP_KERNEL);

	if (!txq->entries)
		goto error;

	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->entries[i].cmd =
				kmalloc(sizeof(struct iwl_device_cmd),
					GFP_KERNEL);
			if (!txq->entries[i].cmd)
				goto error;
		}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->q.dma_addr, GFP_KERNEL);
	if (!txq->tfds)
		goto error;

	BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs));
	BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) !=
			sizeof(struct iwl_cmd_header) +
			offsetof(struct iwl_tx_cmd, scratch));

	scratchbuf_sz = sizeof(*txq->scratchbufs) * slots_num;

	txq->scratchbufs = dma_alloc_coherent(trans->dev, scratchbuf_sz,
					      &txq->scratchbufs_dma,
					      GFP_KERNEL);
	if (!txq->scratchbufs)
		goto err_free_tfds;

	txq->q.id = txq_id;

	return 0;
err_free_tfds:
	dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr);
error:
	if (txq->entries && txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->entries[i].cmd);
	kfree(txq->entries);
	txq->entries = NULL;

	return -ENOMEM;

}

static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
			      int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = false;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(&txq->q, slots_num, txq_id);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}

/*
 * iwl_pcie_txq_unmap -  Unmap any remaining DMA mappings and free skb's
 */
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;

	spin_lock_bh(&txq->lock);
	while (q->write_ptr != q->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, q->read_ptr);
		iwl_pcie_txq_free_tfd(trans, txq);
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
	}
	txq->active = false;
	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}

/*
 * iwl_pcie_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < txq->q.n_window; i++) {
			kfree(txq->entries[i].cmd);
			kfree(txq->entries[i].free_buf);
		}

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX,
				  txq->tfds, txq->q.dma_addr);
		txq->q.dma_addr = 0;
		txq->tfds = NULL;

		dma_free_coherent(dev,
				  sizeof(*txq->scratchbufs) * txq->q.n_window,
				  txq->scratchbufs, txq->scratchbufs_dma);
	}

	kfree(txq->entries);
	txq->entries = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
 */
static void iwl_pcie_txq_set_sched(struct iwl_trans *trans, u32 mask)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write_prph(trans, SCD_TXFACT, mask);
}

void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int nq = trans->cfg->base_params->num_of_queues;
	int chan;
	u32 reg_val;
	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
				SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);

	/* make sure all queue are not stopped/used */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);

	WARN_ON(scd_base_addr != 0 &&
		scd_base_addr != trans_pcie->scd_base_addr);

	/* reset context data, TX status and translation data */
	iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
				   SCD_CONTEXT_MEM_LOWER_BOUND,
			    NULL, clear_dwords);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* The chain extension of the SCD doesn't work well. This feature is
	 * enabled by default by the HW, so we need to disable it manually.
	 */
	if (trans->cfg->base_params->scd_chain_ext_wa)
		iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
				trans_pcie->cmd_fifo);

	/* Activate all Tx DMA/FIFO channels */
	iwl_pcie_txq_set_sched(trans, IWL_MASK(0, 7));

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
		iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		struct iwl_txq *txq = &trans_pcie->txq[txq_id];

		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
				   txq->q.dma_addr >> 8);
		iwl_pcie_txq_unmap(trans, txq_id);
		txq->q.read_ptr = 0;
		txq->q.write_ptr = 0;
	}

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	iwl_pcie_tx_start(trans, trans_pcie->scd_base_addr);
}

/*
 * iwl_pcie_tx_stop - Stop all Tx DMA channels
 */
int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ch, txq_id, ret;

	/* Turn off all Tx DMA fifos */
	spin_lock(&trans_pcie->irq_lock);

	iwl_pcie_txq_set_sched(trans, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(trans,
				   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
			FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
		if (ret < 0)
			IWL_ERR(trans,
				"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
				ch,
				iwl_read_direct32(trans,
						  FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock(&trans_pcie->irq_lock);

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
	 */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	/* This can happen: start_hw, stop_device */
	if (!trans_pcie->txq)
		return 0;

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_pcie_txq_unmap(trans, txq_id);

	return 0;
}

/*
 * iwl_trans_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Tx queues */
	if (trans_pcie->txq) {
		for (txq_id = 0;
		     txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
			iwl_pcie_txq_free(trans, txq_id);
	}

	kfree(trans_pcie->txq);
	trans_pcie->txq = NULL;

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}

/*
 * iwl_pcie_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 */
static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
			sizeof(struct iwlagn_scd_bc_tbl);

	/*It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
				   scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
				  sizeof(struct iwl_txq), GFP_KERNEL);
	if (!trans_pcie->txq) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id],
					  slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	iwl_pcie_tx_free(trans);

	return ret;
}

int iwl_pcie_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	bool alloc = false;

	if (!trans_pcie->txq) {
		ret = iwl_pcie_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock(&trans_pcie->irq_lock);

	/* Turn off all Tx DMA fifos */
	iwl_write_prph(trans, SCD_TXFACT, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock(&trans_pcie->irq_lock);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id],
					 slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;
error:
	/*Upon error, free only if we allocated something */
	if (alloc)
		iwl_pcie_tx_free(trans);
	return ret;
}

static inline void iwl_pcie_txq_progress(struct iwl_trans_pcie *trans_pcie,
					   struct iwl_txq *txq)
{
	if (!trans_pcie->wd_timeout)
		return;

	/*
	 * if empty delete timer, otherwise move timer forward
	 * since we're making progress on this queue
	 */
	if (txq->q.read_ptr == txq->q.write_ptr)
		del_timer(&txq->stuck_timer);
	else
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
}

/* Frees buffers until index _not_ inclusive */
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
	struct iwl_queue *q = &txq->q;
	int last_to_free;

	/* This function is not meant to release cmd queue*/
	if (WARN_ON(txq_id == trans_pcie->cmd_queue))
		return;

	spin_lock_bh(&txq->lock);

	if (!txq->active) {
		IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
				    txq_id, ssn);
		goto out;
	}

	if (txq->q.read_ptr == tfd_num)
		goto out;

	IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
			   txq_id, txq->q.read_ptr, tfd_num, ssn);

	/*Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used */
	last_to_free = iwl_queue_dec_wrap(tfd_num);

	if (!iwl_queue_used(q, last_to_free)) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
			q->write_ptr, q->read_ptr);
		goto out;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		goto out;

	for (;
	     q->read_ptr != tfd_num;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {

		if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
			continue;

		__skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb);

		txq->entries[txq->q.read_ptr].skb = NULL;

		iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);

		iwl_pcie_txq_free_tfd(trans, txq);
	}

	iwl_pcie_txq_progress(trans_pcie, txq);

	if (iwl_queue_space(&txq->q) > txq->q.low_mark)
		iwl_wake_queue(trans, txq);
out:
	spin_unlock_bh(&txq->lock);
}

/*
 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms.  If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	unsigned long flags;
	int nfreed = 0;

	lockdep_assert_held(&txq->lock);

	if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(q, idx))) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
			q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
				idx, q->write_ptr, q->read_ptr);
			iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1);
		}
	}

	if (trans->cfg->base_params->apmg_wake_up_wa &&
	    q->read_ptr == q->write_ptr) {
		spin_lock_irqsave(&trans_pcie->reg_lock, flags);
		WARN_ON(!trans_pcie->cmd_in_flight);
		trans_pcie->cmd_in_flight = false;
		__iwl_trans_pcie_clear_bit(trans,
					   CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
	}

	iwl_pcie_txq_progress(trans_pcie, txq);
}

static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
				 u16 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);

	return 0;
}

static inline void iwl_pcie_txq_set_inactive(struct iwl_trans *trans,
					     u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(trans,
		SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
		(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}

/* Receiver address (actually, Rx station's index into station table),
 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
#define BUILD_RAxTID(sta_id, tid)	(((sta_id) << 4) + (tid))
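/*
 * Illustrative note (not from the original driver source): BUILD_RAxTID(5, 2)
 * yields (5 << 4) + 2 = 0x52, i.e. station index in the high nibble and TID
 * in the low nibble, the format used by the scheduler's translation table.
 */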

void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
			       int sta_id, int tid, int frame_limit, u16 ssn)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	/* Stop this Tx queue before configuring it */
	iwl_pcie_txq_set_inactive(trans, txq_id);

	/* Set this queue as a chain-building queue unless it is CMD queue */
	if (txq_id != trans_pcie->cmd_queue)
		iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));

	/* If this queue is mapped to a certain station: it is an AGG queue */
	if (sta_id >= 0) {
		u16 ra_tid = BUILD_RAxTID(sta_id, tid);

		/* Map receiver-address / traffic-ID to this queue */
		iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);

		/* enable aggregations for the queue */
		iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
		trans_pcie->txq[txq_id].ampdu = true;
	} else {
		/*
		 * disable aggregations for the queue, this will also make the
		 * ra_tid mapping configuration irrelevant since it is now a
		 * non-AGG queue.
		 */
		iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));

		ssn = trans_pcie->txq[txq_id].q.read_ptr;
	}

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
	trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);

	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (ssn & 0xff) | (txq_id << 8));
	iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

	/* Set up Tx window size and frame limit for this queue */
	iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
	iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
		       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		       (fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
		       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
		       SCD_QUEUE_STTS_REG_MSK);
	trans_pcie->txq[txq_id].active = true;
	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n",
			    txq_id, fifo, ssn & 0xff);
}

void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 stts_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
	static const u32 zero_val[4] = {};

	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
	 */
	if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", txq_id);
		return;
	}

	iwl_pcie_txq_set_inactive(trans, txq_id);

	iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
			    ARRAY_SIZE(zero_val));

	iwl_pcie_txq_unmap(trans, txq_id);
	trans_pcie->txq[txq_id].ampdu = false;

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/*
 * iwl_pcie_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of command in the
 * command queue.
 */
static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
				 struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	unsigned long flags;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int idx;
	u16 copy_size, cmd_size, scratch_size;
	bool had_nocopy = false;
	int i, ret;
	u32 cmd_pos;
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
		if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
			int copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 get_cmd_string(trans_pcie, cmd->id), cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

	spin_lock_bh(&txq->lock);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
					 INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */
	cmd_pos = offsetof(struct iwl_device_cmd, payload);
	copy_size = sizeof(out_cmd->hdr);
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy;

		if (!cmd->len[i])
			continue;

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP))) {
			copy = cmd->len[i];

			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
			continue;
		}

		/*
		 * Otherwise we need at least IWL_HCMD_SCRATCHBUF_SIZE copied
		 * in total (for the scratchbuf handling), but copy up to what
		 * we can fit into the payload for debug dump purposes.
		 */
		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
		cmd_pos += copy;

		/* However, treat copy_size the proper way, we need it below */
		if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
			copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);

	/* start the TFD with the scratchbuf */
	scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE);
	memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size);
	iwl_pcie_txq_build_tfd(trans, txq,
			       iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr),
			       scratch_size, true);

	/* map first command fragment, if any remains */
	if (copy_size > scratch_size) {
		phys_addr = dma_map_single(trans->dev,
					   ((u8 *)&out_cmd->hdr) + scratch_size,
					   copy_size - scratch_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_pcie_tfd_unmap(trans, out_meta,
					   &txq->tfds[q->write_ptr]);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
				       copy_size - scratch_size, false);
	}

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		const void *data = cmddata[i];

		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, (void *)data,
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_pcie_tfd_unmap(trans, out_meta,
					   &txq->tfds[q->write_ptr]);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
	}

	out_meta->flags = cmd->flags;
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kfree(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);

	/* start timer if queue currently empty */
	if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);

	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on NICs that have
	 * apmg_wake_up_wa set.
	 */
	if (trans->cfg->base_params->apmg_wake_up_wa &&
	    !trans_pcie->cmd_in_flight) {
		__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
					 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
				   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
				    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
				   15000);
		if (ret < 0) {
			__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
			trans_pcie->cmd_in_flight = false;
			idx = -EIO;
			goto out;
		}
	}

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
	iwl_pcie_txq_inc_wr_ptr(trans, txq);

	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);

 out:
	spin_unlock_bh(&txq->lock);
 free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
	return idx;

1460 1461
/*
 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
1462
 * @rxb: Rx buffer to reclaim
1463 1464
 * @handler_status: return value of the handler of the command
 *	(put in setup_rx_handlers)
1465 1466 1467 1468 1469
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed.  The attached skb (if present) will only be freed
 * if the callback returns 1
 */
1470 1471
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb, int handler_status)
1472
{
Z
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then there a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans_pcie->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, trans_pcie->cmd_queue, sequence,
		 trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
		 trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	spin_lock_bh(&txq->lock);

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->entries[cmd_index].cmd;
	meta = &txq->entries[cmd_index].meta;

	iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		struct page *p = rxb_steal_page(rxb);

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
		meta->source->_rx_page_order = trans_pcie->rx_page_order;
		meta->source->handler_status = handler_status;
	}

	iwl_pcie_cmdq_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 get_cmd_string(trans_pcie, cmd->hdr.cmd));
		}
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(trans_pcie, cmd->hdr.cmd));
		wake_up(&trans_pcie->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock_bh(&txq->lock);
1531

1532
#define HOST_COMPLETE_TIMEOUT	(2 * HZ)
1533

1534 1535
static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
				    struct iwl_host_cmd *cmd)
1536
{
J
Johannes Berg 已提交
1537
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1538 1539 1540 1541 1542 1543
	int ret;

	/* An asynchronous command can not expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;

	ret = iwl_pcie_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			get_cmd_string(trans_pcie, cmd->id), ret);
		return ret;
	}
	return 0;
}

static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
				   struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
		       get_cmd_string(trans_pcie, cmd->id));

	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
				  &trans->status),
		 "Command %s: a command is already active!\n",
		 get_cmd_string(trans_pcie, cmd->id)))
		return -EIO;

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
		       get_cmd_string(trans_pcie, cmd->id));

	cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			get_cmd_string(trans_pcie, cmd->id), ret);
		return ret;
	}

	ret = wait_event_timeout(trans_pcie->wait_command_queue,
				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
					   &trans->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
		struct iwl_queue *q = &txq->q;

		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
			get_cmd_string(trans_pcie, cmd->id),
			jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
			q->read_ptr, q->write_ptr);

		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(trans_pcie, cmd->id));
		ret = -ETIMEDOUT;

		iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1);
		iwl_trans_fw_error(trans);

		goto cancel;
	}

	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
		IWL_ERR(trans, "FW error in SYNC CMD %s\n",
			get_cmd_string(trans_pcie, cmd->id));
		dump_stack();
		ret = -EIO;
		goto cancel;
	}

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
		ret = -ERFKILL;
		goto cancel;
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			get_cmd_string(trans_pcie, cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		trans_pcie->txq[trans_pcie->cmd_queue].
			entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}

int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
				  cmd->id);
		return -ERFKILL;
	}

	if (cmd->flags & CMD_ASYNC)
		return iwl_pcie_send_hcmd_async(trans, cmd);

	/* We still can fail on RFKILL that can be asserted while we wait */
	return iwl_pcie_send_hcmd_sync(trans, cmd);
}

int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq;
	struct iwl_queue *q;
	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
	void *tb1_addr;
	u16 len, tb1_len, tb2_len;
	bool wait_write_ptr;
	__le16 fc = hdr->frame_control;
	u8 hdr_len = ieee80211_hdrlen(fc);
	u16 wifi_seq;

	txq = &trans_pcie->txq[txq_id];
	q = &txq->q;

	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	spin_lock(&txq->lock);

	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirements to help the SCD to parse
	 * the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE(txq->ampdu &&
		  (wifi_seq & 0xff) != q->write_ptr,
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, q->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[q->write_ptr].skb = skb;
	txq->entries[q->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(q->write_ptr)));

	tb0_phys = iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr);
	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
		       offsetof(struct iwl_tx_cmd, scratch);

	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[q->write_ptr].meta;

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
	      hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
	tb1_len = ALIGN(len, 4);

	/* Tell NIC about any 2-byte padding after MAC header */
	if (tb1_len != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* The first TB points to the scratchbuf data - min_copy bytes */
	memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
	       IWL_HCMD_SCRATCHBUF_SIZE);
	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
			       IWL_HCMD_SCRATCHBUF_SIZE, true);

	/* there must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE);

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_HCMD_SCRATCHBUF_SIZE;
	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
		goto out_err;
	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);

	/*
	 * Set up TFD's third entry to point directly to remainder
	 * of skb, if any (802.11 null frames have no payload).
	 */
	tb2_len = skb->len - hdr_len;
	if (tb2_len > 0) {
		dma_addr_t tb2_phys = dma_map_single(trans->dev,
						     skb->data + hdr_len,
						     tb2_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
			iwl_pcie_tfd_unmap(trans, out_meta,
					   &txq->tfds[q->write_ptr]);
			goto out_err;
		}
		iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
	}

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     &txq->tfds[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
			     skb->data + hdr_len, tb2_len);
	trace_iwlwifi_dev_tx_data(trans->dev, skb,
				  skb->data + hdr_len, tb2_len);

	wait_write_ptr = ieee80211_has_morefrags(fc);

	/* start timer if queue currently empty */
	if (txq->need_update && q->read_ptr == q->write_ptr &&
	    trans_pcie->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
	if (!wait_write_ptr)
		iwl_pcie_txq_inc_wr_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	if (iwl_queue_space(q) < q->high_mark) {
		if (wait_write_ptr)
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
		else
			iwl_stop_queue(trans, txq);
	}
	spin_unlock(&txq->lock);
	return 0;
out_err:
	spin_unlock(&txq->lock);
	return -1;
}