/******************************************************************************
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-scd.h"
#include "iwl-op-mode.h"
#include "internal.h"
/* FIXME: need to abstract out TX command (once we know what it looks like) */
#include "dvm/commands.h"

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For the Tx queue, there are low mark and high mark limits. If, after queuing
 * a packet for Tx, the free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'tx done' IRQ), if the free space becomes
 * > high mark, the Tx queue is resumed.
 *
 ***************************************************/
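/*
 * Worked example of the wrap-around arithmetic used below (illustrative
 * only, assuming TFD_QUEUE_SIZE_MAX is 256): with write_ptr = 5 and
 * read_ptr = 250, the used count is (5 - 250) & 255 = 11 entries, so the
 * masked subtraction stays correct across the circular-buffer wrap.
 */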
static int iwl_queue_space(const struct iwl_queue *q)
{
	unsigned int max;
	unsigned int used;

	/*
	 * To avoid ambiguity between empty and completely full queues, there
	 * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue.
	 * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need
	 * to reserve any queue entries for this purpose.
	 */
	if (q->n_window < TFD_QUEUE_SIZE_MAX)
		max = q->n_window;
	else
		max = TFD_QUEUE_SIZE_MAX - 1;

	/*
	 * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to
	 * modulo by TFD_QUEUE_SIZE_MAX and is well defined.
	 */
	used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);

	if (WARN_ON(used > max))
		return 0;

	return max - used;
}

/*
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_queue *q, int slots_num, u32 id)
{
	q->n_window = slots_num;
	q->id = id;

	/* slots_num must be a power of two, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = 0;
	q->read_ptr = 0;

	return 0;
}

static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
				  struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans,
				  struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

static void iwl_pcie_txq_stuck_timer(unsigned long data)
{
	struct iwl_txq *txq = (void *)data;
	struct iwl_queue *q = &txq->q;
	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
	u32 scd_sram_addr = trans_pcie->scd_base_addr +
				SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
	u8 buf[16];
	int i;

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->q.read_ptr == txq->q.write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
		jiffies_to_msecs(trans_pcie->wd_timeout));
	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->q.read_ptr, txq->q.write_ptr);

	iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

	iwl_print_hex_error(trans, buf, sizeof(buf));

	for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
			iwl_read_direct32(trans, FH_TX_TRB_REG(i)));

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
		u32 tbl_dw =
			iwl_trans_read_mem32(trans,
					     trans_pcie->scd_base_addr +
					     SCD_TRANS_TBL_OFFSET_QUEUE(i));

		if (i & 0x1)
			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
		else
			tbl_dw = tbl_dw & 0x0000FFFF;

		IWL_ERR(trans,
			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
			i, active ? "" : "in", fifo, tbl_dw,
			iwl_read_prph(trans, SCD_QUEUE_RDPTR(i)) &
				(TFD_QUEUE_SIZE_MAX - 1),
			iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
	}

	for (i = q->read_ptr; i != q->write_ptr;
	     i = iwl_queue_inc_wrap(i))
		IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
			le32_to_cpu(txq->scratchbufs[i].scratch));

	iwl_force_nmi(trans);
}

/*
 * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
					     struct iwl_txq *txq, u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *) txq->entries[txq->q.write_ptr].cmd->payload;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	sta_id = tx_cmd->sta_id;
	sec_ctl = tx_cmd->sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += IEEE80211_CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += IEEE80211_TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
		break;
	}

	if (trans_pcie->bc_table_dword)
		len = DIV_ROUND_UP(len, 4);

	bc_ent = cpu_to_le16(len | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}

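/*
 * Clear the byte-count table entry for the TFD being reclaimed at the
 * queue's read pointer (a minimal length of 1 is written back).
 */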
static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					    struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *)txq->entries[txq->q.read_ptr].cmd->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans_pcie->cmd_queue)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

/*
 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 reg = 0;
	int txq_id = txq->q.id;

	lockdep_assert_held(&txq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. NIC is woken up for CMD regardless of shadow outside this function
	 * 3. there is a chance that the NIC is asleep
	 */
	if (!trans->cfg->base_params->shadow_reg_enable &&
	    txq_id != trans_pcie->cmd_queue &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		/*
		 * wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part.
		 */
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
				       txq_id, reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			txq->need_update = true;
			return;
		}
	}

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
	iwl_write32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
}

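/*
 * Push out any queue write pointers whose update was deferred
 * (need_update) by iwl_pcie_txq_inc_wr_ptr().
 */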
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		struct iwl_txq *txq = &trans_pcie->txq[i];

		spin_lock_bh(&txq->lock);
		if (trans_pcie->txq[i].need_update) {
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
			trans_pcie->txq[i].need_update = false;
		}
		spin_unlock_bh(&txq->lock);
	}
}

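/*
 * Each TFD transfer buffer (TB) entry packs a DMA address and length:
 * tb->lo holds the low 32 bits of the address, while tb->hi_n_len keeps
 * address bits 32-35 in its low nibble and the 12-bit length in the
 * remaining bits (see iwl_pcie_tfd_set_tb() below).
 */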
static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				       dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
			       struct iwl_cmd_meta *meta,
			       struct iwl_tfd *tfd)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* first TB is never freed - it's the scratchbuf data */

	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
				 iwl_pcie_tfd_tb_get_len(tfd, i),
				 DMA_TO_DEVICE);

	tfd->num_tbs = 0;
}

/*
 * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int rd_ptr = txq->q.read_ptr;
	int idx = get_cmd_index(&txq->q, rd_ptr);

	lockdep_assert_held(&txq->lock);

	/* We have only q->n_window txq->entries, but we use
	 * TFD_QUEUE_SIZE_MAX tfds
	 */
	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);

	/* free SKB */
	if (txq->entries) {
		struct sk_buff *skb;

		skb = txq->entries[idx].skb;

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->entries[idx].skb = NULL;
		}
	}
}

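/*
 * Append one transfer buffer (addr/len pair) to the TFD at the queue's
 * current write pointer; when @reset is set the TFD is zeroed first.
 */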
static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
				  dma_addr_t addr, u16 len, bool reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum of 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN(addr & ~IWL_TX_DMA_MASK,
		 "Unaligned address = %llx\n", (unsigned long long)addr))
		return -EINVAL;

	iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}

static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
			       struct iwl_txq *txq, int slots_num,
			       u32 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
	size_t scratchbuf_sz;
	int i;

	if (WARN_ON(txq->entries || txq->tfds))
		return -EINVAL;

	setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer,
		    (unsigned long)txq);
	txq->trans_pcie = trans_pcie;

	txq->q.n_window = slots_num;

	txq->entries = kcalloc(slots_num,
			       sizeof(struct iwl_pcie_txq_entry),
			       GFP_KERNEL);

	if (!txq->entries)
		goto error;

	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->entries[i].cmd =
				kmalloc(sizeof(struct iwl_device_cmd),
					GFP_KERNEL);
			if (!txq->entries[i].cmd)
				goto error;
		}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->q.dma_addr, GFP_KERNEL);
	if (!txq->tfds)
		goto error;

	BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs));
	BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) !=
			sizeof(struct iwl_cmd_header) +
			offsetof(struct iwl_tx_cmd, scratch));

	scratchbuf_sz = sizeof(*txq->scratchbufs) * slots_num;

	txq->scratchbufs = dma_alloc_coherent(trans->dev, scratchbuf_sz,
					      &txq->scratchbufs_dma,
					      GFP_KERNEL);
	if (!txq->scratchbufs)
		goto err_free_tfds;

	txq->q.id = txq_id;

	return 0;
err_free_tfds:
	dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr);
error:
	if (txq->entries && txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->entries[i].cmd);
	kfree(txq->entries);
	txq->entries = NULL;

	return -ENOMEM;

}

static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
			      int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = false;

	/* TFD_QUEUE_SIZE_MAX must be a power of two, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(&txq->q, slots_num, txq_id);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}

/*
 * iwl_pcie_txq_unmap -  Unmap any remaining DMA mappings and free skb's
 */
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;

	spin_lock_bh(&txq->lock);
	while (q->write_ptr != q->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, q->read_ptr);
		iwl_pcie_txq_free_tfd(trans, txq);
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
	}
	txq->active = false;
	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}

/*
 * iwl_pcie_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < txq->q.n_window; i++) {
			kzfree(txq->entries[i].cmd);
			kzfree(txq->entries[i].free_buf);
		}

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX,
				  txq->tfds, txq->q.dma_addr);
		txq->q.dma_addr = 0;
		txq->tfds = NULL;

		dma_free_coherent(dev,
				  sizeof(*txq->scratchbufs) * txq->q.n_window,
				  txq->scratchbufs, txq->scratchbufs_dma);
	}

	kfree(txq->entries);
	txq->entries = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int nq = trans->cfg->base_params->num_of_queues;
	int chan;
	u32 reg_val;
	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
				SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);

	/* make sure all queues are not stopped/used */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);

	WARN_ON(scd_base_addr != 0 &&
		scd_base_addr != trans_pcie->scd_base_addr);

	/* reset context data, TX status and translation data */
	iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
				   SCD_CONTEXT_MEM_LOWER_BOUND,
			    NULL, clear_dwords);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* The chain extension of the SCD doesn't work well. This feature is
	 * enabled by default by the HW, so we need to disable it manually.
	 */
	if (trans->cfg->base_params->scd_chain_ext_wa)
		iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
				trans_pcie->cmd_fifo);

	/* Activate all Tx DMA/FIFO channels */
	iwl_scd_activate_fifos(trans);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
		iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		struct iwl_txq *txq = &trans_pcie->txq[txq_id];

		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
				   txq->q.dma_addr >> 8);
		iwl_pcie_txq_unmap(trans, txq_id);
		txq->q.read_ptr = 0;
		txq->q.write_ptr = 0;
	}

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	iwl_pcie_tx_start(trans, trans_pcie->scd_base_addr);
}

/*
 * iwl_pcie_tx_stop - Stop all Tx DMA channels
 */
int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ch, txq_id, ret;

	/* Turn off all Tx DMA fifos */
	spin_lock(&trans_pcie->irq_lock);

	iwl_scd_deactivate_fifos(trans);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(trans,
				   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
			FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
		if (ret < 0)
			IWL_ERR(trans,
				"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
				ch,
				iwl_read_direct32(trans,
						  FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock(&trans_pcie->irq_lock);

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
	 */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	/* This can happen: start_hw, stop_device */
	if (!trans_pcie->txq)
		return 0;

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_pcie_txq_unmap(trans, txq_id);

	return 0;
}

/*
 * iwl_trans_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Tx queues */
	if (trans_pcie->txq) {
		for (txq_id = 0;
		     txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
			iwl_pcie_txq_free(trans, txq_id);
	}

	kfree(trans_pcie->txq);
	trans_pcie->txq = NULL;

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}

/*
 * iwl_pcie_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 */
static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
			sizeof(struct iwlagn_scd_bc_tbl);

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
				   scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
				  sizeof(struct iwl_txq), GFP_KERNEL);
	if (!trans_pcie->txq) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id],
					  slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	iwl_pcie_tx_free(trans);

	return ret;
}
int iwl_pcie_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	bool alloc = false;

	if (!trans_pcie->txq) {
		ret = iwl_pcie_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock(&trans_pcie->irq_lock);

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock(&trans_pcie->irq_lock);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id],
					 slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_pcie_tx_free(trans);
	return ret;
}

static inline void iwl_pcie_txq_progress(struct iwl_trans_pcie *trans_pcie,
					   struct iwl_txq *txq)
{
	if (!trans_pcie->wd_timeout)
		return;

	/*
	 * if empty delete timer, otherwise move timer forward
	 * since we're making progress on this queue
	 */
	if (txq->q.read_ptr == txq->q.write_ptr)
		del_timer(&txq->stuck_timer);
	else
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
}

/* Frees buffers until index _not_ inclusive */
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
	struct iwl_queue *q = &txq->q;
	int last_to_free;

	/* This function is not meant to release cmd queue */
	if (WARN_ON(txq_id == trans_pcie->cmd_queue))
		return;

	spin_lock_bh(&txq->lock);

	if (!txq->active) {
		IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
				    txq_id, ssn);
		goto out;
	}

	if (txq->q.read_ptr == tfd_num)
		goto out;

	IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
			   txq_id, txq->q.read_ptr, tfd_num, ssn);

	/* Since we free until the index, _not_ inclusive, the entry before the
	 * index is the last one we will free. That entry must be in use. */
	last_to_free = iwl_queue_dec_wrap(tfd_num);

	if (!iwl_queue_used(q, last_to_free)) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
			q->write_ptr, q->read_ptr);
		goto out;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		goto out;

	for (;
	     q->read_ptr != tfd_num;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {

		if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
			continue;

		__skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb);

		txq->entries[txq->q.read_ptr].skb = NULL;

		iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);

		iwl_pcie_txq_free_tfd(trans, txq);
	}

	iwl_pcie_txq_progress(trans_pcie, txq);

	if (iwl_queue_space(&txq->q) > txq->q.low_mark)
		iwl_wake_queue(trans, txq);

	if (q->read_ptr == q->write_ptr) {
		IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", q->id);
		iwl_trans_pcie_unref(trans);
	}

out:
	spin_unlock_bh(&txq->lock);
}

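/*
 * Mark a host command as being in flight: take the runtime-PM reference
 * and, on NICs that need the wake-up workaround, keep the NIC awake until
 * iwl_pcie_clear_cmd_in_flight() releases it.
 */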
static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
				      const struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	lockdep_assert_held(&trans_pcie->reg_lock);

	if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
	    !trans_pcie->ref_cmd_in_flight) {
		trans_pcie->ref_cmd_in_flight = true;
		IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
		iwl_trans_pcie_ref(trans);
	}

	if (trans_pcie->cmd_in_flight)
		return 0;

	trans_pcie->cmd_in_flight = true;

	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on NICs that have
	 * apmg_wake_up_wa set.
	 */
	if (trans->cfg->base_params->apmg_wake_up_wa) {
		__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
					 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
			udelay(2);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
				   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
				    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
				   15000);
		if (ret < 0) {
			__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			trans_pcie->cmd_in_flight = false;
			IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
			return -EIO;
		}
	}

	return 0;
}

static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	if (trans_pcie->ref_cmd_in_flight) {
		trans_pcie->ref_cmd_in_flight = false;
		IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n");
		iwl_trans_pcie_unref(trans);
	}

	if (WARN_ON(!trans_pcie->cmd_in_flight))
		return 0;

	trans_pcie->cmd_in_flight = false;

	if (trans->cfg->base_params->apmg_wake_up_wa)
		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	return 0;
}

/*
 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As result, some free space forms.  If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	unsigned long flags;
	int nfreed = 0;

	lockdep_assert_held(&txq->lock);

	if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(q, idx))) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
			q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
				idx, q->write_ptr, q->read_ptr);
			iwl_force_nmi(trans);
		}
	}

	if (q->read_ptr == q->write_ptr) {
		spin_lock_irqsave(&trans_pcie->reg_lock, flags);
		iwl_pcie_clear_cmd_in_flight(trans);
		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
	}

	iwl_pcie_txq_progress(trans_pcie, txq);
}

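/*
 * Write the RA/TID mapping for this queue into the scheduler's translation
 * table; each 32-bit word holds the mapping of two queues (even queue in
 * the low half-word, odd queue in the high half-word).
 */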
static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
				 u16 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);

	return 0;
}

/* Receiver address (actually, Rx station's index into station table),
 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
#define BUILD_RAxTID(sta_id, tid)	(((sta_id) << 4) + (tid))

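/*
 * Activate a Tx queue in the hardware scheduler: map it to a FIFO, set up
 * (or tear down) the RA/TID aggregation mapping and program the read/write
 * pointers to the given ssn.  A NULL cfg skips the scheduler configuration
 * and only initializes the driver-side pointers.
 */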
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int fifo = -1;

	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	if (cfg) {
		fifo = cfg->fifo;

		/* Disable the scheduler prior configuring the cmd queue */
		if (txq_id == trans_pcie->cmd_queue &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, 0);

		/* Stop this Tx queue before configuring it */
		iwl_scd_txq_set_inactive(trans, txq_id);

		/* Set this queue as a chain-building queue unless it is CMD */
		if (txq_id != trans_pcie->cmd_queue)
			iwl_scd_txq_set_chain(trans, txq_id);

		if (cfg->aggregate) {
			u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);

			/* Map receiver-address / traffic-ID to this queue */
			iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);

			/* enable aggregations for the queue */
			iwl_scd_txq_enable_agg(trans, txq_id);
			trans_pcie->txq[txq_id].ampdu = true;
		} else {
			/*
			 * disable aggregations for the queue, this will also
			 * make the ra_tid mapping configuration irrelevant
			 * since it is now a non-AGG queue.
			 */
			iwl_scd_txq_disable_agg(trans, txq_id);

			ssn = trans_pcie->txq[txq_id].q.read_ptr;
		}
	}

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
	trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);

	if (cfg) {
		u8 frame_limit = cfg->frame_limit;

		iwl_write_direct32(trans, HBUS_TARG_WRPTR,
				   (ssn & 0xff) | (txq_id << 8));
		iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

		/* Set up Tx window size and frame limit for this queue */
		iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
		iwl_trans_write_mem32(trans,
			trans_pcie->scd_base_addr +
1206 1207
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1208
					SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1209
			((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1210 1211 1212 1213 1214 1215 1216 1217
					SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		/* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
		iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
			       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			       (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
			       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			       SCD_QUEUE_STTS_REG_MSK);

		/* enable the scheduler for this queue (only) */
		if (txq_id == trans_pcie->cmd_queue &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, BIT(txq_id));
	}

	trans_pcie->txq[txq_id].active = true;
	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n",
			    txq_id, fifo, ssn & 0xff);
}

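/*
 * Deactivate a Tx queue: optionally mark it inactive in the scheduler and
 * clear its status area, then unmap and free any frames still on the queue.
 */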
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
				bool configure_scd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 stts_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
	static const u32 zero_val[4] = {};

	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
	 */
	if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", txq_id);
		return;
	}

	if (configure_scd) {
		iwl_scd_txq_set_inactive(trans, txq_id);

		iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
				    ARRAY_SIZE(zero_val));
	}

	iwl_pcie_txq_unmap(trans, txq_id);
	trans_pcie->txq[txq_id].ampdu = false;

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/*
 * iwl_pcie_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of the command in the
 * command queue.
 */
static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
				 struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	unsigned long flags;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int idx;
	u16 copy_size, cmd_size, scratch_size;
	bool had_nocopy = false;
	int i, ret;
	u32 cmd_pos;
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
		if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
			int copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 get_cmd_string(trans_pcie, cmd->id), cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

	spin_lock_bh(&txq->lock);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
					 INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */
	cmd_pos = offsetof(struct iwl_device_cmd, payload);
	copy_size = sizeof(out_cmd->hdr);
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy;

		if (!cmd->len[i])
			continue;

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP))) {
			copy = cmd->len[i];

			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
			continue;
		}

		/*
		 * Otherwise we need at least IWL_HCMD_SCRATCHBUF_SIZE copied
		 * in total (for the scratchbuf handling), but copy up to what
		 * we can fit into the payload for debug dump purposes.
		 */
		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
		cmd_pos += copy;

		/* However, treat copy_size the proper way, we need it below */
		if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
			copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);

	/* start the TFD with the scratchbuf */
	scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE);
	memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size);
	iwl_pcie_txq_build_tfd(trans, txq,
			       iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr),
			       scratch_size, true);

	/* map first command fragment, if any remains */
	if (copy_size > scratch_size) {
		phys_addr = dma_map_single(trans->dev,
					   ((u8 *)&out_cmd->hdr) + scratch_size,
					   copy_size - scratch_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_pcie_tfd_unmap(trans, out_meta,
					   &txq->tfds[q->write_ptr]);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
				       copy_size - scratch_size, false);
	}

1461
	/* map the remaining (adjusted) nocopy/dup fragments */
1462
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
1463
		const void *data = cmddata[i];
1464

1465
		if (!cmdlen[i])
1466
			continue;
1467 1468
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
1469
			continue;
1470 1471 1472
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, (void *)data,
1473
					   cmdlen[i], DMA_TO_DEVICE);
1474
		if (dma_mapping_error(trans->dev, phys_addr)) {
1475
			iwl_pcie_tfd_unmap(trans, out_meta,
1476
					   &txq->tfds[q->write_ptr]);
1477 1478 1479 1480
			idx = -ENOMEM;
			goto out;
		}

1481
		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
1482
	}
R
1484
	out_meta->flags = cmd->flags;
1485
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
1486
		kzfree(txq->entries[idx].free_buf);
1487
	txq->entries[idx].free_buf = dup_buf;
J
1489
	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);
R
1491 1492 1493 1494
	/* start timer if queue currently empty */
	if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);

1495
	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
1496
	ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
1497 1498 1499 1500
	if (ret < 0) {
		idx = ret;
		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
		goto out;
1501 1502
	}

1503
	/* Increment and update queue's write index */
1504
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
1505
	iwl_pcie_txq_inc_wr_ptr(trans, txq);
1506

1507 1508
	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);

J
1510
	spin_unlock_bh(&txq->lock);
1511 1512 1513
 free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
1514
	return idx;
1515 1516
}

1517 1518
/*
 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
1519
 * @rxb: Rx buffer to reclaim
1520 1521
 * @handler_status: return value of the handler of the command
 *	(put in setup_rx_handlers)
1522 1523 1524 1525 1526
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed.  The attached skb (if present) will only be freed
 * if the callback returns 1
 */
1527 1528
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb, int handler_status)
1529
{
Z
1531 1532 1533 1534
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
J
	struct iwl_cmd_meta *meta;
1537
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1538
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
1539 1540 1541 1542

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
1543
	if (WARN(txq_id != trans_pcie->cmd_queue,
1544
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
1545 1546 1547
		 txq_id, trans_pcie->cmd_queue, sequence,
		 trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
		 trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
1548
		iwl_print_hex_error(trans, pkt, 32);
1549
		return;
1550
	}
1551

1552
	spin_lock_bh(&txq->lock);
1553

1554
	cmd_index = get_cmd_index(&txq->q, index);
1555 1556
	cmd = txq->entries[cmd_index].cmd;
	meta = &txq->entries[cmd_index].meta;
1557

1558
	iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]);
R
1560
	/* Input error checking is done when commands are added to queue. */
J
1562
		struct page *p = rxb_steal_page(rxb);
1563 1564 1565

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
1566
		meta->source->_rx_page_order = trans_pcie->rx_page_order;
1567 1568
		meta->source->handler_status = handler_status;
	}
1569

1570
	iwl_pcie_cmdq_reclaim(trans, txq_id, index);
1571

J
1573
		if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
1574 1575
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
1576
				 get_cmd_string(trans_pcie, cmd->hdr.cmd));
1577
		}
1578
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1579
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
1580
			       get_cmd_string(trans_pcie, cmd->hdr.cmd));
1581
		wake_up(&trans_pcie->wait_command_queue);
1582
	}
1583

Z
1585

1586
	spin_unlock_bh(&txq->lock);
1587
}
1588

1589
#define HOST_COMPLETE_TIMEOUT	(2 * HZ)
1590

1591 1592
static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
				    struct iwl_host_cmd *cmd)
1593
{
J
1595 1596 1597 1598 1599 1600
	int ret;

	/* An asynchronous command can not expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;

	ret = iwl_pcie_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			get_cmd_string(trans_pcie, cmd->id), ret);
		return ret;
	}
	return 0;
}

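/*
 * Synchronous variant: enqueue the command and sleep until the completion
 * path (iwl_pcie_hcmd_complete()) wakes us, or until HOST_COMPLETE_TIMEOUT
 * expires and error recovery is triggered.
 */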
static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
				   struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
		       get_cmd_string(trans_pcie, cmd->id));

	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
				  &trans->status),
		 "Command %s: a command is already active!\n",
		 get_cmd_string(trans_pcie, cmd->id)))
		return -EIO;

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
		       get_cmd_string(trans_pcie, cmd->id));

	cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			get_cmd_string(trans_pcie, cmd->id), ret);
		return ret;
	}

	ret = wait_event_timeout(trans_pcie->wait_command_queue,
				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
					   &trans->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
		struct iwl_queue *q = &txq->q;

		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
			get_cmd_string(trans_pcie, cmd->id),
			jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
			q->read_ptr, q->write_ptr);

		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(trans_pcie, cmd->id));
		ret = -ETIMEDOUT;

		iwl_force_nmi(trans);
		iwl_trans_fw_error(trans);

		goto cancel;
	}

	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
		IWL_ERR(trans, "FW error in SYNC CMD %s\n",
			get_cmd_string(trans_pcie, cmd->id));
		dump_stack();
		ret = -EIO;
		goto cancel;
	}

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
		ret = -ERFKILL;
		goto cancel;
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			get_cmd_string(trans_pcie, cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
1698 1699
		trans_pcie->txq[trans_pcie->cmd_queue].
			entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
1700
	}
1701

1702 1703 1704
	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
1705 1706 1707 1708 1709
	}

	return ret;
}

int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
				  cmd->id);
		return -ERFKILL;
	}

	if (cmd->flags & CMD_ASYNC)
		return iwl_pcie_send_hcmd_async(trans, cmd);

	/* We still can fail on RFKILL that can be asserted while we wait */
	return iwl_pcie_send_hcmd_sync(trans, cmd);
}

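/*
 * Data path entry point: build a TFD for the given skb.  TB0 points at the
 * scratch buffer copy of the start of the Tx command, TB1 at the rest of
 * the command plus the 802.11 header, and TB2 (if any) at the frame payload.
 */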
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq;
	struct iwl_queue *q;
	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
	void *tb1_addr;
	u16 len, tb1_len, tb2_len;
	bool wait_write_ptr;
	__le16 fc = hdr->frame_control;
	u8 hdr_len = ieee80211_hdrlen(fc);
	u16 wifi_seq;

	txq = &trans_pcie->txq[txq_id];
	q = &txq->q;

	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	spin_lock(&txq->lock);

	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirement that helps the SCD to parse
	 * the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE(txq->ampdu &&
		  (wifi_seq & 0xff) != q->write_ptr,
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, q->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[q->write_ptr].skb = skb;
	txq->entries[q->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(q->write_ptr)));

	tb0_phys = iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr);
	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
		       offsetof(struct iwl_tx_cmd, scratch);

	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[q->write_ptr].meta;

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
	      hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
	tb1_len = ALIGN(len, 4);

	/* Tell NIC about any 2-byte padding after MAC header */
	if (tb1_len != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* The first TB points to the scratchbuf data - min_copy bytes */
	memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
	       IWL_HCMD_SCRATCHBUF_SIZE);
	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
			       IWL_HCMD_SCRATCHBUF_SIZE, true);

	/* there must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE);

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_HCMD_SCRATCHBUF_SIZE;
	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
		goto out_err;
	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);

	/*
	 * Set up TFD's third entry to point directly to remainder
	 * of skb, if any (802.11 null frames have no payload).
	 */
	tb2_len = skb->len - hdr_len;
	if (tb2_len > 0) {
		dma_addr_t tb2_phys = dma_map_single(trans->dev,
						     skb->data + hdr_len,
						     tb2_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
			iwl_pcie_tfd_unmap(trans, out_meta,
					   &txq->tfds[q->write_ptr]);
			goto out_err;
		}
		iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
	}

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     &txq->tfds[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
			     skb->data + hdr_len, tb2_len);
	trace_iwlwifi_dev_tx_data(trans->dev, skb,
				  skb->data + hdr_len, tb2_len);

	wait_write_ptr = ieee80211_has_morefrags(fc);

	/* start timer if queue currently empty */
	if (q->read_ptr == q->write_ptr) {
		if (txq->need_update && trans_pcie->wd_timeout)
			mod_timer(&txq->stuck_timer,
				  jiffies + trans_pcie->wd_timeout);
		IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
		iwl_trans_pcie_ref(trans);
	}
1849 1850

	/* Tell device the write index *just past* this latest filled TFD */
1851
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
1852 1853
	if (!wait_write_ptr)
		iwl_pcie_txq_inc_wr_ptr(trans, txq);
1854 1855 1856

	/*
	 * At this point the frame is "transmitted" successfully
1857
	 * and we will get a TX status notification eventually.
1858 1859
	 */
	if (iwl_queue_space(q) < q->high_mark) {
1860
		if (wait_write_ptr)
1861
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
1862
		else
1863 1864 1865 1866 1867 1868 1869
			iwl_stop_queue(trans, txq);
	}
	spin_unlock(&txq->lock);
	return 0;
out_err:
	spin_unlock(&txq->lock);
	return -1;
1870
}