/******************************************************************************
 *
 * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "internal.h"
/* FIXME: need to abstract out TX command (once we know what it looks like) */
#include "dvm/commands.h"

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For Tx queues, there are low mark and high mark limits. If, after queuing
 * a packet for Tx, the free space becomes less than the low mark, the Tx
 * queue is stopped. When reclaiming packets (on the 'tx done' IRQ), if the
 * free space rises above the high mark, the Tx queue is resumed.
 *
 ***************************************************/
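
/*
 * Worked example for iwl_queue_space() below (illustrative numbers,
 * assuming n_bd = n_window = 256): read_ptr = 10 and write_ptr = 250
 * give s = 10 - 250 = -240; since s <= 0, s += 256 => 16; subtracting
 * the 2-entry reserve leaves 14 slots reported free.
 */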
static int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}

/*
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	if (WARN_ON(!is_power_of_2(count)))
		return -EINVAL;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;
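
	/* low_mark gates waking the queue (see iwl_trans_pcie_reclaim) and
	 * high_mark gates stopping it (see iwl_trans_pcie_tx) */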

	q->write_ptr = 0;
	q->read_ptr = 0;

	return 0;
}

static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
				  struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans,
				  struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

static void iwl_pcie_txq_stuck_timer(unsigned long data)
{
	struct iwl_txq *txq = (void *)data;
	struct iwl_queue *q = &txq->q;
	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
	u32 scd_sram_addr = trans_pcie->scd_base_addr +
				SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
	u8 buf[16];
	int i;

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->q.read_ptr == txq->q.write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
		jiffies_to_msecs(trans_pcie->wd_timeout));
	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->q.read_ptr, txq->q.write_ptr);

	iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

	iwl_print_hex_error(trans, buf, sizeof(buf));

	for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
			iwl_read_direct32(trans, FH_TX_TRB_REG(i)));

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
		u32 tbl_dw =
			iwl_trans_read_mem32(trans,
					     trans_pcie->scd_base_addr +
					     SCD_TRANS_TBL_OFFSET_QUEUE(i));

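		/* two queues share each translation-table dword; odd-numbered
		 * queues use the high 16 bits, even-numbered ones the low */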
		if (i & 0x1)
			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
		else
			tbl_dw = tbl_dw & 0x0000FFFF;

		IWL_ERR(trans,
			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
			i, active ? "" : "in", fifo, tbl_dw,
			iwl_read_prph(trans,
				      SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1),
			iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
	}

	for (i = q->read_ptr; i != q->write_ptr;
	     i = iwl_queue_inc_wrap(i, q->n_bd))
		IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
			le32_to_cpu(txq->scratchbufs[i].scratch));

	iwl_op_mode_nic_error(trans->op_mode);
}

/*
 * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
					     struct iwl_txq *txq, u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *) txq->entries[txq->q.write_ptr].cmd->payload;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	sta_id = tx_cmd->sta_id;
	sec_ctl = tx_cmd->sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += WEP_IV_LEN + WEP_ICV_LEN;
		break;
	}

	if (trans_pcie->bc_table_dword)
		len = DIV_ROUND_UP(len, 4);

	bc_ent = cpu_to_le16(len | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

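	/* The first TFD_QUEUE_SIZE_BC_DUP entries are mirrored past the end
	 * of the table - the scheduler reads the byte-count table circularly
	 * and can fetch a little beyond the wrap point, so both copies must
	 * stay in sync. */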
	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}

static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					    struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *)txq->entries[txq->q.read_ptr].cmd->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans_pcie->cmd_queue)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

/*
 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
 */
void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		struct iwl_trans_pcie *trans_pcie =
			IWL_TRANS_GET_PCIE_TRANS(trans);
		/* if we're trying to save power */
		if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
			/* wake up nic if it's powered down ...
			 * uCode will wake up, and interrupt us again, so next
			 * time we'll skip this part. */
			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Tx queue %d requesting wakeup,"
					" GP1 = 0x%x\n", txq_id, reg);
				iwl_set_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				return;
			}

			IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id,
				     txq->q.write_ptr);

			iwl_write_direct32(trans, HBUS_TARG_WRPTR,
				     txq->q.write_ptr | (txq_id << 8));

		/*
		 * else not in power-save mode,
		 * uCode will never sleep when we're
		 * trying to tx (during RFKILL, we're not trying to tx).
		 */
		} else
			iwl_write32(trans, HBUS_TARG_WRPTR,
				    txq->q.write_ptr | (txq_id << 8));
	}
	txq->need_update = 0;
}

static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				       dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;
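	/* illustrative: a 36-bit addr 0x812345678 with len 0x40 packs to
	 * lo = 0x12345678, hi_n_len = 0x0408 (low nibble = addr bits 35:32,
	 * upper 12 bits = length) */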

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
			       struct iwl_cmd_meta *meta,
			       struct iwl_tfd *tfd)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, this is quite a serious situation */
		return;
	}

	/* first TB is never freed - it's the scratchbuf data */

	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
				 iwl_pcie_tfd_tb_get_len(tfd, i),
				 DMA_TO_DEVICE);

	tfd->num_tbs = 0;
}

/*
 * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	/* rd_ptr is bounded by n_bd and idx is bounded by n_window */
	int rd_ptr = txq->q.read_ptr;
	int idx = get_cmd_index(&txq->q, rd_ptr);

	lockdep_assert_held(&txq->lock);

	/* We have only q->n_window txq->entries, but we use q->n_bd tfds */
	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);

	/* free SKB */
	if (txq->entries) {
		struct sk_buff *skb;

		skb = txq->entries[idx].skb;

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->entries[idx].skb = NULL;
		}
	}
}

static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
				  dma_addr_t addr, u16 len, u8 reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum of 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
		return -EINVAL;

	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(trans, "Unaligned address = %llx\n",
			(unsigned long long)addr);

	iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}

static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
			       struct iwl_txq *txq, int slots_num,
			       u32 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
	size_t scratchbuf_sz;
	int i;

	if (WARN_ON(txq->entries || txq->tfds))
		return -EINVAL;

	setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer,
		    (unsigned long)txq);
	txq->trans_pcie = trans_pcie;

	txq->q.n_window = slots_num;

	txq->entries = kcalloc(slots_num,
			       sizeof(struct iwl_pcie_txq_entry),
			       GFP_KERNEL);

	if (!txq->entries)
		goto error;

	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->entries[i].cmd =
				kmalloc(sizeof(struct iwl_device_cmd),
					GFP_KERNEL);
			if (!txq->entries[i].cmd)
				goto error;
		}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->q.dma_addr, GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
		goto error;
	}

	BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs));
	BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) !=
			sizeof(struct iwl_cmd_header) +
			offsetof(struct iwl_tx_cmd, scratch));

	scratchbuf_sz = sizeof(*txq->scratchbufs) * slots_num;

	txq->scratchbufs = dma_alloc_coherent(trans->dev, scratchbuf_sz,
					      &txq->scratchbufs_dma,
					      GFP_KERNEL);
	if (!txq->scratchbufs)
		goto err_free_tfds;

	txq->q.id = txq_id;

	return 0;
err_free_tfds:
	dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr);
error:
	if (txq->entries && txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->entries[i].cmd);
	kfree(txq->entries);
	txq->entries = NULL;

	return -ENOMEM;

}

static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
			      int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = 0;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
			txq_id);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
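	/* (the FH CBBC register holds this address in 256-byte units,
	 * hence the >> 8 below) */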
	iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}

/*
 * iwl_pcie_txq_unmap -  Unmap any remaining DMA mappings and free skb's
 */
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;

	if (!q->n_bd)
		return;

	spin_lock_bh(&txq->lock);
	while (q->write_ptr != q->read_ptr) {
		iwl_pcie_txq_free_tfd(trans, txq);
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
	spin_unlock_bh(&txq->lock);
}

/*
 * iwl_pcie_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < txq->q.n_window; i++) {
			kfree(txq->entries[i].cmd);
			kfree(txq->entries[i].free_buf);
		}

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd) {
		dma_free_coherent(dev, sizeof(struct iwl_tfd) *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
		txq->q.dma_addr = 0;

		dma_free_coherent(dev,
				  sizeof(*txq->scratchbufs) * txq->q.n_window,
				  txq->scratchbufs, txq->scratchbufs_dma);
	}

	kfree(txq->entries);
	txq->entries = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
 */
static void iwl_pcie_txq_set_sched(struct iwl_trans *trans, u32 mask)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write_prph(trans, SCD_TXFACT, mask);
}

void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int nq = trans->cfg->base_params->num_of_queues;
	int chan;
	u32 reg_val;
	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
				SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);

	/* make sure all queue are not stopped/used */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);

	WARN_ON(scd_base_addr != 0 &&
		scd_base_addr != trans_pcie->scd_base_addr);

	/* reset context data, TX status and translation data */
	iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
				   SCD_CONTEXT_MEM_LOWER_BOUND,
			    NULL, clear_dwords);

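	/* byte-count table base is programmed in 1024-byte units (>> 10) */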
	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* The chain extension of the SCD doesn't work well. This feature is
	 * enabled by default by the HW, so we need to disable it manually.
	 */
	iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
				trans_pcie->cmd_fifo);

	/* Activate all Tx DMA/FIFO channels */
	iwl_pcie_txq_set_sched(trans, IWL_MASK(0, 7));

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		struct iwl_txq *txq = &trans_pcie->txq[txq_id];

		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
				   txq->q.dma_addr >> 8);
		iwl_pcie_txq_unmap(trans, txq_id);
		txq->q.read_ptr = 0;
		txq->q.write_ptr = 0;
	}

	/* Tell NIC where to find the "keep warm" buffer */
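	/* (FH_KW_MEM_ADDR_REG holds the address in 16-byte units, >> 4) */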
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	iwl_pcie_tx_start(trans, trans_pcie->scd_base_addr);
}

/*
 * iwl_pcie_tx_stop - Stop all Tx DMA channels
 */
int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ch, txq_id, ret;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	iwl_pcie_txq_set_sched(trans, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(trans,
				   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
			FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
		if (ret < 0)
			IWL_ERR(trans,
				"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
				ch,
				iwl_read_direct32(trans,
						  FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	if (!trans_pcie->txq) {
		IWL_WARN(trans,
			 "Stopping tx queues that aren't allocated...\n");
		return 0;
	}

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_pcie_txq_unmap(trans, txq_id);

	return 0;
}

/*
 * iwl_pcie_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Tx queues */
	if (trans_pcie->txq) {
		for (txq_id = 0;
		     txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
			iwl_pcie_txq_free(trans, txq_id);
	}

	kfree(trans_pcie->txq);
	trans_pcie->txq = NULL;

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}

/*
 * iwl_pcie_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 */
static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
			sizeof(struct iwlagn_scd_bc_tbl);

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
				   scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
				  sizeof(struct iwl_txq), GFP_KERNEL);
	if (!trans_pcie->txq) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id],
					  slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	iwl_pcie_tx_free(trans);

	return ret;
}

int iwl_pcie_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	unsigned long flags;
	bool alloc = false;

	if (!trans_pcie->txq) {
		ret = iwl_pcie_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl_write_prph(trans, SCD_TXFACT, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id],
					 slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_pcie_tx_free(trans);
	return ret;
}

static inline void iwl_pcie_txq_progress(struct iwl_trans_pcie *trans_pcie,
					   struct iwl_txq *txq)
{
	if (!trans_pcie->wd_timeout)
		return;

	/*
	 * if empty delete timer, otherwise move timer forward
	 * since we're making progress on this queue
	 */
	if (txq->q.read_ptr == txq->q.write_ptr)
		del_timer(&txq->stuck_timer);
	else
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
}

/* Frees buffers until index _not_ inclusive */
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	/* n_bd is usually 256 => n_bd - 1 = 0xff */
	int tfd_num = ssn & (txq->q.n_bd - 1);
	struct iwl_queue *q = &txq->q;
	int last_to_free;

	/* This function is not meant to release cmd queue */
	if (WARN_ON(txq_id == trans_pcie->cmd_queue))
		return;

	spin_lock_bh(&txq->lock);

	if (txq->q.read_ptr == tfd_num)
		goto out;

	IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
			   txq_id, txq->q.read_ptr, tfd_num, ssn);

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used */
	last_to_free = iwl_queue_dec_wrap(tfd_num, q->n_bd);

	if (!iwl_queue_used(q, last_to_free)) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free, q->n_bd,
			q->write_ptr, q->read_ptr);
		goto out;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		goto out;

	for (;
	     q->read_ptr != tfd_num;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
			continue;

		__skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb);

		txq->entries[txq->q.read_ptr].skb = NULL;

		iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);

		iwl_pcie_txq_free_tfd(trans, txq);
	}

	iwl_pcie_txq_progress(trans_pcie, txq);

	if (iwl_queue_space(&txq->q) > txq->q.low_mark)
		iwl_wake_queue(trans, txq);
out:
	spin_unlock_bh(&txq->lock);
}

/*
 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	lockdep_assert_held(&txq->lock);

	if ((idx >= q->n_bd) || (!iwl_queue_used(q, idx))) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, idx, q->n_bd,
			q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
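		/* each completion normally reclaims exactly one entry;
		 * looping more than once means a command completion was
		 * skipped (see the error below) */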

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
				idx, q->write_ptr, q->read_ptr);
			iwl_op_mode_nic_error(trans->op_mode);
		}
	}

	iwl_pcie_txq_progress(trans_pcie, txq);
}

static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
				 u16 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);

	return 0;
}

static inline void iwl_pcie_txq_set_inactive(struct iwl_trans *trans,
					     u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(trans,
		SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}

void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
			       int sta_id, int tid, int frame_limit, u16 ssn)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	/* Stop this Tx queue before configuring it */
	iwl_pcie_txq_set_inactive(trans, txq_id);

	/* Set this queue as a chain-building queue unless it is CMD queue */
	if (txq_id != trans_pcie->cmd_queue)
		iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));

	/* If this queue is mapped to a certain station: it is an AGG queue */
	if (sta_id != IWL_INVALID_STATION) {
		u16 ra_tid = BUILD_RAxTID(sta_id, tid);

		/* Map receiver-address / traffic-ID to this queue */
		iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);

		/* enable aggregations for the queue */
		iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
	} else {
		/*
		 * disable aggregations for the queue, this will also make the
		 * ra_tid mapping configuration irrelevant since it is now a
		 * non-AGG queue.
		 */
		iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
	}

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
	trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);

	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (ssn & 0xff) | (txq_id << 8));
	iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

	/* Set up Tx window size and frame limit for this queue */
	iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
	iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
		       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		       (fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
		       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
		       SCD_QUEUE_STTS_REG_MSK);
	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n",
			    txq_id, fifo, ssn & 0xff);
}

void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 stts_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
	static const u32 zero_val[4] = {};

	if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
		WARN_ONCE(1, "queue %d not used", txq_id);
		return;
	}

	iwl_pcie_txq_set_inactive(trans, txq_id);

	iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
			    ARRAY_SIZE(zero_val));

	iwl_pcie_txq_unmap(trans, txq_id);

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/*
 * iwl_pcie_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate that the operation
 * failed. On success, it returns the index (> 0) of the command in the
 * command queue.
 */
static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
				 struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int idx;
	u16 copy_size, cmd_size, scratch_size;
	bool had_nocopy = false;
	int i;
	u32 cmd_pos;
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
		if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
			int copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 get_cmd_string(trans_pcie, cmd->id), cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

	spin_lock_bh(&txq->lock);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
					 INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */
	cmd_pos = offsetof(struct iwl_device_cmd, payload);
	copy_size = sizeof(out_cmd->hdr);
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy = 0;

		if (!cmd->len[i])
			continue;

		/* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
		if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
			copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
		}

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			copy = cmd->len[i];

		if (copy) {
			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);

	/* start the TFD with the scratchbuf */
	scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE);
	memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size);
	iwl_pcie_txq_build_tfd(trans, txq,
			       iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr),
			       scratch_size, 1);

	/* map first command fragment, if any remains */
	if (copy_size > scratch_size) {
		phys_addr = dma_map_single(trans->dev,
					   ((u8 *)&out_cmd->hdr) + scratch_size,
					   copy_size - scratch_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_pcie_tfd_unmap(trans, out_meta,
					   &txq->tfds[q->write_ptr]);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
				       copy_size - scratch_size, 0);
	}

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		const void *data = cmddata[i];

		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, (void *)data,
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_pcie_tfd_unmap(trans, out_meta,
					   &txq->tfds[q->write_ptr]);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], 0);
	}

	out_meta->flags = cmd->flags;
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kfree(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;

	txq->need_update = 1;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);

	/* start timer if queue currently empty */
	if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_pcie_txq_inc_wr_ptr(trans, txq);

 out:
	spin_unlock_bh(&txq->lock);
 free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
	return idx;
}

/*
 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 * @handler_status: return value of the handler of the command
 *	(put in setup_rx_handlers)
 *
 * If an Rx buffer has an async callback associated with it, the callback
 * will be executed.  The attached skb (if present) will only be freed
 * if the callback returns 1.
 */
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb, int handler_status)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans_pcie->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, trans_pcie->cmd_queue, sequence,
		 trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
		 trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	spin_lock_bh(&txq->lock);

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->entries[cmd_index].cmd;
	meta = &txq->entries[cmd_index].meta;

	iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		struct page *p = rxb_steal_page(rxb);

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
		meta->source->_rx_page_order = trans_pcie->rx_page_order;
		meta->source->handler_status = handler_status;
	}

	iwl_pcie_cmdq_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 get_cmd_string(trans_pcie, cmd->hdr.cmd));
		}
		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(trans_pcie, cmd->hdr.cmd));
		wake_up(&trans_pcie->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock_bh(&txq->lock);
}

#define HOST_COMPLETE_TIMEOUT (2 * HZ)

static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
				    struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	/* An asynchronous command can not expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;

	ret = iwl_pcie_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			get_cmd_string(trans_pcie, cmd->id), ret);
		return ret;
	}
	return 0;
}

static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
				   struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
		       get_cmd_string(trans_pcie, cmd->id));

	if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE,
				     &trans_pcie->status))) {
		IWL_ERR(trans, "Command %s: a command is already active!\n",
			get_cmd_string(trans_pcie, cmd->id));
		return -EIO;
	}

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
		       get_cmd_string(trans_pcie, cmd->id));

	cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			get_cmd_string(trans_pcie, cmd->id), ret);
		return ret;
	}

	ret = wait_event_timeout(trans_pcie->wait_command_queue,
				 !test_bit(STATUS_HCMD_ACTIVE,
					   &trans_pcie->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
			struct iwl_txq *txq =
				&trans_pcie->txq[trans_pcie->cmd_queue];
			struct iwl_queue *q = &txq->q;

			IWL_ERR(trans,
				"Error sending %s: time out after %dms.\n",
				get_cmd_string(trans_pcie, cmd->id),
				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			IWL_ERR(trans,
				"Current CMD queue read_ptr %d write_ptr %d\n",
				q->read_ptr, q->write_ptr);

			clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
			IWL_DEBUG_INFO(trans,
				       "Clearing HCMD_ACTIVE for command %s\n",
				       get_cmd_string(trans_pcie, cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) {
		IWL_ERR(trans, "FW error in SYNC CMD %s\n",
			get_cmd_string(trans_pcie, cmd->id));
		ret = -EIO;
		goto cancel;
	}

	if (test_bit(STATUS_RFKILL, &trans_pcie->status)) {
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
		ret = -ERFKILL;
		goto cancel;
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			get_cmd_string(trans_pcie, cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		trans_pcie->txq[trans_pcie->cmd_queue].
			entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}

int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_bit(STATUS_FW_ERROR, &trans_pcie->status))
		return -EIO;

	if (test_bit(STATUS_RFKILL, &trans_pcie->status)) {
		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
				  cmd->id);
		return -ERFKILL;
	}

	if (cmd->flags & CMD_ASYNC)
		return iwl_pcie_send_hcmd_async(trans, cmd);

	/* We still can fail on RFKILL that can be asserted while we wait */
	return iwl_pcie_send_hcmd_sync(trans, cmd);
}

int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq;
	struct iwl_queue *q;
	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
	void *tb1_addr;
	u16 len, tb1_len, tb2_len;
	u8 wait_write_ptr = 0;
	__le16 fc = hdr->frame_control;
	u8 hdr_len = ieee80211_hdrlen(fc);
	u16 __maybe_unused wifi_seq;

	txq = &trans_pcie->txq[txq_id];
	q = &txq->q;

	if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	spin_lock(&txq->lock);

	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirement to help the SCD to parse
	 * the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
#ifdef CONFIG_IWLWIFI_DEBUG
	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) &&
		  ((wifi_seq & 0xff) != q->write_ptr),
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, q->write_ptr);
#endif

	/* Set up driver data for this TFD */
	txq->entries[q->write_ptr].skb = skb;
	txq->entries[q->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.cmd = REPLY_TX;
	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(q->write_ptr)));
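
	/* TFD layout built below: TB0 is the 16-byte scratch buffer, TB1
	 * covers the rest of the TX command plus the 802.11 header, and
	 * TB2 (if any) points at the frame payload. */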

	tb0_phys = iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr);
	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
		       offsetof(struct iwl_tx_cmd, scratch);

	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[q->write_ptr].meta;

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
	      hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
	tb1_len = (len + 3) & ~3;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (tb1_len != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* The first TB points to the scratchbuf data - min_copy bytes */
	memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
	       IWL_HCMD_SCRATCHBUF_SIZE);
	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
			       IWL_HCMD_SCRATCHBUF_SIZE, 1);

	/* there must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE);

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_HCMD_SCRATCHBUF_SIZE;
	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
		goto out_err;
	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, 0);

	/*
	 * Set up TFD's third entry to point directly to remainder
	 * of skb, if any (802.11 null frames have no payload).
	 */
	tb2_len = skb->len - hdr_len;
	if (tb2_len > 0) {
		dma_addr_t tb2_phys = dma_map_single(trans->dev,
						     skb->data + hdr_len,
						     tb2_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
			iwl_pcie_tfd_unmap(trans, out_meta,
					   &txq->tfds[q->write_ptr]);
			goto out_err;
		}
		iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, 0);
	}

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     &txq->tfds[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
			     skb->data + hdr_len, tb2_len);
	trace_iwlwifi_dev_tx_data(trans->dev, skb,
				  skb->data + hdr_len, tb2_len);

	if (!ieee80211_has_morefrags(fc)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* start timer if queue currently empty */
	if (txq->need_update && q->read_ptr == q->write_ptr &&
	    trans_pcie->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_pcie_txq_inc_wr_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */
	if (iwl_queue_space(q) < q->high_mark) {
		if (wait_write_ptr) {
			txq->need_update = 1;
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
		} else {
			iwl_stop_queue(trans, txq);
		}
	}
	spin_unlock(&txq->lock);
	return 0;
out_err:
	spin_unlock(&txq->lock);
	return -1;
}