/******************************************************************************
 *
 * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "internal.h"
/* FIXME: need to abstract out TX command (once we know what it looks like) */
#include "dvm/commands.h"

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For the Tx queue, there are low mark and high mark limits.  If, after
 * queuing the packet for Tx, free space becomes < low mark, the Tx queue is
 * stopped.  When reclaiming packets (on the 'tx done' IRQ), if free space
 * becomes > high mark, the Tx queue is resumed.
 *
 ***************************************************/
static int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}

/*
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	if (WARN_ON(!is_power_of_2(count)))
		return -EINVAL;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = 0;
	q->read_ptr = 0;

	return 0;
}
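
/*
 * For illustration: the power-of-two checks above exist because the wrap
 * helpers used throughout this file advance indexes by masking. A minimal
 * sketch, assuming the mask-based form; the authoritative definitions live
 * in internal.h and may differ in detail.
 */
#if 0
static inline int iwl_queue_inc_wrap(int index, int n_bd)
{
	/* n_bd is a power of two, so the mask implements the wrap-around */
	return ++index & (n_bd - 1);
}

static inline int iwl_queue_dec_wrap(int index, int n_bd)
{
	return --index & (n_bd - 1);
}
#endif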

static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
				  struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans,
				  struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

static void iwl_pcie_txq_stuck_timer(unsigned long data)
{
	struct iwl_txq *txq = (void *)data;
	struct iwl_queue *q = &txq->q;
	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
	u32 scd_sram_addr = trans_pcie->scd_base_addr +
				SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
	u8 buf[16];
	int i;

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->q.read_ptr == txq->q.write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
		jiffies_to_msecs(trans_pcie->wd_timeout));
	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->q.read_ptr, txq->q.write_ptr);

	iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

	iwl_print_hex_error(trans, buf, sizeof(buf));

	for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
			iwl_read_direct32(trans, FH_TX_TRB_REG(i)));

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
		u32 tbl_dw =
			iwl_trans_read_mem32(trans,
					     trans_pcie->scd_base_addr +
					     SCD_TRANS_TBL_OFFSET_QUEUE(i));

		if (i & 0x1)
			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
		else
			tbl_dw = tbl_dw & 0x0000FFFF;

		IWL_ERR(trans,
			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
			i, active ? "" : "in", fifo, tbl_dw,
			iwl_read_prph(trans,
				      SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1),
			iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
	}

	for (i = q->read_ptr; i != q->write_ptr;
	     i = iwl_queue_inc_wrap(i, q->n_bd)) {
		struct iwl_tx_cmd *tx_cmd =
			(struct iwl_tx_cmd *)txq->entries[i].cmd->payload;
		IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
			get_unaligned_le32(&tx_cmd->scratch));
	}

	iwl_op_mode_nic_error(trans->op_mode);
}

/*
 * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
					     struct iwl_txq *txq, u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *) txq->entries[txq->q.write_ptr].cmd->payload;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	sta_id = tx_cmd->sta_id;
	sec_ctl = tx_cmd->sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += WEP_IV_LEN + WEP_ICV_LEN;
		break;
	}

	if (trans_pcie->bc_table_dword)
		len = DIV_ROUND_UP(len, 4);

	bc_ent = cpu_to_le16(len | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
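
/*
 * For illustration: the byte-count entry written above packs the (possibly
 * dword-rounded) length into bits 0..11 and the station id into bits 12..15.
 * A minimal sketch of that packing, using a hypothetical helper name that is
 * not part of the driver:
 */
#if 0
static inline __le16 iwl_pcie_bc_entry_example(u16 len, u8 sta_id)
{
	return cpu_to_le16((len & 0xFFF) | ((u16)(sta_id & 0xF) << 12));
}
#endif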

static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					    struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *)txq->entries[txq->q.read_ptr].cmd->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans_pcie->cmd_queue)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

/*
 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
 */
void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		struct iwl_trans_pcie *trans_pcie =
			IWL_TRANS_GET_PCIE_TRANS(trans);
		/* if we're trying to save power */
		if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
			/* wake up nic if it's powered down ...
			 * uCode will wake up, and interrupt us again, so next
			 * time we'll skip this part. */
			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Tx queue %d requesting wakeup,"
					" GP1 = 0x%x\n", txq_id, reg);
				iwl_set_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				return;
			}

			IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id,
				     txq->q.write_ptr);

			iwl_write_direct32(trans, HBUS_TARG_WRPTR,
				     txq->q.write_ptr | (txq_id << 8));

		/*
		 * else not in power-save mode,
		 * uCode will never sleep when we're
		 * trying to tx (during RFKILL, we're not trying to tx).
		 */
		} else
			iwl_write32(trans, HBUS_TARG_WRPTR,
				    txq->q.write_ptr | (txq_id << 8));
	}
	txq->need_update = 0;
}

static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				       dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}
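
/*
 * For illustration: the accessors above pack a 36-bit DMA address and a
 * 12-bit length into each TB roughly as
 *
 *	tb->lo       = addr[31:0]
 *	tb->hi_n_len = (len[11:0] << 4) | addr[35:32]
 *
 * A minimal round-trip sketch under that assumption, with made-up values:
 */
#if 0
static void iwl_pcie_tfd_tb_pack_example(struct iwl_tfd *tfd)
{
	dma_addr_t addr = (dma_addr_t)0x123456780ULL & DMA_BIT_MASK(36);
	u16 len = 0x100;

	iwl_pcie_tfd_set_tb(tfd, 0, addr, len);
	WARN_ON(iwl_pcie_tfd_tb_get_addr(tfd, 0) != addr);
	WARN_ON(iwl_pcie_tfd_tb_get_len(tfd, 0) != len);
}
#endif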

static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
			       struct iwl_cmd_meta *meta,
			       struct iwl_tfd *tfd)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		dma_unmap_single(trans->dev,
				dma_unmap_addr(meta, mapping),
				dma_unmap_len(meta, len),
				DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
				 iwl_pcie_tfd_tb_get_len(tfd, i),
				 DMA_TO_DEVICE);

	tfd->num_tbs = 0;
}

/*
 * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	/* rd_ptr is bounded by n_bd and idx is bounded by n_window */
	int rd_ptr = txq->q.read_ptr;
	int idx = get_cmd_index(&txq->q, rd_ptr);

	lockdep_assert_held(&txq->lock);

	/* We have only q->n_window txq->entries, but we use q->n_bd tfds */
	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);

	/* free SKB */
	if (txq->entries) {
		struct sk_buff *skb;

		skb = txq->entries[idx].skb;

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->entries[idx].skb = NULL;
		}
	}
}

static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
				  dma_addr_t addr, u16 len, u8 reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum of 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
		return -EINVAL;

	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(trans, "Unaligned address = %llx\n",
			(unsigned long long)addr);

	iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}

static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
			       struct iwl_txq *txq, int slots_num,
			       u32 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
	int i;

	if (WARN_ON(txq->entries || txq->tfds))
		return -EINVAL;

	setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer,
		    (unsigned long)txq);
	txq->trans_pcie = trans_pcie;

	txq->q.n_window = slots_num;

	txq->entries = kcalloc(slots_num,
			       sizeof(struct iwl_pcie_txq_entry),
			       GFP_KERNEL);

	if (!txq->entries)
		goto error;

	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->entries[i].cmd =
				kmalloc(sizeof(struct iwl_device_cmd),
					GFP_KERNEL);
			if (!txq->entries[i].cmd)
				goto error;
		}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->q.dma_addr, GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = txq_id;

	return 0;
error:
	if (txq->entries && txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->entries[i].cmd);
	kfree(txq->entries);
	txq->entries = NULL;

	return -ENOMEM;

}

static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
			      int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = 0;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
			txq_id);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}

/*
 * iwl_pcie_txq_unmap -  Unmap any remaining DMA mappings and free skb's
 */
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;

	if (!q->n_bd)
		return;

	spin_lock_bh(&txq->lock);
	while (q->write_ptr != q->read_ptr) {
		iwl_pcie_txq_free_tfd(trans, txq);
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
	spin_unlock_bh(&txq->lock);
}

/*
 * iwl_pcie_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < txq->q.n_window; i++) {
			kfree(txq->entries[i].cmd);
			kfree(txq->entries[i].copy_cmd);
			kfree(txq->entries[i].free_buf);
		}

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd) {
		dma_free_coherent(dev, sizeof(struct iwl_tfd) *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
		txq->q.dma_addr = 0;
	}

	kfree(txq->entries);
	txq->entries = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
 */
static void iwl_pcie_txq_set_sched(struct iwl_trans *trans, u32 mask)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write_prph(trans, SCD_TXFACT, mask);
}

void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int nq = trans->cfg->base_params->num_of_queues;
	int chan;
	u32 reg_val;
	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
				SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);

	/* make sure all queue are not stopped/used */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);

	WARN_ON(scd_base_addr != 0 &&
		scd_base_addr != trans_pcie->scd_base_addr);

	/* reset context data, TX status and translation data */
	iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
				   SCD_CONTEXT_MEM_LOWER_BOUND,
			    NULL, clear_dwords);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* The chain extension of the SCD doesn't work well. This feature is
	 * enabled by default by the HW, so we need to disable it manually.
	 */
	iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
				trans_pcie->cmd_fifo);

	/* Activate all Tx DMA/FIFO channels */
	iwl_pcie_txq_set_sched(trans, IWL_MASK(0, 7));

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		struct iwl_txq *txq = &trans_pcie->txq[txq_id];

		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
				   txq->q.dma_addr >> 8);
		iwl_pcie_txq_unmap(trans, txq_id);
		txq->q.read_ptr = 0;
		txq->q.write_ptr = 0;
	}

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	iwl_pcie_tx_start(trans, trans_pcie->scd_base_addr);
}

/*
 * iwl_pcie_tx_stop - Stop all Tx DMA channels
 */
int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ch, txq_id, ret;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	iwl_pcie_txq_set_sched(trans, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(trans,
				   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
			FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
		if (ret < 0)
			IWL_ERR(trans,
				"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
				ch,
				iwl_read_direct32(trans,
						  FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	if (!trans_pcie->txq) {
		IWL_WARN(trans,
			 "Stopping tx queues that aren't allocated...\n");
		return 0;
	}

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_pcie_txq_unmap(trans, txq_id);

	return 0;
}

/*
 * iwl_pcie_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Tx queues */
	if (trans_pcie->txq) {
		for (txq_id = 0;
		     txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
			iwl_pcie_txq_free(trans, txq_id);
	}

	kfree(trans_pcie->txq);
	trans_pcie->txq = NULL;

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}

/*
 * iwl_pcie_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 */
static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
			sizeof(struct iwlagn_scd_bc_tbl);

	/*It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
				   scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
				  sizeof(struct iwl_txq), GFP_KERNEL);
	if (!trans_pcie->txq) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id],
					  slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	iwl_pcie_tx_free(trans);

	return ret;
}

int iwl_pcie_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	unsigned long flags;
	bool alloc = false;

	if (!trans_pcie->txq) {
		ret = iwl_pcie_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl_write_prph(trans, SCD_TXFACT, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id],
					 slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;
error:
	/*Upon error, free only if we allocated something */
	if (alloc)
		iwl_pcie_tx_free(trans);
	return ret;
}

static inline void iwl_pcie_txq_progress(struct iwl_trans_pcie *trans_pcie,
					   struct iwl_txq *txq)
{
	if (!trans_pcie->wd_timeout)
		return;

	/*
	 * if empty delete timer, otherwise move timer forward
	 * since we're making progress on this queue
	 */
	if (txq->q.read_ptr == txq->q.write_ptr)
		del_timer(&txq->stuck_timer);
	else
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
}

/* Frees buffers until index _not_ inclusive */
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	/* n_bd is usually 256 => n_bd - 1 = 0xff */
	int tfd_num = ssn & (txq->q.n_bd - 1);
	struct iwl_queue *q = &txq->q;
	int last_to_free;

	/* This function is not meant to release cmd queue*/
	if (WARN_ON(txq_id == trans_pcie->cmd_queue))
		return;

	spin_lock_bh(&txq->lock);

	if (txq->q.read_ptr == tfd_num)
		goto out;

	IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
			   txq_id, txq->q.read_ptr, tfd_num, ssn);

	/*Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used */
	last_to_free = iwl_queue_dec_wrap(tfd_num, q->n_bd);

	if (!iwl_queue_used(q, last_to_free)) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free, q->n_bd,
			q->write_ptr, q->read_ptr);
		goto out;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		goto out;

	for (;
	     q->read_ptr != tfd_num;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
			continue;

		__skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb);

		txq->entries[txq->q.read_ptr].skb = NULL;

		iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);

		iwl_pcie_txq_free_tfd(trans, txq);
	}

	iwl_pcie_txq_progress(trans_pcie, txq);

	if (iwl_queue_space(&txq->q) > txq->q.low_mark)
		iwl_wake_queue(trans, txq);
out:
	spin_unlock_bh(&txq->lock);
}

/*
 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms.  If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	lockdep_assert_held(&txq->lock);

	if ((idx >= q->n_bd) || (!iwl_queue_used(q, idx))) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, idx, q->n_bd,
			q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
				idx, q->write_ptr, q->read_ptr);
			iwl_op_mode_nic_error(trans->op_mode);
		}
	}

	iwl_pcie_txq_progress(trans_pcie, txq);
}

static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
				 u16 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);

	return 0;
}

static inline void iwl_pcie_txq_set_inactive(struct iwl_trans *trans,
					     u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(trans,
		SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
		(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}

void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
			       int sta_id, int tid, int frame_limit, u16 ssn)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	/* Stop this Tx queue before configuring it */
	iwl_pcie_txq_set_inactive(trans, txq_id);

	/* Set this queue as a chain-building queue unless it is CMD queue */
	if (txq_id != trans_pcie->cmd_queue)
		iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));

	/* If this queue is mapped to a certain station: it is an AGG queue */
	if (sta_id != IWL_INVALID_STATION) {
		u16 ra_tid = BUILD_RAxTID(sta_id, tid);

		/* Map receiver-address / traffic-ID to this queue */
		iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);

		/* enable aggregations for the queue */
		iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
	} else {
		/*
		 * disable aggregations for the queue, this will also make the
		 * ra_tid mapping configuration irrelevant since it is now a
		 * non-AGG queue.
		 */
		iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
	}

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
	trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);

	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (ssn & 0xff) | (txq_id << 8));
	iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

	/* Set up Tx window size and frame limit for this queue */
	iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
	iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
		       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		       (fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
		       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
		       SCD_QUEUE_STTS_REG_MSK);
	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n",
			    txq_id, fifo, ssn & 0xff);
}

void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 stts_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
	static const u32 zero_val[4] = {};

	if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
		WARN_ONCE(1, "queue %d not used", txq_id);
		return;
	}

	iwl_pcie_txq_set_inactive(trans, txq_id);

	iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
			    ARRAY_SIZE(zero_val));

	iwl_pcie_txq_unmap(trans, txq_id);

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/*
 * iwl_pcie_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the uCode command structure
 *
 * The function returns < 0 values to indicate that the operation
 * failed. On success, it returns the index (>= 0) of the command in
 * the command queue.
 */
static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
				 struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int idx;
	u16 copy_size, cmd_size, dma_size;
	bool had_nocopy = false;
	int i;
	u32 cmd_pos;
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_HCMD_MIN_COPY_SIZE copied */
		if (copy_size < IWL_HCMD_MIN_COPY_SIZE) {
			int copy = IWL_HCMD_MIN_COPY_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 get_cmd_string(trans_pcie, cmd->id), cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

	spin_lock_bh(&txq->lock);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
					 INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */
	cmd_pos = offsetof(struct iwl_device_cmd, payload);
	copy_size = sizeof(out_cmd->hdr);
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy = 0;

		if (!cmd->len[i])
			continue;

		/* need at least IWL_HCMD_MIN_COPY_SIZE copied */
		if (copy_size < IWL_HCMD_MIN_COPY_SIZE) {
			copy = IWL_HCMD_MIN_COPY_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
		}

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			copy = cmd->len[i];

		if (copy) {
			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
		}
	}

	WARN_ON_ONCE(txq->entries[idx].copy_cmd);

	/*
	 * since out_cmd will be the source address of the FH, it will write
	 * the retry count there. So when the user needs to receive the HCMD
	 * that corresponds to the response in the response handler, it needs
	 * to set CMD_WANT_HCMD.
	 */
	if (cmd->flags & CMD_WANT_HCMD) {
		txq->entries[idx].copy_cmd =
			kmemdup(out_cmd, cmd_pos, GFP_ATOMIC);
		if (unlikely(!txq->entries[idx].copy_cmd)) {
			idx = -ENOMEM;
			goto out;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);

	/*
	 * If the entire command is smaller than IWL_HCMD_MIN_COPY_SIZE, we must
	 * still map at least that many bytes for the hardware to write back to.
	 * We have enough space, so that's not a problem.
	 */
	dma_size = max_t(u16, copy_size, IWL_HCMD_MIN_COPY_SIZE);

	phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, dma_size,
				   DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, dma_size);

	iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size, 1);

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		const void *data = cmddata[i];

		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, (void *)data,
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_pcie_tfd_unmap(trans, out_meta,
					   &txq->tfds[q->write_ptr]);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], 0);
	}

	out_meta->flags = cmd->flags;
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kfree(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;

	txq->need_update = 1;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);

	/* start timer if queue currently empty */
	if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_pcie_txq_inc_wr_ptr(trans, txq);

 out:
	spin_unlock_bh(&txq->lock);
 free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
	return idx;
}

/*
 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 * @handler_status: return value of the handler of the command
 *	(put in setup_rx_handlers)
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed.  The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb, int handler_status)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans_pcie->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, trans_pcie->cmd_queue, sequence,
		 trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
		 trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	spin_lock_bh(&txq->lock);

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->entries[cmd_index].cmd;
	meta = &txq->entries[cmd_index].meta;

	iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		struct page *p = rxb_steal_page(rxb);

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
		meta->source->_rx_page_order = trans_pcie->rx_page_order;
		meta->source->handler_status = handler_status;
	}

	iwl_pcie_cmdq_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 get_cmd_string(trans_pcie, cmd->hdr.cmd));
		}
		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(trans_pcie, cmd->hdr.cmd));
		wake_up(&trans_pcie->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock_bh(&txq->lock);
}

#define HOST_COMPLETE_TIMEOUT (2 * HZ)

static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
				    struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	/* An asynchronous command can not expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;

	ret = iwl_pcie_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			get_cmd_string(trans_pcie, cmd->id), ret);
		return ret;
	}
	return 0;
}

static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
				   struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
		       get_cmd_string(trans_pcie, cmd->id));

	if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE,
				     &trans_pcie->status))) {
		IWL_ERR(trans, "Command %s: a command is already active!\n",
			get_cmd_string(trans_pcie, cmd->id));
		return -EIO;
	}

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
		       get_cmd_string(trans_pcie, cmd->id));

	cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			get_cmd_string(trans_pcie, cmd->id), ret);
		return ret;
	}

	ret = wait_event_timeout(trans_pcie->wait_command_queue,
				 !test_bit(STATUS_HCMD_ACTIVE,
					   &trans_pcie->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
			struct iwl_txq *txq =
				&trans_pcie->txq[trans_pcie->cmd_queue];
			struct iwl_queue *q = &txq->q;

			IWL_ERR(trans,
				"Error sending %s: time out after %dms.\n",
				get_cmd_string(trans_pcie, cmd->id),
				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			IWL_ERR(trans,
				"Current CMD queue read_ptr %d write_ptr %d\n",
				q->read_ptr, q->write_ptr);

			clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
			IWL_DEBUG_INFO(trans,
				       "Clearing HCMD_ACTIVE for command %s\n",
				       get_cmd_string(trans_pcie, cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) {
		IWL_ERR(trans, "FW error in SYNC CMD %s\n",
			get_cmd_string(trans_pcie, cmd->id));
		ret = -EIO;
		goto cancel;
	}

	if (test_bit(STATUS_RFKILL, &trans_pcie->status)) {
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
		ret = -ERFKILL;
		goto cancel;
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			get_cmd_string(trans_pcie, cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		trans_pcie->txq[trans_pcie->cmd_queue].
			entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}

int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_bit(STATUS_FW_ERROR, &trans_pcie->status))
		return -EIO;

	if (test_bit(STATUS_RFKILL, &trans_pcie->status))
		return -ERFKILL;

	if (cmd->flags & CMD_ASYNC)
		return iwl_pcie_send_hcmd_async(trans, cmd);

	/* We still can fail on RFKILL that can be asserted while we wait */
	return iwl_pcie_send_hcmd_sync(trans, cmd);
}

int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq;
	struct iwl_queue *q;
	dma_addr_t phys_addr = 0;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	u8 wait_write_ptr = 0;
	__le16 fc = hdr->frame_control;
	u8 hdr_len = ieee80211_hdrlen(fc);
	u16 __maybe_unused wifi_seq;

	txq = &trans_pcie->txq[txq_id];
	q = &txq->q;

	if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	spin_lock(&txq->lock);

	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirements to help the SCD to parse
	 * the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
#ifdef CONFIG_IWLWIFI_DEBUG
	wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) &&
		  ((wifi_seq & 0xff) != q->write_ptr),
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, q->write_ptr);
#endif

	/* Set up driver data for this TFD */
	txq->entries[q->write_ptr].skb = skb;
	txq->entries[q->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.cmd = REPLY_TX;
	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(q->write_ptr)));

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[q->write_ptr].meta;
1632

1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664
	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (firstlen != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = dma_map_single(trans->dev,
				    &dev_cmd->hdr, firstlen,
				    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
		goto out_err;
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);

	if (!ieee80211_has_morefrags(fc)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
					   secondlen, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
			dma_unmap_single(trans->dev,
					 dma_unmap_addr(out_meta, mapping),
					 dma_unmap_len(out_meta, len),
					 DMA_BIDIRECTIONAL);
			goto out_err;
		}
	}

	/* Attach buffers to TFD */
	iwl_pcie_txq_build_tfd(trans, txq, txcmd_phys, firstlen, 1);
	if (secondlen > 0)
		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, secondlen, 0);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
				DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));

	dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
				   DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     &txq->tfds[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);
	trace_iwlwifi_dev_tx_data(trans->dev, skb,
				  skb->data + hdr_len, secondlen);

	/* start timer if queue currently empty */
	if (txq->need_update && q->read_ptr == q->write_ptr &&
	    trans_pcie->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_pcie_txq_inc_wr_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */
	if (iwl_queue_space(q) < q->high_mark) {
		if (wait_write_ptr) {
			txq->need_update = 1;
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
		} else {
			iwl_stop_queue(trans, txq);
		}
	}
	spin_unlock(&txq->lock);
	return 0;
out_err:
	spin_unlock(&txq->lock);
	return -1;
}