/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2020 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2020 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <net/ip6_checksum.h>
#include <net/tso.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-scd.h"
#include "iwl-op-mode.h"
#include "internal.h"
#include "fw/api/tx.h"

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For the Tx queue, there are low mark and high mark limits.  If, after
 * queuing a packet for Tx, the free space becomes less than the low mark,
 * the Tx queue is stopped.  When reclaiming packets (on the 'tx done' IRQ),
 * if the free space becomes greater than the high mark, the Tx queue is
 * resumed.
 *
 ***************************************************/
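/*
 * Note: the circular-buffer arithmetic described above (wrapping read/write
 * pointers, keeping slots free) lives in the generic queue helpers used
 * below, e.g. iwl_txq_space() and iwl_txq_inc_wrap(); this file only drives
 * the PCIe/SCD specific registers around them.
 */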
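/*
 * Small DMA-coherent allocations used by this file (the "keep warm" buffer
 * and the scheduler byte-count tables) go through the two helpers below; a
 * non-zero ptr->addr is treated as "already allocated" and rejected.
 */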
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

/*
 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_txq *txq)
{
	u32 reg = 0;
	int txq_id = txq->id;

	lockdep_assert_held(&txq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. NIC is woken up for CMD regardless of shadow outside this function
	 * 3. there is a chance that the NIC is asleep
	 */
	if (!trans->trans_cfg->base_params->shadow_reg_enable &&
	    txq_id != trans->txqs.cmd.q_id &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		/*
		 * wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part.
		 */
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
				       txq_id, reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			txq->need_update = true;
			return;
		}
	}

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
	if (!txq->block)
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->write_ptr | (txq_id << 8));
}

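/*
 * Queues that could not update HBUS_TARG_WRPTR right away (because the NIC
 * first had to be woken up) are marked with txq->need_update above; the
 * helper below walks all used queues and flushes those deferred updates.
 */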
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
{
	int i;

	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
		struct iwl_txq *txq = trans->txqs.txq[i];

		if (!test_bit(i, trans->txqs.queue_used))
			continue;

		spin_lock_bh(&txq->lock);
		if (txq->need_update) {
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
			txq->need_update = false;
		}
		spin_unlock_bh(&txq->lock);
	}
}

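/*
 * Legacy TFD transfer-buffer layout: each TB entry holds a 36-bit DMA
 * address and a 12-bit length.  The low 32 address bits go into tb->lo,
 * while tb->hi_n_len packs the length in its upper 12 bits and address
 * bits 32-35 (via iwl_get_dma_hi_addr()) in its lower 4 bits.
 */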
static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
				       u8 idx, dma_addr_t addr, u16 len)
{
	struct iwl_tfd *tfd_fh = (void *)tfd;
	struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx];

	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	hi_n_len |= iwl_get_dma_hi_addr(addr);

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd_fh->num_tbs = idx + 1;
}

/*
 * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int rd_ptr = txq->read_ptr;
	int idx = iwl_txq_get_cmd_index(txq, rd_ptr);

	lockdep_assert_held(&txq->lock);

	/* We have only q->n_window txq->entries, but we use
	 * TFD_QUEUE_SIZE_MAX tfds
	 */
	iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);

	/* free SKB */
	if (txq->entries) {
		struct sk_buff *skb;

		skb = txq->entries[idx].skb;

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->entries[idx].skb = NULL;
		}
	}
}

static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
				  dma_addr_t addr, u16 len, bool reset)
{
	void *tfd;
	u32 num_tbs;

	tfd = txq->tfds + trans->txqs.tfd.size * txq->write_ptr;

	if (reset)
		memset(tfd, 0, trans->txqs.tfd.size);

	num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);

	/* Each TFD can point to a maximum max_tbs Tx buffers */
	if (num_tbs >= trans->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			trans->txqs.tfd.max_tbs);
		return -EINVAL;
	}

	if (WARN(addr & ~IWL_TX_DMA_MASK,
		 "Unaligned address = %llx\n", (unsigned long long)addr))
		return -EINVAL;

	iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len);

	return num_tbs;
}

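/*
 * Host-command flow control: on NICs with the APMG wake-up workaround
 * (apmg_wake_up_wa), iwl_pcie_set_cmd_in_flight() (further below) takes a
 * MAC_ACCESS_REQ reference so the NIC stays awake while a host command is
 * outstanding; the helper below drops it once the command queue drains.
 */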
static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
		return;
	if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
		return;

	trans_pcie->cmd_hold_nic_awake = false;
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}

/*
 * iwl_pcie_txq_unmap -  Unmap any remaining DMA mappings and free skb's
 */
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	spin_lock_bh(&txq->lock);
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);

		if (txq_id != trans->txqs.cmd.q_id) {
			struct sk_buff *skb = txq->entries[txq->read_ptr].skb;

			if (WARN_ON_ONCE(!skb))
				continue;

			iwl_txq_free_tso_page(trans, skb);
		}
		iwl_pcie_txq_free_tfd(trans, txq);
		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);

		if (txq->read_ptr == txq->write_ptr) {
			unsigned long flags;

			spin_lock_irqsave(&trans_pcie->reg_lock, flags);
			if (txq_id == trans->txqs.cmd.q_id)
				iwl_pcie_clear_cmd_in_flight(trans);
			spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
		}
	}

	while (!skb_queue_empty(&txq->overflow_q)) {
		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);

		iwl_op_mode_free_skb(trans->op_mode, skb);
	}

	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}

/*
 * iwl_pcie_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans->txqs.cmd.q_id)
		for (i = 0; i < txq->n_window; i++) {
			kfree_sensitive(txq->entries[i].cmd);
			kfree_sensitive(txq->entries[i].free_buf);
		}

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  trans->txqs.tfd.size *
				  trans->trans_cfg->base_params->max_tfd_queue_size,
				  txq->tfds, txq->dma_addr);
		txq->dma_addr = 0;
		txq->tfds = NULL;

		dma_free_coherent(dev,
				  sizeof(*txq->first_tb_bufs) * txq->n_window,
				  txq->first_tb_bufs, txq->first_tb_dma);
	}

	kfree(txq->entries);
	txq->entries = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
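/*
 * iwl_pcie_tx_start() (re)programs the scheduler after firmware load/reset:
 * it points the SCD at the byte-count tables in DRAM, clears the SCD context
 * SRAM, re-enables the command queue FIFO and turns the FH Tx DMA channels
 * back on.
 */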

void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int nq = trans->trans_cfg->base_params->num_of_queues;
	int chan;
	u32 reg_val;
	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
				SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);

	/* make sure all queues are not stopped/used */
	memset(trans->txqs.queue_stopped, 0,
	       sizeof(trans->txqs.queue_stopped));
	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);

	WARN_ON(scd_base_addr != 0 &&
		scd_base_addr != trans_pcie->scd_base_addr);

	/* reset context data, TX status and translation data */
	iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
				   SCD_CONTEXT_MEM_LOWER_BOUND,
			    NULL, clear_dwords);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans->txqs.scd_bc_tbls.dma >> 10);

	/* The chain extension of the SCD doesn't work well. This feature is
	 * enabled by default by the HW, so we need to disable it manually.
	 */
	if (trans->trans_cfg->base_params->scd_chain_ext_wa)
		iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	iwl_trans_ac_txq_enable(trans, trans->txqs.cmd.q_id,
				trans->txqs.cmd.fifo,
				trans->txqs.cmd.wdg_timeout);

	/* Activate all Tx DMA/FIFO channels */
	iwl_scd_activate_fifos(trans);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
		iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
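/*
 * iwl_trans_pcie_tx_reset() re-registers the already-allocated queues with
 * the hardware (CBBC queue base addresses, keep-warm buffer) and restarts
 * the scheduler; as the scd_base_addr comment below notes, this path is used
 * when the device may have been reset underneath us (e.g. around WoWLAN).
 */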

void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/*
	 * we should never get here in gen2 trans mode; return early to avoid
	 * having invalid accesses
	 */
	if (WARN_ON_ONCE(trans->trans_cfg->gen2))
		return;

	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		struct iwl_txq *txq = trans->txqs.txq[txq_id];
		if (trans->trans_cfg->use_tfh)
			iwl_write_direct64(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->dma_addr);
		else
			iwl_write_direct32(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->dma_addr >> 8);
		iwl_pcie_txq_unmap(trans, txq_id);
		txq->read_ptr = 0;
		txq->write_ptr = 0;
	}

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	/*
	 * Send 0 as the scd_base_addr since the device may have be reset
	 * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will
	 * contain garbage.
	 */
	iwl_pcie_tx_start(trans, 0);
}

static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;
	int ch, ret;
	u32 mask = 0;

	spin_lock(&trans_pcie->irq_lock);

	if (!iwl_trans_grab_nic_access(trans, &flags))
		goto out;

	/* Stop each Tx DMA channel */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
	}

	/* Wait for DMA channels to be idle */
	ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000);
	if (ret < 0)
		IWL_ERR(trans,
			"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
			ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));

	iwl_trans_release_nic_access(trans, &flags);

out:
	spin_unlock(&trans_pcie->irq_lock);
}

/*
 * iwl_pcie_tx_stop - Stop all Tx DMA channels
 */
int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Turn off all Tx DMA channels */
	iwl_pcie_tx_stop_fh(trans);

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
	 */
	memset(trans->txqs.queue_stopped, 0,
	       sizeof(trans->txqs.queue_stopped));
	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* This can happen: start_hw, stop_device */
	if (!trans_pcie->txq_memory)
		return 0;

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_pcie_txq_unmap(trans, txq_id);

	return 0;
}

/*
 * iwl_trans_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* Tx queues */
	if (trans_pcie->txq_memory) {
		for (txq_id = 0;
		     txq_id < trans->trans_cfg->base_params->num_of_queues;
		     txq_id++) {
			iwl_pcie_txq_free(trans, txq_id);
			trans->txqs.txq[txq_id] = NULL;
		}
	}

	kfree(trans_pcie->txq_memory);
	trans_pcie->txq_memory = NULL;

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);

	iwl_pcie_free_dma_ptr(trans, &trans->txqs.scd_bc_tbls);
}

/*
 * iwl_pcie_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 */
static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues;

	if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
		return -EINVAL;

	bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl);

	/*It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq_memory)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwl_pcie_alloc_dma_ptr(trans, &trans->txqs.scd_bc_tbls,
				     bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq_memory =
		kcalloc(trans->trans_cfg->base_params->num_of_queues,
			sizeof(struct iwl_txq), GFP_KERNEL);
	if (!trans_pcie->txq_memory) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);

		if (cmd_queue)
			slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
					  trans->cfg->min_txq_size);
		else
			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
					  trans->cfg->min_256_ba_txq_size);
		trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
		ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num,
				    cmd_queue);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
		trans->txqs.txq[txq_id]->id = txq_id;
	}

	return 0;

error:
	iwl_pcie_tx_free(trans);

	return ret;
}
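/*
 * iwl_pcie_tx_alloc() above is the one-time memory allocation;
 * iwl_pcie_tx_init() below runs on (re)start and initializes the queues and
 * the FH/SCD registers, allocating first if needed.
 */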
int iwl_pcie_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	bool alloc = false;

	if (!trans_pcie->txq_memory) {
		ret = iwl_pcie_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock(&trans_pcie->irq_lock);

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock(&trans_pcie->irq_lock);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);

		if (cmd_queue)
			slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
					  trans->cfg->min_txq_size);
		else
			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
					  trans->cfg->min_256_ba_txq_size);
		ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num,
				   cmd_queue);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}

		/*
		 * Tell nic where to find circular buffer of TFDs for a
		 * given Tx queue, and enable the DMA channel used for that
		 * queue.
		 * Circular buffer (TFD queue in DRAM) physical base address
		 */
		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
				   trans->txqs.txq[txq_id]->dma_addr >> 8);
	}

	iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
	if (trans->trans_cfg->base_params->num_of_queues > 20)
		iwl_set_bits_prph(trans, SCD_GP_CTRL,
				  SCD_GP_CTRL_ENABLE_31_QUEUES);

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_pcie_tx_free(trans);
	return ret;
}
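/*
 * Watchdog handling: txq->stuck_timer fires when a non-empty queue makes no
 * progress within wd_timeout; the helper below re-arms or cancels it as
 * frames are reclaimed.
 */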

static inline void iwl_pcie_txq_progress(struct iwl_txq *txq)
{
	lockdep_assert_held(&txq->lock);

	if (!txq->wd_timeout)
		return;

	/*
	 * station is asleep and we send data - that must
	 * be uAPSD or PS-Poll. Don't rearm the timer.
	 */
	if (txq->frozen)
		return;

	/*
	 * if empty delete timer, otherwise move timer forward
	 * since we're making progress on this queue
	 */
	if (txq->read_ptr == txq->write_ptr)
		del_timer(&txq->stuck_timer);
	else
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
}

/* Frees buffers until index _not_ inclusive */
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	int tfd_num = iwl_txq_get_cmd_index(txq, ssn);
	int read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr);
	int last_to_free;

	/* This function is not meant to release cmd queue */
	if (WARN_ON(txq_id == trans->txqs.cmd.q_id))
		return;

	spin_lock_bh(&txq->lock);

	if (!test_bit(txq_id, trans->txqs.queue_used)) {
		IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
				    txq_id, ssn);
		goto out;
	}

	if (read_ptr == tfd_num)
		goto out;

	IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
			   txq_id, txq->read_ptr, tfd_num, ssn);

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used */
	last_to_free = iwl_txq_dec_wrap(trans, tfd_num);

	if (!iwl_txq_used(txq, last_to_free)) {
		IWL_ERR(trans,
			"%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free,
			trans->trans_cfg->base_params->max_tfd_queue_size,
			txq->write_ptr, txq->read_ptr);
		goto out;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		goto out;

	for (;
	     read_ptr != tfd_num;
	     txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr),
	     read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) {
		struct sk_buff *skb = txq->entries[read_ptr].skb;

		if (WARN_ON_ONCE(!skb))
			continue;

		iwl_txq_free_tso_page(trans, skb);

		__skb_queue_tail(skbs, skb);

		txq->entries[read_ptr].skb = NULL;

		if (!trans->trans_cfg->use_tfh)
			iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq);

		iwl_pcie_txq_free_tfd(trans, txq);
	}

	iwl_pcie_txq_progress(txq);

	if (iwl_txq_space(trans, txq) > txq->low_mark &&
	    test_bit(txq_id, trans->txqs.queue_stopped)) {
		struct sk_buff_head overflow_skbs;

		__skb_queue_head_init(&overflow_skbs);
		skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);

		/*
		 * We are going to transmit from the overflow queue.
		 * Remember this state so that wait_for_txq_empty will know we
		 * are adding more packets to the TFD queue. It cannot rely on
		 * the state of &txq->overflow_q, as we just emptied it, but
		 * haven't TXed the content yet.
		 */
		txq->overflow_tx = true;

		/*
		 * This is tricky: we are in the reclaim path, which is not
		 * re-entrant, so no one else will try to access the txq data
		 * from that path. We stopped tx, so we can't have tx as well.
		 * Bottom line, we can unlock and re-lock later.
		 */
		spin_unlock_bh(&txq->lock);

		while (!skb_queue_empty(&overflow_skbs)) {
			struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
			struct iwl_device_tx_cmd *dev_cmd_ptr;

			dev_cmd_ptr = *(void **)((u8 *)skb->cb +
						 trans->txqs.dev_cmd_offs);

			/*
			 * Note that we can very well be overflowing again.
			 * In that case, iwl_txq_space will be small again
			 * and we won't wake mac80211's queue.
			 */
			iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
		}

		if (iwl_txq_space(trans, txq) > txq->low_mark)
			iwl_wake_queue(trans, txq);

		spin_lock_bh(&txq->lock);
		txq->overflow_tx = false;
	}

out:
	spin_unlock_bh(&txq->lock);
}

865 866 867
/* Set wr_ptr of specific device and txq  */
void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
{
868
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
869 870 871 872 873 874 875 876 877

	spin_lock_bh(&txq->lock);

	txq->write_ptr = ptr;
	txq->read_ptr = txq->write_ptr;

	spin_unlock_bh(&txq->lock);
}

878 879
static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
				      const struct iwl_host_cmd *cmd)
880 881 882 883 884 885
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	lockdep_assert_held(&trans_pcie->reg_lock);

886
	/* Make sure the NIC is still alive in the bus */
887 888
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return -ENODEV;
889

890 891 892 893 894 895
	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on NICs that have
	 * apmg_wake_up_wa set.
	 */
896
	if (trans->trans_cfg->base_params->apmg_wake_up_wa &&
897
	    !trans_pcie->cmd_hold_nic_awake) {
898
		__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
899
					 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
900 901

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
902 903
				   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
				   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
904 905 906 907
				    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
				   15000);
		if (ret < 0) {
			__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
908
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
909 910 911
			IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
			return -EIO;
		}
912
		trans_pcie->cmd_hold_nic_awake = true;
913 914 915 916 917
	}

	return 0;
}

918 919 920 921 922 923 924
/*
 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As result, some free space forms.  If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
925
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
926
{
927
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
928
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
929
	unsigned long flags;
930
	int nfreed = 0;
931
	u16 r;
932

933
	lockdep_assert_held(&txq->lock);
934

935 936
	idx = iwl_txq_get_cmd_index(txq, idx);
	r = iwl_txq_get_cmd_index(txq, txq->read_ptr);
937

938
	if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
939
	    (!iwl_txq_used(txq, idx))) {
940
		WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used),
			  "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
			  __func__, txq_id, idx,
943
			  trans->trans_cfg->base_params->max_tfd_queue_size,
			  txq->write_ptr, txq->read_ptr);
945 946
		return;
	}
947

948 949 950
	for (idx = iwl_txq_inc_wrap(trans, idx); r != idx;
	     r = iwl_txq_inc_wrap(trans, r)) {
		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
951

952 953
		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
954
				idx, txq->write_ptr, r);
			iwl_force_nmi(trans);
956 957 958
		}
	}

959
	if (txq->read_ptr == txq->write_ptr) {
960
		spin_lock_irqsave(&trans_pcie->reg_lock, flags);
961
		iwl_pcie_clear_cmd_in_flight(trans);
962 963 964
		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
	}

965
	iwl_pcie_txq_progress(txq);
966 967
}

968
static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
969
				 u16 txq_id)
970
{
971
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
972 973 974 975 976 977
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

978
	tbl_dw_addr = trans_pcie->scd_base_addr +
979 980
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

981
	tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);
982 983 984 985 986 987

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

988
	iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);
989 990 991 992

	return 0;
}

993 994 995 996
/* Receiver address (actually, Rx station's index into station table),
 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
#define BUILD_RAxTID(sta_id, tid)	(((sta_id) << 4) + (tid))

997
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
998 999
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
1000
{
1001
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1002
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
1003
	int fifo = -1;
1004
	bool scd_bug = false;
1005

1006
	if (test_and_set_bit(txq_id, trans->txqs.queue_used))
1007
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
1008

1009 1010
	txq->wd_timeout = msecs_to_jiffies(wdg_timeout);

1011 1012
	if (cfg) {
		fifo = cfg->fifo;
1013

1014
		/* Disable the scheduler prior configuring the cmd queue */
1015
		if (txq_id == trans->txqs.cmd.q_id &&
1016
		    trans_pcie->scd_set_active)
1017 1018
			iwl_scd_enable_set_active(trans, 0);

1019 1020
		/* Stop this Tx queue before configuring it */
		iwl_scd_txq_set_inactive(trans, txq_id);
1021

1022
		/* Set this queue as a chain-building queue unless it is CMD */
1023
		if (txq_id != trans->txqs.cmd.q_id)
1024
			iwl_scd_txq_set_chain(trans, txq_id);
1025

1026
		if (cfg->aggregate) {
1027
			u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);
1028

1029 1030
			/* Map receiver-address / traffic-ID to this queue */
			iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);
1031

1032 1033
			/* enable aggregations for the queue */
			iwl_scd_txq_enable_agg(trans, txq_id);
1034
			txq->ampdu = true;
1035 1036 1037 1038 1039 1040 1041 1042
		} else {
			/*
			 * disable aggregations for the queue, this will also
			 * make the ra_tid mapping configuration irrelevant
			 * since it is now a non-AGG queue.
			 */
			iwl_scd_txq_disable_agg(trans, txq_id);

1043
			ssn = txq->read_ptr;
1044
		}
	} else {
		/*
		 * If we need to move the SCD write pointer by steps of
		 * 0x40, 0x80 or 0xc0, it gets stuck. Avoids this and let
		 * the op_mode know by returning true later.
		 * Do this only in case cfg is NULL since this trick can
		 * be done only if we have DQA enabled which is true for mvm
		 * only. And mvm never sets a cfg pointer.
		 * This is really ugly, but this is the easiest way out for
		 * this sad hardware issue.
		 * This bug has been fixed on devices 9000 and up.
		 */
1057
		scd_bug = !trans->trans_cfg->mq_rx_supported &&
1058 1059 1060 1061
			!((ssn - txq->write_ptr) & 0x3f) &&
			(ssn != txq->write_ptr);
		if (scd_bug)
			ssn++;
1062
	}
1063 1064 1065

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
1066 1067
	txq->read_ptr = (ssn & 0xff);
	txq->write_ptr = (ssn & 0xff);
1068 1069
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (ssn & 0xff) | (txq_id << 8));
1070

1071 1072
	if (cfg) {
		u8 frame_limit = cfg->frame_limit;
1073

1074 1075 1076 1077 1078 1079 1080
		iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

		/* Set up Tx window size and frame limit for this queue */
		iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
		iwl_trans_write_mem32(trans,
			trans_pcie->scd_base_addr +
1081
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
1082 1083
			SCD_QUEUE_CTX_REG2_VAL(WIN_SIZE, frame_limit) |
			SCD_QUEUE_CTX_REG2_VAL(FRAME_LIMIT, frame_limit));
1084 1085 1086 1087 1088 1089 1090

		/* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
		iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
			       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			       (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
			       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			       SCD_QUEUE_STTS_REG_MSK);
1091 1092

		/* enable the scheduler for this queue (only) */
1093
		if (txq_id == trans->txqs.cmd.q_id &&
1094
		    trans_pcie->scd_set_active)
1095
			iwl_scd_enable_set_active(trans, BIT(txq_id));
1096 1097 1098 1099 1100 1101 1102 1103

		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d on FIFO %d WrPtr: %d\n",
				    txq_id, fifo, ssn & 0xff);
	} else {
		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d WrPtr: %d\n",
				    txq_id, ssn & 0xff);
1104
	}
1105 1106

	return scd_bug;
1107 1108
}

1109 1110 1111
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode)
{
1112
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
1113 1114 1115 1116

	txq->ampdu = !shared_mode;
}

1117 1118
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
				bool configure_scd)
1119
{
1120
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1121 1122 1123
	u32 stts_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
	static const u32 zero_val[4] = {};
1124

1125 1126
	trans->txqs.txq[txq_id]->frozen_expiry_remainder = 0;
	trans->txqs.txq[txq_id]->frozen = false;
1127

1128 1129 1130 1131 1132 1133
	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
	 */
1134
	if (!test_and_clear_bit(txq_id, trans->txqs.queue_used)) {
1135 1136
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", txq_id);
1137
		return;
1138 1139
	}

1140 1141
	if (configure_scd) {
		iwl_scd_txq_set_inactive(trans, txq_id);
1142

1143 1144 1145
		iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
				    ARRAY_SIZE(zero_val));
	}
1146

1147
	iwl_pcie_txq_unmap(trans, txq_id);
1148
	trans->txqs.txq[txq_id]->ampdu = false;
1149

1150
	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
1151 1152
}

1153 1154
/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

1155
/*
1156
 * iwl_pcie_enqueue_hcmd - enqueue a uCode command
1157
 * @priv: device private data point
1158
 * @cmd: a pointer to the ucode command structure
1159
 *
1160 1161
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of command in the
1162 1163
 * command queue.
 */
1164 1165
static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
				 struct iwl_host_cmd *cmd)
1166
{
1167
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1168
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
1171
	unsigned long flags;
1172
	void *dup_buf = NULL;
1173
	dma_addr_t phys_addr;
1174
	int idx;
1175
	u16 copy_size, cmd_size, tb0_size;
1176
	bool had_nocopy = false;
1177
	u8 group_id = iwl_cmd_groupid(cmd->id);
1178
	int i, ret;
1179
	u32 cmd_pos;
1180 1181
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
1182

1183 1184 1185 1186 1187 1188 1189
	if (group_id != 0) {
		copy_size = sizeof(struct iwl_cmd_header_wide);
		cmd_size = sizeof(struct iwl_cmd_header_wide);
	} else {
		copy_size = sizeof(struct iwl_cmd_header);
		cmd_size = sizeof(struct iwl_cmd_header);
	}
1190 1191

	/* need one for the header if the first is NOCOPY */
1192
	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);
1193

1194
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
1195 1196 1197
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

1198 1199
		if (!cmd->len[i])
			continue;
1200

1201 1202 1203
		/* need at least IWL_FIRST_TB_SIZE copied */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			int copy = IWL_FIRST_TB_SIZE - copy_size;
1204 1205 1206 1207 1208 1209 1210 1211

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

1212 1213
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

1231
			dup_buf = kmemdup(cmddata[i], cmdlen[i],
1232 1233 1234
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
1235 1236
		} else {
			/* NOCOPY must not be followed by normal! */
1237 1238 1239 1240
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
1241
			copy_size += cmdlen[i];
1242 1243 1244
		}
		cmd_size += cmd->len[i];
	}
1245

1246 1247
	/*
	 * If any of the command structures end up being larger than
1248 1249 1250
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
1251
	 */
1252 1253
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
1254 1255
		 iwl_get_cmd_string(trans, cmd->id),
		 cmd->id, copy_size)) {
1256 1257 1258
		idx = -EINVAL;
		goto free_dup_buf;
	}
1259

1260
	spin_lock_bh(&txq->lock);
1261

1262
	if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
1263
		spin_unlock_bh(&txq->lock);
1264

1265
		IWL_ERR(trans, "No space in command queue\n");
1266
		iwl_op_mode_cmd_queue_full(trans->op_mode);
1267 1268
		idx = -ENOSPC;
		goto free_dup_buf;
1269 1270
	}

1271
	idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
1272 1273
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

1275
	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
1278

1279
	/* set up the header */
1280 1281 1282 1283 1284 1285 1286 1287 1288
	if (group_id != 0) {
		out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
		out_cmd->hdr_wide.group_id = group_id;
		out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
		out_cmd->hdr_wide.length =
			cpu_to_le16(cmd_size -
				    sizeof(struct iwl_cmd_header_wide));
		out_cmd->hdr_wide.reserved = 0;
		out_cmd->hdr_wide.sequence =
1289
			cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
1290
						 INDEX_TO_SEQ(txq->write_ptr));
1291 1292 1293 1294 1295 1296

		cmd_pos = sizeof(struct iwl_cmd_header_wide);
		copy_size = sizeof(struct iwl_cmd_header_wide);
	} else {
		out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
		out_cmd->hdr.sequence =
1297
			cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
1298
						 INDEX_TO_SEQ(txq->write_ptr));
1299 1300 1301 1302 1303
		out_cmd->hdr.group_id = 0;

		cmd_pos = sizeof(struct iwl_cmd_header);
		copy_size = sizeof(struct iwl_cmd_header);
	}
1304 1305

	/* and copy the data that needs to be copied */
1306
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
1307
		int copy;
1308

1309
		if (!cmd->len[i])
1310
			continue;
1311 1312 1313

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
1314
					   IWL_HCMD_DFL_DUP))) {
1315 1316 1317 1318 1319
			copy = cmd->len[i];

			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
1320 1321 1322 1323
			continue;
		}

		/*
1324 1325
		 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
		 * in total (for bi-directional DMA), but copy up to what
1326 1327 1328 1329 1330 1331 1332 1333
		 * we can fit into the payload for debug dump purposes.
		 */
		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
		cmd_pos += copy;

		/* However, treat copy_size the proper way, we need it below */
1334 1335
		if (copy_size < IWL_FIRST_TB_SIZE) {
			copy = IWL_FIRST_TB_SIZE - copy_size;
1336 1337 1338 1339

			if (copy > cmd->len[i])
				copy = cmd->len[i];
			copy_size += copy;
1340
		}
1341 1342
	}

	IWL_DEBUG_HC(trans,
1344
		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
1345
		     iwl_get_cmd_string(trans, cmd->id),
1346 1347
		     group_id, out_cmd->hdr.cmd,
		     le16_to_cpu(out_cmd->hdr.sequence),
1348
		     cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);
1349

1350 1351 1352
	/* start the TFD with the minimum copy bytes */
	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
	memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
1353
	iwl_pcie_txq_build_tfd(trans, txq,
1354
			       iwl_txq_get_first_tb_dma(txq, idx),
1355
			       tb0_size, true);
1356 1357

	/* map first command fragment, if any remains */
1358
	if (copy_size > tb0_size) {
1359
		phys_addr = dma_map_single(trans->dev,
1360 1361
					   ((u8 *)&out_cmd->hdr) + tb0_size,
					   copy_size - tb0_size,
1362 1363
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
1364 1365
			iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
					       txq->write_ptr);
1366 1367 1368
			idx = -ENOMEM;
			goto out;
		}
1369

1370
		iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
1371
				       copy_size - tb0_size, false);
	}

1374
	/* map the remaining (adjusted) nocopy/dup fragments */
1375
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
1376
		const void *data = cmddata[i];
1377

1378
		if (!cmdlen[i])
1379
			continue;
1380 1381
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
1382
			continue;
1383 1384 1385
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, (void *)data,
1386
					   cmdlen[i], DMA_TO_DEVICE);
1387
		if (dma_mapping_error(trans->dev, phys_addr)) {
1388 1389
			iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
					       txq->write_ptr);
1390 1391 1392 1393
			idx = -ENOMEM;
			goto out;
		}

1394
		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
1395
	}

1397
	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
1398
	out_meta->flags = cmd->flags;
1399
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
1400
		kfree_sensitive(txq->entries[idx].free_buf);
1401
	txq->entries[idx].free_buf = dup_buf;

1403
	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);

1405
	/* start timer if queue currently empty */
1406
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
1407
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
1408

1409
	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
1410
	ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
1411 1412 1413 1414
	if (ret < 0) {
		idx = ret;
		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
		goto out;
1415 1416
	}

1417
	/* Increment and update queue's write index */
1418
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
1419
	iwl_pcie_txq_inc_wr_ptr(trans, txq);
1420

1421 1422
	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);

 out:
1424
	spin_unlock_bh(&txq->lock);
1425 1426 1427
 free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
1428
	return idx;
1429 1430
}

1431 1432
/*
 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
1433 1434
 * @rxb: Rx buffer to reclaim
 */
1435
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
1436
			    struct iwl_rx_cmd_buffer *rxb)
1437
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1439
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1440
	u8 group_id;
1441
	u32 cmd_id;
1442 1443 1444
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
1447
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1448
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
1449 1450 1451 1452

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then there a command routing bug has been introduced
	 * in the queue management code. */
1453
	if (WARN(txq_id != trans->txqs.cmd.q_id,
1454
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
1455
		 txq_id, trans->txqs.cmd.q_id, sequence, txq->read_ptr,
1456
		 txq->write_ptr)) {
1457
		iwl_print_hex_error(trans, pkt, 32);
1458
		return;
1459
	}
1460

1461
	spin_lock_bh(&txq->lock);
1462

1463
	cmd_index = iwl_txq_get_cmd_index(txq, index);
1464 1465
	cmd = txq->entries[cmd_index].cmd;
	meta = &txq->entries[cmd_index].meta;
1466
	group_id = cmd->hdr.group_id;
1467
	cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);
1468

1469
	iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);

1471
	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
1473
		struct page *p = rxb_steal_page(rxb);
1474 1475 1476

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
1477
		meta->source->_rx_page_order = trans_pcie->rx_page_order;
1478
	}
1479

1480 1481 1482
	if (meta->flags & CMD_WANT_ASYNC_CALLBACK)
		iwl_op_mode_async_cb(trans->op_mode, cmd);

1483
	iwl_pcie_cmdq_reclaim(trans, txq_id, index);
1484

	if (!(meta->flags & CMD_ASYNC)) {
1486
		if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
1487 1488
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
1489
				 iwl_get_cmd_string(trans, cmd_id));
1490
		}
1491
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1492
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
1493
			       iwl_get_cmd_string(trans, cmd_id));
1494
		wake_up(&trans_pcie->wait_command_queue);
1495
	}
1496

	meta->flags = 0;
1498

1499
	spin_unlock_bh(&txq->lock);
1500
}
1501

1502
#define HOST_COMPLETE_TIMEOUT	(2 * HZ)
1503

1504 1505
static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
				    struct iwl_host_cmd *cmd)
1506 1507 1508 1509 1510 1511 1512
{
	int ret;

	/* An asynchronous command can not expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;

1513
	ret = iwl_pcie_enqueue_hcmd(trans, cmd);
1514
	if (ret < 0) {
1515
		IWL_ERR(trans,
1516
			"Error sending %s: enqueue_hcmd failed: %d\n",
1517
			iwl_get_cmd_string(trans, cmd->id), ret);
1518 1519 1520 1521 1522
		return ret;
	}
	return 0;
}

1523 1524
static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
				   struct iwl_host_cmd *cmd)
1525
{
1526
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1527
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
1528 1529 1530
	int cmd_idx;
	int ret;

1531
	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
1532
		       iwl_get_cmd_string(trans, cmd->id));
1533

1534 1535
	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
				  &trans->status),
1536
		 "Command %s: a command is already active!\n",
1537
		 iwl_get_cmd_string(trans, cmd->id)))
1538 1539
		return -EIO;

1540
	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
1541
		       iwl_get_cmd_string(trans, cmd->id));
1542

1543
	cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
1544 1545
	if (cmd_idx < 0) {
		ret = cmd_idx;
1546
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1547
		IWL_ERR(trans,
1548
			"Error sending %s: enqueue_hcmd failed: %d\n",
1549
			iwl_get_cmd_string(trans, cmd->id), ret);
1550 1551 1552
		return ret;
	}

1553 1554 1555 1556
	ret = wait_event_timeout(trans_pcie->wait_command_queue,
				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
					   &trans->status),
				 HOST_COMPLETE_TIMEOUT);
1557
	if (!ret) {
1558
		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
1559
			iwl_get_cmd_string(trans, cmd->id),
1560
			jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
1561

1562
		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
1563
			txq->read_ptr, txq->write_ptr);
1564

1565
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1566
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
1567
			       iwl_get_cmd_string(trans, cmd->id));
1568
		ret = -ETIMEDOUT;
1569

1570
		iwl_trans_pcie_sync_nmi(trans);
1571
		goto cancel;
1572 1573
	}

1574
	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
1575
		iwl_trans_pcie_dump_regs(trans);
1576
		IWL_ERR(trans, "FW error in SYNC CMD %s\n",
1577
			iwl_get_cmd_string(trans, cmd->id));
1578
		dump_stack();
1579 1580 1581 1582
		ret = -EIO;
		goto cancel;
	}

1583
	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
1584
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
1585 1586 1587 1588 1589
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
		ret = -ERFKILL;
		goto cancel;
	}

1590
	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
1591
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
1592
			iwl_get_cmd_string(trans, cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
1607
		txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
1608
	}
1609

1610 1611 1612
	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
1613 1614 1615 1616 1617
	}

	return ret;
}

1618
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
1619
{
1620
	/* Make sure the NIC is still alive in the bus */
1621 1622
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return -ENODEV;
1623

1624
	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
1625
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
1626 1627
		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
				  cmd->id);
1628
		return -ERFKILL;
1629
	}
1630

1631
	if (cmd->flags & CMD_ASYNC)
1632
		return iwl_pcie_send_hcmd_async(trans, cmd);
1633

1634
	/* We still can fail on RFKILL that can be asserted while we wait */
1635
	return iwl_pcie_send_hcmd_sync(trans, cmd);
1636 1637
}

1638 1639
static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
			     struct iwl_txq *txq, u8 hdr_len,
1640
			     struct iwl_cmd_meta *out_meta)
1641
{
1642
	u16 head_tb_len;
1643 1644 1645 1646 1647 1648
	int i;

	/*
	 * Set up TFD's third entry to point directly to remainder
	 * of skb's head, if any
	 */
1649
	head_tb_len = skb_headlen(skb) - hdr_len;
1650

1651 1652 1653 1654 1655
	if (head_tb_len > 0) {
		dma_addr_t tb_phys = dma_map_single(trans->dev,
						    skb->data + hdr_len,
						    head_tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
1656
			return -EINVAL;
1657 1658
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
					tb_phys, head_tb_len);
1659
		iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
	}

	/* set up the remaining entries to point to the data */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t tb_phys;
		int tb_idx;

		if (!skb_frag_size(frag))
			continue;

		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);

1674
		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
1675
			return -EINVAL;
1676 1677
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
					tb_phys, skb_frag_size(frag));
1678 1679
		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
						skb_frag_size(frag), false);
1680 1681
		if (tb_idx < 0)
			return tb_idx;
1682

1683
		out_meta->tbs |= BIT(tb_idx);
1684 1685 1686 1687 1688
	}

	return 0;
}

#ifdef CONFIG_INET
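/*
 * Seed tcph->check with the (complemented) TCP pseudo-header checksum for
 * either IPv4 or IPv6, so that the payload checksum computed later by the
 * software-checksum path can simply be folded into it.  For IPv4 the IP
 * header checksum is also refreshed.
 */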
static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph,
					bool ipv6, unsigned int len)
{
	if (ipv6) {
		struct ipv6hdr *iphv6 = iph;

		tcph->check = ~csum_ipv6_magic(&iphv6->saddr, &iphv6->daddr,
					       len + tcph->doff * 4,
					       IPPROTO_TCP, 0);
	} else {
		struct iphdr *iphv4 = iph;

		ip_send_check(iphv4);
		tcph->check = ~csum_tcpudp_magic(iphv4->saddr, iphv4->daddr,
						 len + tcph->doff * 4,
						 IPPROTO_TCP, 0);
	}
}

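/*
 * Build the TBs for a TSO/A-MSDU frame.  The skb payload is cut into
 * mss-sized subframes; for each subframe a fresh subframe header (DA, SA,
 * length, then SNAP/IP/TCP rebuilt by the TSO core) is written into a
 * dedicated TSO header page and mapped as its own TB, followed by TBs that
 * map the payload chunks directly from the original skb.  When sw_csum_tx
 * is set (testing only), the TCP checksum of each subframe is also
 * computed in software.
 */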
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_txq *txq, u8 hdr_len,
				   struct iwl_cmd_meta *out_meta,
				   struct iwl_device_tx_cmd *dev_cmd,
				   u16 tb1_len)
{
	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(txq->trans);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	u16 length, iv_len, amsdu_pad;
	u8 *start_hdr;
	struct iwl_tso_hdr_page *hdr_page;
	struct tso_t tso;

	/* if the packet is protected, then it must be CCMP or GCMP */
	BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN);
	iv_len = ieee80211_has_protected(hdr->frame_control) ?
		IEEE80211_CCMP_HDR_LEN : 0;

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     iwl_txq_get_tfd(trans, txq, txq->write_ptr),
			     trans->txqs.tfd.size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);

	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
	amsdu_pad = 0;

	/* total amount of header we may need for this A-MSDU */
	hdr_room = DIV_ROUND_UP(total_len, mss) *
		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;

	/* Our device supports 9 segments at most; it will fit in 1 page */
	hdr_page = get_page_hdr(trans, hdr_room, skb);
	if (!hdr_page)
		return -ENOMEM;

	start_hdr = hdr_page->pos;
	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
	hdr_page->pos += iv_len;

	/*
	 * Pull the ieee80211 header + IV to be able to use TSO core,
	 * we will restore it for the tx_status flow.
	 */
	skb_pull(skb, hdr_len + iv_len);

	/*
	 * Remove the length of all the headers that we don't actually
	 * have in the MPDU by themselves, but that we duplicate into
	 * all the different MSDUs inside the A-MSDU.
	 */
	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);

	tso_start(skb, &tso);

	while (total_len) {
		/* this is the data left for this subframe */
		unsigned int data_left =
			min_t(unsigned int, mss, total_len);
		struct sk_buff *csum_skb = NULL;
		unsigned int hdr_tb_len;
		dma_addr_t hdr_tb_phys;
		struct tcphdr *tcph;
		u8 *iph, *subf_hdrs_start = hdr_page->pos;

		total_len -= data_left;

		memset(hdr_page->pos, 0, amsdu_pad);
		hdr_page->pos += amsdu_pad;
		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
				  data_left)) & 0x3;
		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
		hdr_page->pos += ETH_ALEN;
		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
		hdr_page->pos += ETH_ALEN;

		length = snap_ip_tcp_hdrlen + data_left;
		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
		hdr_page->pos += sizeof(length);

		/*
		 * This will copy the SNAP as well which will be considered
		 * as MAC header.
		 */
		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
		iph = hdr_page->pos + 8;
		tcph = (void *)(iph + ip_hdrlen);

		/* For testing on current hardware only */
		if (trans_pcie->sw_csum_tx) {
			csum_skb = alloc_skb(data_left + tcp_hdrlen(skb),
					     GFP_ATOMIC);
			if (!csum_skb)
				return -ENOMEM;

			iwl_compute_pseudo_hdr_csum(iph, tcph,
						    skb->protocol ==
							htons(ETH_P_IPV6),
						    data_left);

			skb_put_data(csum_skb, tcph, tcp_hdrlen(skb));
			skb_reset_transport_header(csum_skb);
			csum_skb->csum_start =
				(unsigned char *)tcp_hdr(csum_skb) -
						 csum_skb->head;
		}

		hdr_page->pos += snap_ip_tcp_hdrlen;

		hdr_tb_len = hdr_page->pos - start_hdr;
		hdr_tb_phys = dma_map_single(trans->dev, start_hdr,
					     hdr_tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) {
			dev_kfree_skb(csum_skb);
			return -EINVAL;
		}
		iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
				       hdr_tb_len, false);
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
					hdr_tb_phys, hdr_tb_len);
		/* add this subframe's headers' length to the tx_cmd */
		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);

		/* prepare the start_hdr for the next subframe */
		start_hdr = hdr_page->pos;

		/* put the payload */
		while (data_left) {
			unsigned int size = min_t(unsigned int, tso.size,
						  data_left);
			dma_addr_t tb_phys;

			if (trans_pcie->sw_csum_tx)
				skb_put_data(csum_skb, tso.data, size);

			tb_phys = dma_map_single(trans->dev, tso.data,
						 size, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
				dev_kfree_skb(csum_skb);
				return -EINVAL;
			}

			iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
					       size, false);
			trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
						tb_phys, size);

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}

		/* For testing on early hardware only */
		if (trans_pcie->sw_csum_tx) {
			__wsum csum;

			csum = skb_checksum(csum_skb,
					    skb_checksum_start_offset(csum_skb),
					    csum_skb->len -
					    skb_checksum_start_offset(csum_skb),
					    0);
			dev_kfree_skb(csum_skb);
			dma_sync_single_for_cpu(trans->dev, hdr_tb_phys,
						hdr_tb_len, DMA_TO_DEVICE);
			tcph->check = csum_fold(csum);
			dma_sync_single_for_device(trans->dev, hdr_tb_phys,
						   hdr_tb_len, DMA_TO_DEVICE);
		}
	}

	/* re-add the WiFi header and IV */
	skb_push(skb, hdr_len + iv_len);

	return 0;
}
#else /* CONFIG_INET */
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_txq *txq, u8 hdr_len,
				   struct iwl_cmd_meta *out_meta,
				   struct iwl_device_tx_cmd *dev_cmd,
				   u16 tb1_len)
{
	/* No A-MSDU without CONFIG_INET */
	WARN_ON(1);

	return -1;
}
#endif /* CONFIG_INET */

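/*
 * Transmit path for gen1 hardware: TB0 holds the first IWL_FIRST_TB_SIZE
 * bytes of the device command (copied into a bounce buffer once the command
 * is final), TB1 maps the rest of the TX command plus the 802.11 header,
 * and the remaining TBs map the frame payload, either directly or as
 * A-MSDU subframes.  Finally the byte-count table is updated and the queue
 * write pointer is advanced.
 */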
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq;
	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
	void *tb1_addr;
	void *tfd;
	u16 len, tb1_len;
	bool wait_write_ptr;
	__le16 fc;
	u8 hdr_len;
	u16 wifi_seq;
	bool amsdu;

	txq = trans->txqs.txq[txq_id];

	if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

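	/*
	 * sw_csum_tx is a testing mode (see the A-MSDU path above): finish
	 * any pending partial checksum in software before the frame is
	 * handed to the device.
	 */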
	if (unlikely(trans_pcie->sw_csum_tx &&
		     skb->ip_summed == CHECKSUM_PARTIAL)) {
		int offs = skb_checksum_start_offset(skb);
		int csum_offs = offs + skb->csum_offset;
		__wsum csum;

		if (skb_ensure_writable(skb, csum_offs + sizeof(__sum16)))
			return -1;

		csum = skb_checksum(skb, offs, skb->len - offs, 0);
		*(__sum16 *)(skb->data + csum_offs) = csum_fold(csum);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	/* mac80211 always puts the full header into the SKB's head,
	 * so there's no need to check if it's readable there
	 */
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	hdr_len = ieee80211_hdrlen(fc);

	spin_lock(&txq->lock);

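	/*
	 * If the queue is filling up, stop the corresponding mac80211 queue;
	 * if there is truly no room left, park the frame on the overflow
	 * queue instead and send it later, once space has been reclaimed.
	 */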
	if (iwl_txq_space(trans, txq) < txq->high_mark) {
		iwl_txq_stop(trans, txq);

		/* don't put the packet on the ring if there is no room */
		if (unlikely(iwl_txq_space(trans, txq) < 3)) {
			struct iwl_device_tx_cmd **dev_cmd_ptr;

			dev_cmd_ptr = (void *)((u8 *)skb->cb +
					       trans->txqs.dev_cmd_offs);

			*dev_cmd_ptr = dev_cmd;
			__skb_queue_tail(&txq->overflow_q, skb);

			spin_unlock(&txq->lock);
			return 0;
		}
	}

	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirement to help the SCD to parse
	 * the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE(txq->ampdu &&
		  (wifi_seq & 0xff) != txq->write_ptr,
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, txq->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[txq->write_ptr].skb = skb;
	txq->entries[txq->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(txq->write_ptr)));

	tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr);
	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
		       offsetof(struct iwl_tx_cmd, scratch);

	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[txq->write_ptr].meta;
	out_meta->flags = 0;

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
	      hdr_len - IWL_FIRST_TB_SIZE;
	/* do not align A-MSDU to dword as the subframe header aligns it */
	amsdu = ieee80211_is_data_qos(fc) &&
		(*ieee80211_get_qos_ctl(hdr) &
		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
	if (trans_pcie->sw_csum_tx || !amsdu) {
		tb1_len = ALIGN(len, 4);
		/* Tell NIC about any 2-byte padding after MAC header */
		if (tb1_len != len)
			tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD);
	} else {
		tb1_len = len;
	}

	/*
	 * The first TB points to bi-directional DMA data, we'll
	 * memcpy the data into it later.
	 */
	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
			       IWL_FIRST_TB_SIZE, true);

	/* there must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
		goto out_err;
	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     iwl_txq_get_tfd(trans, txq, txq->write_ptr),
			     trans->txqs.tfd.size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
			     hdr_len);

	/*
	 * If gso_size wasn't set, don't give the frame "amsdu treatment"
	 * (adding subframes, etc.).
	 * This can happen in some testing flows when the amsdu was already
	 * pre-built, and we just need to send the resulting skb.
	 */
	if (amsdu && skb_shinfo(skb)->gso_size) {
		if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
						     out_meta, dev_cmd,
						     tb1_len)))
			goto out_err;
	} else {
		struct sk_buff *frag;

		if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
					       out_meta)))
			goto out_err;

		skb_walk_frags(skb, frag) {
			if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0,
						       out_meta)))
				goto out_err;
		}
	}

	/* building the A-MSDU might have changed this data, so memcpy it now */
	memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE);

	tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
	/* Set up entry for this TFD in Tx byte-count array */
	iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
					 iwl_txq_gen1_tfd_get_num_tbs(trans,
								      tfd));

	wait_write_ptr = ieee80211_has_morefrags(fc);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) {
		/*
		 * If the TXQ is active, then set the timer, if not,
		 * set the timer in remainder so that the timer will
		 * be armed with the right value when the station will
		 * wake up.
		 */
		if (!txq->frozen)
			mod_timer(&txq->stuck_timer,
				  jiffies + txq->wd_timeout);
		else
			txq->frozen_expiry_remainder = txq->wd_timeout;
	}

	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	if (!wait_write_ptr)
		iwl_pcie_txq_inc_wr_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
out_err:
	iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
	spin_unlock(&txq->lock);
	return -1;
}