/******************************************************************************
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <net/ip6_checksum.h>
#include <net/tso.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-scd.h"
#include "iwl-op-mode.h"
#include "internal.h"
/* FIXME: need to abstract out TX command (once we know what it looks like) */
#include "dvm/commands.h"

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For a Tx queue, there are low-mark and high-mark limits. If, after queuing
 * a packet for Tx, the free space drops below the low mark, the Tx queue is
 * stopped. When packets are reclaimed (on the 'tx done' IRQ) and the free
 * space rises above the high mark, the Tx queue is resumed.
 *
 ***************************************************/
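
/*
 * Illustrative example of the arithmetic used below (assuming
 * TFD_QUEUE_SIZE_MAX is 256, a power of two): with read_ptr = 250 and
 * write_ptr = 5, the circular buffer holds (5 - 250) & 255 = 11 filled
 * entries, so iwl_queue_space() reports 255 - 11 = 244 free slots; one
 * slot always stays reserved so a full queue is never confused with an
 * empty one.
 */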

static int iwl_queue_space(const struct iwl_queue *q)
{
	unsigned int max;
	unsigned int used;

	/*
	 * To avoid ambiguity between empty and completely full queues, there
	 * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue.
	 * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need
	 * to reserve any queue entries for this purpose.
	 */
	if (q->n_window < TFD_QUEUE_SIZE_MAX)
		max = q->n_window;
	else
		max = TFD_QUEUE_SIZE_MAX - 1;

	/*
	 * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to
	 * modulo by TFD_QUEUE_SIZE_MAX and is well defined.
	 */
	used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);

	if (WARN_ON(used > max))
		return 0;

	return max - used;
}

/*
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_queue *q, int slots_num, u32 id)
{
	q->n_window = slots_num;
	q->id = id;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = 0;
	q->read_ptr = 0;

	return 0;
}
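
/*
 * Watermark example (illustrative): a queue initialized with
 * slots_num = 256 gets low_mark = 256 / 4 = 64 and
 * high_mark = 256 / 8 = 32, while a small 8-slot queue hits both
 * clamps and gets low_mark = 4 and high_mark = 2.
 */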

static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
				  struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans,
				  struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

static void iwl_pcie_txq_stuck_timer(unsigned long data)
{
	struct iwl_txq *txq = (void *)data;
	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->q.read_ptr == txq->q.write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
		jiffies_to_msecs(txq->wd_timeout));

	iwl_trans_pcie_log_scd_error(trans, txq);

	iwl_force_nmi(trans);
}

/*
 * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
					     struct iwl_txq *txq, u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *) txq->entries[txq->q.write_ptr].cmd->payload;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	sta_id = tx_cmd->sta_id;
	sec_ctl = tx_cmd->sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += IEEE80211_CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += IEEE80211_TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
		break;
	}

	if (trans_pcie->bc_table_dword)
		len = DIV_ROUND_UP(len, 4);

	if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
		return;

	bc_ent = cpu_to_le16(len | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
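
/*
 * Note on the duplicate write above: the scheduler can read byte-count
 * entries past the end of the circular buffer when the queue wraps, so
 * the first TFD_QUEUE_SIZE_BC_DUP entries are mirrored beyond the
 * TFD_QUEUE_SIZE_MAX boundary to keep such reads coherent.
 */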

static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					    struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *)txq->entries[txq->q.read_ptr].cmd->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans_pcie->cmd_queue)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

/*
 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 reg = 0;
	int txq_id = txq->q.id;

	lockdep_assert_held(&txq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. NIC is woken up for CMD regardless of shadow outside this function
	 * 3. there is a chance that the NIC is asleep
	 */
	if (!trans->cfg->base_params->shadow_reg_enable &&
	    txq_id != trans_pcie->cmd_queue &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		/*
272 273 274
		 * wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part.
		 */
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
				       txq_id, reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			txq->need_update = true;
			return;
		}
	}

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
	if (!txq->block)
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
}

void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		struct iwl_txq *txq = &trans_pcie->txq[i];

		spin_lock_bh(&txq->lock);
		if (trans_pcie->txq[i].need_update) {
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
			trans_pcie->txq[i].need_update = false;
		}
		spin_unlock_bh(&txq->lock);
	}
}

static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				       dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}
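
/*
 * Layout note for the two helpers above: a TB entry packs a 36-bit DMA
 * address and a 12-bit length. tb->lo holds address bits 0-31, the low
 * nibble of tb->hi_n_len holds address bits 32-35, and its upper 12
 * bits hold the length - hence the "len << 4" and "& 0xF" shuffling.
 */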

static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
			       struct iwl_cmd_meta *meta,
			       struct iwl_tfd *tfd)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* first TB is never freed - it's the bidirectional DMA data */

	for (i = 1; i < num_tbs; i++) {
		if (meta->flags & BIT(i + CMD_TB_BITMAP_POS))
			dma_unmap_page(trans->dev,
				       iwl_pcie_tfd_tb_get_addr(tfd, i),
				       iwl_pcie_tfd_tb_get_len(tfd, i),
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(trans->dev,
					 iwl_pcie_tfd_tb_get_addr(tfd, i),
					 iwl_pcie_tfd_tb_get_len(tfd, i),
					 DMA_TO_DEVICE);
	}
	tfd->num_tbs = 0;
}

/*
 * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int rd_ptr = txq->q.read_ptr;
	int idx = get_cmd_index(&txq->q, rd_ptr);

	lockdep_assert_held(&txq->lock);

	/* We have only q->n_window txq->entries, but we use
	 * TFD_QUEUE_SIZE_MAX tfds
	 */
	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);

	/* free SKB */
	if (txq->entries) {
		struct sk_buff *skb;

		skb = txq->entries[idx].skb;

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->entries[idx].skb = NULL;
		}
	}
}

static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
				  dma_addr_t addr, u16 len, bool reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum of 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN(addr & ~IWL_TX_DMA_MASK,
		 "Unaligned address = %llx\n", (unsigned long long)addr))
		return -EINVAL;

	iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);

	return num_tbs;
}

static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
			       struct iwl_txq *txq, int slots_num,
			       u32 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
	size_t tb0_buf_sz;
	int i;

	if (WARN_ON(txq->entries || txq->tfds))
		return -EINVAL;

	setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer,
		    (unsigned long)txq);
	txq->trans_pcie = trans_pcie;

	txq->q.n_window = slots_num;

	txq->entries = kcalloc(slots_num,
			       sizeof(struct iwl_pcie_txq_entry),
			       GFP_KERNEL);

	if (!txq->entries)
		goto error;

	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->entries[i].cmd =
				kmalloc(sizeof(struct iwl_device_cmd),
					GFP_KERNEL);
			if (!txq->entries[i].cmd)
				goto error;
		}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->q.dma_addr, GFP_KERNEL);
	if (!txq->tfds)
		goto error;

	BUILD_BUG_ON(IWL_FIRST_TB_SIZE_ALIGN != sizeof(*txq->first_tb_bufs));

	tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;

	txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
					      &txq->first_tb_dma,
					      GFP_KERNEL);
	if (!txq->first_tb_bufs)
		goto err_free_tfds;

	txq->q.id = txq_id;

	return 0;
err_free_tfds:
	dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr);
error:
	if (txq->entries && txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->entries[i].cmd);
	kfree(txq->entries);
	txq->entries = NULL;

	return -ENOMEM;

}

static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
			      int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = false;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(&txq->q, slots_num, txq_id);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);
	__skb_queue_head_init(&txq->overflow_q);

	/*
	 * Tell the NIC where to find the circular buffer of Tx Frame
	 * Descriptors for the given Tx queue, and enable the DMA channel
	 * used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	if (trans->cfg->use_tfh)
		iwl_write_direct64(trans,
				   FH_MEM_CBBC_QUEUE(trans, txq_id),
				   txq->q.dma_addr);
	else
		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
				   txq->q.dma_addr >> 8);

	return 0;
}

static void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
				   struct sk_buff *skb)
{
	struct page **page_ptr;

	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);

	if (*page_ptr) {
		__free_page(*page_ptr);
		*page_ptr = NULL;
	}
}

static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	if (trans_pcie->ref_cmd_in_flight) {
		trans_pcie->ref_cmd_in_flight = false;
		IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n");
		iwl_trans_unref(trans);
	}

	if (!trans->cfg->base_params->apmg_wake_up_wa)
		return;
	if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
		return;

	trans_pcie->cmd_hold_nic_awake = false;
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}

/*
 * iwl_pcie_txq_unmap -  Unmap any remaining DMA mappings and free skb's
 */
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;

	spin_lock_bh(&txq->lock);
	while (q->write_ptr != q->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, q->read_ptr);

		if (txq_id != trans_pcie->cmd_queue) {
			struct sk_buff *skb = txq->entries[q->read_ptr].skb;

			if (WARN_ON_ONCE(!skb))
				continue;

			iwl_pcie_free_tso_page(trans_pcie, skb);
		}
		iwl_pcie_txq_free_tfd(trans, txq);
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);

		if (q->read_ptr == q->write_ptr) {
			unsigned long flags;

			spin_lock_irqsave(&trans_pcie->reg_lock, flags);
			if (txq_id != trans_pcie->cmd_queue) {
				IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
					      q->id);
				iwl_trans_unref(trans);
			} else {
				iwl_pcie_clear_cmd_in_flight(trans);
			}
			spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
		}
	}
	txq->active = false;

	while (!skb_queue_empty(&txq->overflow_q)) {
		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);

		iwl_op_mode_free_skb(trans->op_mode, skb);
	}

	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}

/*
 * iwl_pcie_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < txq->q.n_window; i++) {
			kzfree(txq->entries[i].cmd);
			kzfree(txq->entries[i].free_buf);
		}

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX,
				  txq->tfds, txq->q.dma_addr);
		txq->q.dma_addr = 0;
		txq->tfds = NULL;

		dma_free_coherent(dev,
				  sizeof(*txq->first_tb_bufs) * txq->q.n_window,
				  txq->first_tb_bufs, txq->first_tb_dma);
	}

	kfree(txq->entries);
	txq->entries = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int nq = trans->cfg->base_params->num_of_queues;
	int chan;
	u32 reg_val;
	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
				SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);

	/* make sure all queues are not stopped/used */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);

	WARN_ON(scd_base_addr != 0 &&
		scd_base_addr != trans_pcie->scd_base_addr);

	/* reset context data, TX status and translation data */
	iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
				   SCD_CONTEXT_MEM_LOWER_BOUND,
			    NULL, clear_dwords);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* The chain extension of the SCD doesn't work well. This feature is
	 * enabled by default by the HW, so we need to disable it manually.
	 */
	if (trans->cfg->base_params->scd_chain_ext_wa)
		iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
				trans_pcie->cmd_fifo,
				trans_pcie->cmd_q_wdg_timeout);

	/* Activate all Tx DMA/FIFO channels */
	iwl_scd_activate_fifos(trans);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
		iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		struct iwl_txq *txq = &trans_pcie->txq[txq_id];
		if (trans->cfg->use_tfh)
			iwl_write_direct64(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->q.dma_addr);
		else
			iwl_write_direct32(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->q.dma_addr >> 8);
		iwl_pcie_txq_unmap(trans, txq_id);
		txq->q.read_ptr = 0;
		txq->q.write_ptr = 0;
	}

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	/*
	 * Send 0 as the scd_base_addr since the device may have be reset
	 * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will
	 * contain garbage.
	 */
	iwl_pcie_tx_start(trans, 0);
}

static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;
	int ch, ret;
	u32 mask = 0;

	spin_lock(&trans_pcie->irq_lock);

	if (!iwl_trans_grab_nic_access(trans, &flags))
		goto out;

	/* Stop each Tx DMA channel */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
	}

	/* Wait for DMA channels to be idle */
	ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000);
	if (ret < 0)
		IWL_ERR(trans,
			"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
			ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));

	iwl_trans_release_nic_access(trans, &flags);

out:
	spin_unlock(&trans_pcie->irq_lock);
}

/*
 * iwl_pcie_tx_stop - Stop all Tx DMA channels
 */
int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Turn off all Tx DMA channels */
	iwl_pcie_tx_stop_fh(trans);

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
	 */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	/* This can happen: start_hw, stop_device */
	if (!trans_pcie->txq)
		return 0;

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_pcie_txq_unmap(trans, txq_id);

	return 0;
}

/*
 * iwl_pcie_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Tx queues */
	if (trans_pcie->txq) {
		for (txq_id = 0;
		     txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
			iwl_pcie_txq_free(trans, txq_id);
	}

	kfree(trans_pcie->txq);
	trans_pcie->txq = NULL;

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}

/*
 * iwl_pcie_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 */
static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
			sizeof(struct iwlagn_scd_bc_tbl);

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
				   scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
				  sizeof(struct iwl_txq), GFP_KERNEL);
	if (!trans_pcie->txq) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id],
					  slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	iwl_pcie_tx_free(trans);

	return ret;
}

int iwl_pcie_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	bool alloc = false;

	if (!trans_pcie->txq) {
		ret = iwl_pcie_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock(&trans_pcie->irq_lock);

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock(&trans_pcie->irq_lock);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id],
					 slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	if (trans->cfg->use_tfh)
		iwl_write_direct32(trans, TFH_TRANSFER_MODE,
				   TFH_TRANSFER_MAX_PENDING_REQ |
				   TFH_CHUNK_SIZE_128 |
				   TFH_CHUNK_SPLIT_MODE);

	iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
	if (trans->cfg->base_params->num_of_queues > 20)
		iwl_set_bits_prph(trans, SCD_GP_CTRL,
				  SCD_GP_CTRL_ENABLE_31_QUEUES);

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_pcie_tx_free(trans);
	return ret;
}

static inline void iwl_pcie_txq_progress(struct iwl_txq *txq)
{
	lockdep_assert_held(&txq->lock);

	if (!txq->wd_timeout)
		return;

	/*
	 * station is asleep and we send data - that must
	 * be uAPSD or PS-Poll. Don't rearm the timer.
	 */
	if (txq->frozen)
		return;

	/*
	 * if empty, delete the timer; otherwise move the timer forward
	 * since we're making progress on this queue
	 */
	if (txq->q.read_ptr == txq->q.write_ptr)
		del_timer(&txq->stuck_timer);
	else
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
}

/* Frees buffers until index _not_ inclusive */
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
	struct iwl_queue *q = &txq->q;
	int last_to_free;

	/* This function is not meant to release cmd queue*/
	if (WARN_ON(txq_id == trans_pcie->cmd_queue))
		return;

	spin_lock_bh(&txq->lock);

	if (!txq->active) {
		IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
				    txq_id, ssn);
		goto out;
	}

	if (txq->q.read_ptr == tfd_num)
		goto out;

	IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
			   txq_id, txq->q.read_ptr, tfd_num, ssn);

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used */
	last_to_free = iwl_queue_dec_wrap(tfd_num);

	if (!iwl_queue_used(q, last_to_free)) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
			q->write_ptr, q->read_ptr);
		goto out;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		goto out;

	for (;
	     q->read_ptr != tfd_num;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
		struct sk_buff *skb = txq->entries[txq->q.read_ptr].skb;

		if (WARN_ON_ONCE(!skb))
			continue;

		iwl_pcie_free_tso_page(trans_pcie, skb);

		__skb_queue_tail(skbs, skb);

		txq->entries[txq->q.read_ptr].skb = NULL;

		iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);

		iwl_pcie_txq_free_tfd(trans, txq);
	}

	iwl_pcie_txq_progress(txq);

	if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
	    test_bit(txq_id, trans_pcie->queue_stopped)) {
		struct sk_buff_head overflow_skbs;

		__skb_queue_head_init(&overflow_skbs);
		skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);

		/*
		 * This is tricky: we are in the reclaim path, which is not
		 * re-entrant, so no one else will try to access the txq data
		 * from that path. Tx is stopped too, so no new frames can
		 * arrive. Bottom line: we can safely unlock and re-lock
		 * later.
		 */
		spin_unlock_bh(&txq->lock);

		while (!skb_queue_empty(&overflow_skbs)) {
			struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
			struct iwl_device_cmd *dev_cmd_ptr;

			dev_cmd_ptr = *(void **)((u8 *)skb->cb +
						 trans_pcie->dev_cmd_offs);

			/*
			 * Note that we can very well be overflowing again.
			 * In that case, iwl_queue_space will be small again
			 * and we won't wake mac80211's queue.
			 */
			iwl_trans_pcie_tx(trans, skb, dev_cmd_ptr, txq_id);
		}
		spin_lock_bh(&txq->lock);

		if (iwl_queue_space(&txq->q) > txq->q.low_mark)
			iwl_wake_queue(trans, txq);
	}

	if (q->read_ptr == q->write_ptr) {
		IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", q->id);
		iwl_trans_unref(trans);
	}

out:
	spin_unlock_bh(&txq->lock);
}

static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
				      const struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	lockdep_assert_held(&trans_pcie->reg_lock);

	if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
	    !trans_pcie->ref_cmd_in_flight) {
		trans_pcie->ref_cmd_in_flight = true;
		IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
		iwl_trans_ref(trans);
	}

	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on NICs that have
	 * apmg_wake_up_wa set.
	 */
	if (trans->cfg->base_params->apmg_wake_up_wa &&
	    !trans_pcie->cmd_hold_nic_awake) {
		__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
					 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
				   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
				    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
				   15000);
		if (ret < 0) {
			__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
			return -EIO;
		}
		trans_pcie->cmd_hold_nic_awake = true;
	}

	return 0;
}
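
/*
 * The MAC_ACCESS_REQ bit taken above is held for the lifetime of the
 * command: it is only dropped by iwl_pcie_clear_cmd_in_flight(), which
 * iwl_pcie_cmdq_reclaim() invokes once the command queue has drained
 * (read_ptr == write_ptr).
 */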

/*
 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As result, some free space forms.  If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	unsigned long flags;
	int nfreed = 0;

	lockdep_assert_held(&txq->lock);

	if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(q, idx))) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
			q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
				idx, q->write_ptr, q->read_ptr);
			iwl_force_nmi(trans);
		}
	}

	if (q->read_ptr == q->write_ptr) {
		spin_lock_irqsave(&trans_pcie->reg_lock, flags);
		iwl_pcie_clear_cmd_in_flight(trans);
		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
	}

	iwl_pcie_txq_progress(txq);
}

static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
				 u16 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);

	return 0;
}

/* Receiver address (actually, Rx station's index into station table),
 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
#define BUILD_RAxTID(sta_id, tid)	(((sta_id) << 4) + (tid))
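/* e.g. sta_id = 3, tid = 5 gives BUILD_RAxTID(3, 5) = 0x35 */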

void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	int fifo = -1;

	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	txq->wd_timeout = msecs_to_jiffies(wdg_timeout);

	if (cfg) {
		fifo = cfg->fifo;

		/* Disable the scheduler prior configuring the cmd queue */
		if (txq_id == trans_pcie->cmd_queue &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, 0);

		/* Stop this Tx queue before configuring it */
		iwl_scd_txq_set_inactive(trans, txq_id);

		/* Set this queue as a chain-building queue unless it is CMD */
		if (txq_id != trans_pcie->cmd_queue)
			iwl_scd_txq_set_chain(trans, txq_id);

		if (cfg->aggregate) {
			u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);

			/* Map receiver-address / traffic-ID to this queue */
			iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);

			/* enable aggregations for the queue */
			iwl_scd_txq_enable_agg(trans, txq_id);
			txq->ampdu = true;
		} else {
			/*
			 * disable aggregations for the queue, this will also
			 * make the ra_tid mapping configuration irrelevant
			 * since it is now a non-AGG queue.
			 */
			iwl_scd_txq_disable_agg(trans, txq_id);

			ssn = txq->q.read_ptr;
		}
	}

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	txq->q.read_ptr = (ssn & 0xff);
	txq->q.write_ptr = (ssn & 0xff);
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (ssn & 0xff) | (txq_id << 8));

	if (cfg) {
		u8 frame_limit = cfg->frame_limit;

		iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

		/* Set up Tx window size and frame limit for this queue */
		iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
		iwl_trans_write_mem32(trans,
			trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
					SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
					SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		/* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
		iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
			       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			       (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
			       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			       SCD_QUEUE_STTS_REG_MSK);

		/* enable the scheduler for this queue (only) */
		if (txq_id == trans_pcie->cmd_queue &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, BIT(txq_id));

		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d on FIFO %d WrPtr: %d\n",
				    txq_id, fifo, ssn & 0xff);
	} else {
		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d WrPtr: %d\n",
				    txq_id, ssn & 0xff);
	}

	txq->active = true;
}

void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];

	txq->ampdu = !shared_mode;
}

void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
				bool configure_scd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 stts_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
	static const u32 zero_val[4] = {};

	trans_pcie->txq[txq_id].frozen_expiry_remainder = 0;
	trans_pcie->txq[txq_id].frozen = false;

	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
	 */
	if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", txq_id);
		return;
	}

	if (configure_scd) {
		iwl_scd_txq_set_inactive(trans, txq_id);

		iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
				    ARRAY_SIZE(zero_val));
	}

	iwl_pcie_txq_unmap(trans, txq_id);
	trans_pcie->txq[txq_id].ampdu = false;

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/*
 * iwl_pcie_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of the command in the
 * command queue.
 */
static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
				 struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	unsigned long flags;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int idx;
	u16 copy_size, cmd_size, tb0_size;
	bool had_nocopy = false;
	u8 group_id = iwl_cmd_groupid(cmd->id);
	int i, ret;
	u32 cmd_pos;
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];

	if (WARN(!trans_pcie->wide_cmd_header &&
		 group_id > IWL_ALWAYS_LONG_GROUP,
		 "unsupported wide command %#x\n", cmd->id))
		return -EINVAL;

	if (group_id != 0) {
		copy_size = sizeof(struct iwl_cmd_header_wide);
		cmd_size = sizeof(struct iwl_cmd_header_wide);
	} else {
		copy_size = sizeof(struct iwl_cmd_header);
		cmd_size = sizeof(struct iwl_cmd_header);
	}

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_FIRST_TB_SIZE copied */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			int copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 iwl_get_cmd_string(trans, cmd->id),
		 cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

	spin_lock_bh(&txq->lock);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */
	if (group_id != 0) {
		out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
		out_cmd->hdr_wide.group_id = group_id;
		out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
		out_cmd->hdr_wide.length =
			cpu_to_le16(cmd_size -
				    sizeof(struct iwl_cmd_header_wide));
		out_cmd->hdr_wide.reserved = 0;
		out_cmd->hdr_wide.sequence =
			cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
						 INDEX_TO_SEQ(q->write_ptr));

		cmd_pos = sizeof(struct iwl_cmd_header_wide);
		copy_size = sizeof(struct iwl_cmd_header_wide);
	} else {
		out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
		out_cmd->hdr.sequence =
			cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
						 INDEX_TO_SEQ(q->write_ptr));
		out_cmd->hdr.group_id = 0;

		cmd_pos = sizeof(struct iwl_cmd_header);
		copy_size = sizeof(struct iwl_cmd_header);
	}
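
	/*
	 * The sequence field packs both the queue number and the write
	 * index (QUEUE_TO_SEQ/INDEX_TO_SEQ); iwl_pcie_hcmd_complete()
	 * later unpacks it with SEQ_TO_QUEUE/SEQ_TO_INDEX to locate the
	 * completed command entry.
	 */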

	/* and copy the data that needs to be copied */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy;

		if (!cmd->len[i])
			continue;

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP))) {
			copy = cmd->len[i];

			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
			continue;
		}

		/*
		 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
		 * in total (for bi-directional DMA), but copy up to what
		 * we can fit into the payload for debug dump purposes.
		 */
		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
		cmd_pos += copy;

		/* However, treat copy_size the proper way, we need it below */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     iwl_get_cmd_string(trans, cmd->id),
		     group_id, out_cmd->hdr.cmd,
		     le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);

	/* start the TFD with the minimum copy bytes */
	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
	memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
	iwl_pcie_txq_build_tfd(trans, txq,
			       iwl_pcie_get_first_tb_dma(txq, idx),
			       tb0_size, true);

	/* map first command fragment, if any remains */
	if (copy_size > tb0_size) {
		phys_addr = dma_map_single(trans->dev,
					   ((u8 *)&out_cmd->hdr) + tb0_size,
					   copy_size - tb0_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_pcie_tfd_unmap(trans, out_meta,
					   &txq->tfds[q->write_ptr]);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
				       copy_size - tb0_size, false);
	}

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		const void *data = cmddata[i];

		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, (void *)data,
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_pcie_tfd_unmap(trans, out_meta,
					   &txq->tfds[q->write_ptr]);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
	}

	BUILD_BUG_ON(IWL_NUM_OF_TBS + CMD_TB_BITMAP_POS >
		     sizeof(out_meta->flags) * BITS_PER_BYTE);
	out_meta->flags = cmd->flags;
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kzfree(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);

	/* start timer if queue currently empty */
	if (q->read_ptr == q->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
	if (ret < 0) {
		idx = ret;
		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
		goto out;
	}

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
	iwl_pcie_txq_inc_wr_ptr(trans, txq);

	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);

 out:
	spin_unlock_bh(&txq->lock);
 free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
	return idx;
}

/*
 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 */
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	u8 group_id = iwl_cmd_groupid(pkt->hdr.group_id);
	u32 cmd_id;
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue, then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans_pcie->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, trans_pcie->cmd_queue, sequence,
		 trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
		 trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	spin_lock_bh(&txq->lock);

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->entries[cmd_index].cmd;
	meta = &txq->entries[cmd_index].meta;
	cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);

	iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		struct page *p = rxb_steal_page(rxb);

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
		meta->source->_rx_page_order = trans_pcie->rx_page_order;
	}

	if (meta->flags & CMD_WANT_ASYNC_CALLBACK)
		iwl_op_mode_async_cb(trans->op_mode, cmd);

	iwl_pcie_cmdq_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 iwl_get_cmd_string(trans, cmd_id));
		}
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       iwl_get_cmd_string(trans, cmd_id));
1730
		wake_up(&trans_pcie->wait_command_queue);
1731
	}
1732

1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746
	if (meta->flags & CMD_MAKE_TRANS_IDLE) {
		IWL_DEBUG_INFO(trans, "complete %s - mark trans as idle\n",
			       iwl_get_cmd_string(trans, cmd->hdr.cmd));
		set_bit(STATUS_TRANS_IDLE, &trans->status);
		wake_up(&trans_pcie->d0i3_waitq);
	}

	if (meta->flags & CMD_WAKE_UP_TRANS) {
		IWL_DEBUG_INFO(trans, "complete %s - clear trans idle flag\n",
			       iwl_get_cmd_string(trans, cmd->hdr.cmd));
		clear_bit(STATUS_TRANS_IDLE, &trans->status);
		wake_up(&trans_pcie->d0i3_waitq);
	}

Z
Zhu Yi 已提交
1747
	meta->flags = 0;
1748

1749
	spin_unlock_bh(&txq->lock);
1750
}
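
/*
 * For illustration only: the lookup above works because the enqueue side
 * stamps the queue number and ring index into the command header, and this
 * completion path inverts that encoding. A sketch, assuming the helper
 * macros keep their usual pairing:
 *
 *	enqueue:  hdr.sequence = QUEUE_TO_SEQ(txq_id) | INDEX_TO_SEQ(idx);
 *	complete: txq_id = SEQ_TO_QUEUE(sequence);
 *	          idx    = SEQ_TO_INDEX(sequence);
 */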

#define HOST_COMPLETE_TIMEOUT	(2 * HZ)

static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
				    struct iwl_host_cmd *cmd)
{
	int ret;

	/* An asynchronous command cannot expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;

	ret = iwl_pcie_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			iwl_get_cmd_string(trans, cmd->id), ret);
		return ret;
	}
	return 0;
}
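
/*
 * Note: with CMD_ASYNC the caller never sees a response; completion is
 * visible only through iwl_pcie_hcmd_complete() above (and through the
 * op-mode's callback when CMD_WANT_ASYNC_CALLBACK is set).
 */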

static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
				   struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
		       iwl_get_cmd_string(trans, cmd->id));

	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
				  &trans->status),
		 "Command %s: a command is already active!\n",
		 iwl_get_cmd_string(trans, cmd->id)))
		return -EIO;

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
		       iwl_get_cmd_string(trans, cmd->id));

	if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) {
		ret = wait_event_timeout(trans_pcie->d0i3_waitq,
				 pm_runtime_active(&trans_pcie->pci_dev->dev),
				 msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
		if (!ret) {
			IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n");
			return -ETIMEDOUT;
		}
	}

	cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			iwl_get_cmd_string(trans, cmd->id), ret);
		return ret;
	}

	ret = wait_event_timeout(trans_pcie->wait_command_queue,
				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
					   &trans->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
		struct iwl_queue *q = &txq->q;

		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
			iwl_get_cmd_string(trans, cmd->id),
			jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
			q->read_ptr, q->write_ptr);

		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       iwl_get_cmd_string(trans, cmd->id));
		ret = -ETIMEDOUT;

		iwl_force_nmi(trans);
		iwl_trans_fw_error(trans);

		goto cancel;
	}

	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
		IWL_ERR(trans, "FW error in SYNC CMD %s\n",
			iwl_get_cmd_string(trans, cmd->id));
		dump_stack();
		ret = -EIO;
		goto cancel;
	}

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
		ret = -ERFKILL;
		goto cancel;
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			iwl_get_cmd_string(trans, cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		trans_pcie->txq[trans_pcie->cmd_queue].
			entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}
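
/*
 * Design note: a sync-command timeout is treated as fatal above --
 * iwl_force_nmi() asks the firmware to dump its state and
 * iwl_trans_fw_error() notifies the op-mode, since a stuck command
 * queue cannot be recovered without a firmware restart.
 */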

int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
				  cmd->id);
		return -ERFKILL;
	}

	if (cmd->flags & CMD_ASYNC)
		return iwl_pcie_send_hcmd_async(trans, cmd);

	/* We can still fail on an RFKILL that is asserted while we wait */
	return iwl_pcie_send_hcmd_sync(trans, cmd);
}
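
/*
 * Example usage (a minimal sketch; ECHO_CMD stands in for any real
 * command ID and is illustrative only):
 *
 *	struct iwl_host_cmd hcmd = {
 *		.id = ECHO_CMD,
 *		.flags = CMD_WANT_SKB,
 *	};
 *	int ret = iwl_trans_pcie_send_hcmd(trans, &hcmd);
 *
 *	if (!ret) {
 *		// ... use hcmd.resp_pkt ...
 *		iwl_free_resp(&hcmd);
 *	}
 */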

static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
			     struct iwl_txq *txq, u8 hdr_len,
			     struct iwl_cmd_meta *out_meta,
			     struct iwl_device_cmd *dev_cmd, u16 tb1_len)
{
	struct iwl_queue *q = &txq->q;
	u16 tb2_len;
	int i;

	/*
	 * Set up TFD's third entry to point directly to remainder
	 * of skb's head, if any
	 */
	tb2_len = skb_headlen(skb) - hdr_len;

	if (tb2_len > 0) {
		dma_addr_t tb2_phys = dma_map_single(trans->dev,
						     skb->data + hdr_len,
						     tb2_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
			iwl_pcie_tfd_unmap(trans, out_meta,
					   &txq->tfds[q->write_ptr]);
			return -EINVAL;
		}
		iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
	}

	/* set up the remaining entries to point to the data */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t tb_phys;
		int tb_idx;

		if (!skb_frag_size(frag))
			continue;

		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
			iwl_pcie_tfd_unmap(trans, out_meta,
					   &txq->tfds[q->write_ptr]);
			return -EINVAL;
		}
		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
						skb_frag_size(frag), false);

		out_meta->flags |= BIT(tb_idx + CMD_TB_BITMAP_POS);
	}

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     &txq->tfds[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
			     skb->data + hdr_len, tb2_len);
	trace_iwlwifi_dev_tx_data(trans->dev, skb,
				  hdr_len, skb->len - hdr_len);
	return 0;
}
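
/*
 * Resulting TFD layout for a regular (non-A-MSDU) frame, for reference:
 *
 *	TB0: first IWL_FIRST_TB_SIZE bytes of the command (bi-directional)
 *	TB1: rest of the TX command + 802.11 header (built by the caller)
 *	TB2: remainder of the skb head past the header, if any
 *	TB3+: one TB per non-empty skb fragment
 */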

#ifdef CONFIG_INET
static struct iwl_tso_hdr_page *
get_page_hdr(struct iwl_trans *trans, size_t len)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page);

	if (!p->page)
		goto alloc;

	/* enough room on this page */
	if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE)
		return p;

	/* We don't have enough room on this page, get a new one. */
	__free_page(p->page);

alloc:
	p->page = alloc_page(GFP_ATOMIC);
	if (!p->page)
		return NULL;
	p->pos = page_address(p->page);
	return p;
}
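
/*
 * Note: this returns a per-CPU scratch page used to build TSO headers.
 * The caller takes its own page reference and parks it in skb->cb, so
 * the page stays alive until the last TX using it completes; the
 * __free_page() above only drops this cache's reference when it moves
 * on to a fresh page.
 */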

static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph,
					bool ipv6, unsigned int len)
{
	if (ipv6) {
		struct ipv6hdr *iphv6 = iph;

		tcph->check = ~csum_ipv6_magic(&iphv6->saddr, &iphv6->daddr,
					       len + tcph->doff * 4,
					       IPPROTO_TCP, 0);
	} else {
		struct iphdr *iphv4 = iph;

		ip_send_check(iphv4);
		tcph->check = ~csum_tcpudp_magic(iphv4->saddr, iphv4->daddr,
						 len + tcph->doff * 4,
						 IPPROTO_TCP, 0);
	}
}
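
/*
 * Rationale: the TCP checksum field is seeded with the complement of the
 * pseudo-header sum, so that folding in the checksum computed over the
 * TCP header and payload later (see the sw_csum_tx path below) yields
 * the final value the receiver verifies.
 */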

static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_txq *txq, u8 hdr_len,
				   struct iwl_cmd_meta *out_meta,
				   struct iwl_device_cmd *dev_cmd, u16 tb1_len)
{
	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	struct iwl_queue *q = &txq->q;
	u16 length, iv_len, amsdu_pad;
	u8 *start_hdr;
	struct iwl_tso_hdr_page *hdr_page;
	struct page **page_ptr;
	int ret;
	struct tso_t tso;

	/* if the packet is protected, then it must be CCMP or GCMP */
	BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN);
	iv_len = ieee80211_has_protected(hdr->frame_control) ?
		IEEE80211_CCMP_HDR_LEN : 0;

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     &txq->tfds[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
			     NULL, 0);

	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
	amsdu_pad = 0;

	/* total amount of header we may need for this A-MSDU */
	hdr_room = DIV_ROUND_UP(total_len, mss) *
		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;

	/* Our device supports 9 segments at most; the headers will fit in one page */
	hdr_page = get_page_hdr(trans, hdr_room);
	if (!hdr_page)
		return -ENOMEM;

	get_page(hdr_page->page);
	start_hdr = hdr_page->pos;
	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
	*page_ptr = hdr_page->page;
	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
	hdr_page->pos += iv_len;

	/*
	 * Pull the ieee80211 header + IV to be able to use the TSO core;
	 * we will restore it for the tx_status flow.
	 */
	skb_pull(skb, hdr_len + iv_len);

	tso_start(skb, &tso);

	while (total_len) {
		/* this is the data left for this subframe */
		unsigned int data_left =
			min_t(unsigned int, mss, total_len);
		struct sk_buff *csum_skb = NULL;
		unsigned int hdr_tb_len;
		dma_addr_t hdr_tb_phys;
		struct tcphdr *tcph;
		u8 *iph;

		total_len -= data_left;

		memset(hdr_page->pos, 0, amsdu_pad);
		hdr_page->pos += amsdu_pad;
		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
				  data_left)) & 0x3;
		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
		hdr_page->pos += ETH_ALEN;
		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
		hdr_page->pos += ETH_ALEN;

		length = snap_ip_tcp_hdrlen + data_left;
		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
		hdr_page->pos += sizeof(length);

		/*
		 * This will copy the SNAP as well, which will be treated
		 * as part of the MAC header.
		 */
		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
		iph = hdr_page->pos + 8;
		tcph = (void *)(iph + ip_hdrlen);

		/* For testing on current hardware only */
		if (trans_pcie->sw_csum_tx) {
			csum_skb = alloc_skb(data_left + tcp_hdrlen(skb),
					     GFP_ATOMIC);
			if (!csum_skb) {
				ret = -ENOMEM;
				goto out_unmap;
			}

			iwl_compute_pseudo_hdr_csum(iph, tcph,
						    skb->protocol ==
							htons(ETH_P_IPV6),
						    data_left);

			memcpy(skb_put(csum_skb, tcp_hdrlen(skb)),
			       tcph, tcp_hdrlen(skb));
			skb_set_transport_header(csum_skb, 0);
			csum_skb->csum_start =
				(unsigned char *)tcp_hdr(csum_skb) -
						 csum_skb->head;
		}

		hdr_page->pos += snap_ip_tcp_hdrlen;

		hdr_tb_len = hdr_page->pos - start_hdr;
		hdr_tb_phys = dma_map_single(trans->dev, start_hdr,
					     hdr_tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) {
			dev_kfree_skb(csum_skb);
			ret = -EINVAL;
			goto out_unmap;
		}
		iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
				       hdr_tb_len, false);
		trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr,
					       hdr_tb_len);

		/* prepare the start_hdr for the next subframe */
		start_hdr = hdr_page->pos;

		/* put the payload */
		while (data_left) {
			unsigned int size = min_t(unsigned int, tso.size,
						  data_left);
			dma_addr_t tb_phys;

			if (trans_pcie->sw_csum_tx)
				memcpy(skb_put(csum_skb, size), tso.data, size);

			tb_phys = dma_map_single(trans->dev, tso.data,
						 size, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
				dev_kfree_skb(csum_skb);
				ret = -EINVAL;
				goto out_unmap;
			}

			iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
					       size, false);
			trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data,
						       size);

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}

		/* For testing on early hardware only */
		if (trans_pcie->sw_csum_tx) {
			__wsum csum;

			csum = skb_checksum(csum_skb,
					    skb_checksum_start_offset(csum_skb),
					    csum_skb->len -
					    skb_checksum_start_offset(csum_skb),
					    0);
			dev_kfree_skb(csum_skb);
			dma_sync_single_for_cpu(trans->dev, hdr_tb_phys,
						hdr_tb_len, DMA_TO_DEVICE);
			tcph->check = csum_fold(csum);
			dma_sync_single_for_device(trans->dev, hdr_tb_phys,
						   hdr_tb_len, DMA_TO_DEVICE);
		}
	}

	/* re-add the WiFi header and IV */
	skb_push(skb, hdr_len + iv_len);

	return 0;

out_unmap:
	iwl_pcie_tfd_unmap(trans, out_meta, &txq->tfds[q->write_ptr]);
	return ret;
}
#else /* CONFIG_INET */
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_txq *txq, u8 hdr_len,
				   struct iwl_cmd_meta *out_meta,
				   struct iwl_device_cmd *dev_cmd, u16 tb1_len)
{
	/* No A-MSDU without CONFIG_INET */
	WARN_ON(1);

	return -1;
}
#endif /* CONFIG_INET */
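
/*
 * Each A-MSDU subframe assembled above has the on-air layout (sketch):
 *
 *	| DA (6) | SA (6) | length (2) | SNAP (8) | IP hdr | TCP hdr | payload |
 *
 * with 0-3 bytes of padding (amsdu_pad) so that each subframe starts on
 * a 4-byte boundary.
 */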

int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq;
	struct iwl_queue *q;
	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
	void *tb1_addr;
	u16 len, tb1_len;
	bool wait_write_ptr;
	__le16 fc;
	u8 hdr_len;
	u16 wifi_seq;
	bool amsdu;

	txq = &trans_pcie->txq[txq_id];
	q = &txq->q;

	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	if (unlikely(trans_pcie->sw_csum_tx &&
		     skb->ip_summed == CHECKSUM_PARTIAL)) {
		int offs = skb_checksum_start_offset(skb);
		int csum_offs = offs + skb->csum_offset;
		__wsum csum;

		if (skb_ensure_writable(skb, csum_offs + sizeof(__sum16)))
			return -1;

		csum = skb_checksum(skb, offs, skb->len - offs, 0);
		*(__sum16 *)(skb->data + csum_offs) = csum_fold(csum);

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS &&
	    __skb_linearize(skb))
		return -ENOMEM;

	/* mac80211 always puts the full header into the SKB's head,
	 * so there's no need to check if it's readable there
	 */
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	hdr_len = ieee80211_hdrlen(fc);

	spin_lock(&txq->lock);

	if (iwl_queue_space(q) < q->high_mark) {
		iwl_stop_queue(trans, txq);

		/* don't put the packet on the ring, if there is no room */
		if (unlikely(iwl_queue_space(q) < 3)) {
			struct iwl_device_cmd **dev_cmd_ptr;

			dev_cmd_ptr = (void *)((u8 *)skb->cb +
					       trans_pcie->dev_cmd_offs);

			*dev_cmd_ptr = dev_cmd;
			__skb_queue_tail(&txq->overflow_q, skb);

			spin_unlock(&txq->lock);
			return 0;
		}
	}

	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirement to help the SCD parse
	 * the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE(txq->ampdu &&
		  (wifi_seq & 0xff) != q->write_ptr,
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, q->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[q->write_ptr].skb = skb;
	txq->entries[q->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(q->write_ptr)));

	tb0_phys = iwl_pcie_get_first_tb_dma(txq, q->write_ptr);
	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
		       offsetof(struct iwl_tx_cmd, scratch);

	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[q->write_ptr].meta;
	out_meta->flags = 0;

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
	      hdr_len - IWL_FIRST_TB_SIZE;
	/* do not align A-MSDU to dword as the subframe header aligns it */
	amsdu = ieee80211_is_data_qos(fc) &&
		(*ieee80211_get_qos_ctl(hdr) &
		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
	if (trans_pcie->sw_csum_tx || !amsdu) {
		tb1_len = ALIGN(len, 4);
		/* Tell NIC about any 2-byte padding after MAC header */
		if (tb1_len != len)
			tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
	} else {
		tb1_len = len;
	}

	/* The first TB points to bi-directional DMA data */
	memcpy(&txq->first_tb_bufs[q->write_ptr], &dev_cmd->hdr,
	       IWL_FIRST_TB_SIZE);
	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
			       IWL_FIRST_TB_SIZE, true);

	/* there must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
		goto out_err;
	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);

	if (amsdu) {
		if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
						     out_meta, dev_cmd,
						     tb1_len)))
			goto out_err;
	} else if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
					      out_meta, dev_cmd, tb1_len))) {
		goto out_err;
	}

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));

	wait_write_ptr = ieee80211_has_morefrags(fc);

	/* start timer if queue currently empty */
	if (q->read_ptr == q->write_ptr) {
		if (txq->wd_timeout) {
			/*
			 * If the TXQ is active, arm the timer now; if not,
			 * store the timeout in frozen_expiry_remainder so
			 * that the timer is armed with the right value when
			 * the station wakes up.
			 */
			if (!txq->frozen)
				mod_timer(&txq->stuck_timer,
					  jiffies + txq->wd_timeout);
			else
				txq->frozen_expiry_remainder = txq->wd_timeout;
		}
		IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
		iwl_trans_ref(trans);
	}

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
	if (!wait_write_ptr)
		iwl_pcie_txq_inc_wr_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
out_err:
	spin_unlock(&txq->lock);
	return -1;
}
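
/*
 * Design note: when the queue fills past high_mark, mac80211 is stopped
 * via iwl_stop_queue(), but frames already handed to us are parked on
 * txq->overflow_q (with the device command pointer stashed in skb->cb)
 * and are re-queued from the reclaim path once space frees up.
 */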