/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-agn-hw.h"
#include "internal.h"
/* FIXME: need to abstract out TX command (once we know what it looks like) */
#include "dvm/commands.h"

#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie)	\
	(((1<<trans->cfg->base_params->num_of_queues) - 1) &\
	(~(1<<(trans_pcie)->cmd_queue)))
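
/*
 * Example, with illustrative values only: for num_of_queues == 20 and
 * cmd_queue == 4, ((1 << 20) - 1) == 0xFFFFF selects every queue and
 * ~(1 << 4) then clears the command queue, giving 0xFFFEF -- chain
 * mode for all data queues but not for the command queue.
 */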

static int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
				struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static void iwlagn_free_dma_ptr(struct iwl_trans *trans,
				struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}
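
/*
 * Usage sketch (for documentation purposes; it mirrors the calls made
 * from iwl_trans_tx_alloc() and iwl_trans_pcie_tx_free() below):
 *
 *	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls, size);
 *	...
 *	iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
 *
 * iwlagn_free_dma_ptr() zeroes the descriptor, so freeing an already
 * freed (or never allocated) pointer is a harmless no-op.
 */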

static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
{
	struct iwl_txq *txq = (void *)data;
	struct iwl_queue *q = &txq->q;
	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
	u32 scd_sram_addr = trans_pcie->scd_base_addr +
				SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
	u8 buf[16];
	int i;

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->q.read_ptr == txq->q.write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
		jiffies_to_msecs(trans_pcie->wd_timeout));
	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->q.read_ptr, txq->q.write_ptr);

	iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

	iwl_print_hex_error(trans, buf, sizeof(buf));

	for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
			iwl_read_direct32(trans, FH_TX_TRB_REG(i)));

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
		u32 tbl_dw =
			iwl_read_targ_mem(trans,
					  trans_pcie->scd_base_addr +
					  SCD_TRANS_TBL_OFFSET_QUEUE(i));
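
		/*
		 * Each translation-table dword packs two 16-bit ra_tid
		 * entries: odd-numbered queues live in the upper half-word,
		 * even-numbered queues in the lower one.
		 */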

		if (i & 0x1)
			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
		else
			tbl_dw = tbl_dw & 0x0000FFFF;

		IWL_ERR(trans,
			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
			i, active ? "" : "in", fifo, tbl_dw,
			iwl_read_prph(trans,
				      SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1),
			iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
	}

	for (i = q->read_ptr; i != q->write_ptr;
	     i = iwl_queue_inc_wrap(i, q->n_bd)) {
		struct iwl_tx_cmd *tx_cmd =
			(struct iwl_tx_cmd *)txq->entries[i].cmd->payload;
		IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
			get_unaligned_le32(&tx_cmd->scratch));
	}

	iwl_op_mode_nic_error(trans->op_mode);
}

static int iwl_trans_txq_alloc(struct iwl_trans *trans,
			       struct iwl_txq *txq, int slots_num,
			       u32 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
	int i;

	if (WARN_ON(txq->entries || txq->tfds))
		return -EINVAL;

	setup_timer(&txq->stuck_timer, iwl_trans_pcie_queue_stuck_timer,
		    (unsigned long)txq);
	txq->trans_pcie = trans_pcie;

	txq->q.n_window = slots_num;

	txq->entries = kcalloc(slots_num,
			       sizeof(struct iwl_pcie_txq_entry),
			       GFP_KERNEL);

	if (!txq->entries)
		goto error;

	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->entries[i].cmd =
				kmalloc(sizeof(struct iwl_device_cmd),
					GFP_KERNEL);
			if (!txq->entries[i].cmd)
				goto error;
		}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->q.dma_addr, GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = txq_id;

	return 0;
error:
	if (txq->entries && txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->entries[i].cmd);
	kfree(txq->entries);
	txq->entries = NULL;

	return -ENOMEM;
}

static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
			      int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = 0;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
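	/*
	 * E.g. with TFD_QUEUE_SIZE_MAX == 256 the wrap helpers can rely
	 * on mask arithmetic: (255 + 1) & (256 - 1) == 0.  With a
	 * non-power-of-two size that mask would skip or repeat slots.
	 */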

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
			txq_id);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
			     txq->q.dma_addr >> 8);

	return 0;
}

/*
 * iwl_pcie_txq_unmap -  Unmap any remaining DMA mappings and free skb's
 */
void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	enum dma_data_direction dma_dir;

	if (!q->n_bd)
		return;

	/* In the command queue, all the TBs are mapped as BIDI
	 * so unmap them as such.
	 */
	if (txq_id == trans_pcie->cmd_queue)
		dma_dir = DMA_BIDIRECTIONAL;
	else
		dma_dir = DMA_TO_DEVICE;

	spin_lock_bh(&txq->lock);
	while (q->write_ptr != q->read_ptr) {
		iwl_pcie_txq_free_tfd(trans, txq, dma_dir);
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
	spin_unlock_bh(&txq->lock);
}

/*
 * iwl_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < txq->q.n_window; i++) {
			kfree(txq->entries[i].cmd);
			kfree(txq->entries[i].copy_cmd);
			kfree(txq->entries[i].free_buf);
		}

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd) {
		dma_free_coherent(dev, sizeof(struct iwl_tfd) *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
		memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
	}

	kfree(txq->entries);
	txq->entries = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/*
 * iwl_trans_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Tx queues */
	if (trans_pcie->txq) {
		for (txq_id = 0;
		     txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
			iwl_txq_free(trans, txq_id);
	}

	kfree(trans_pcie->txq);
	trans_pcie->txq = NULL;

	iwlagn_free_dma_ptr(trans, &trans_pcie->kw);

	iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}

/*
 * iwl_trans_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 */
static int iwl_trans_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
			sizeof(struct iwlagn_scd_bc_tbl);

	/* It is not allowed to allocate twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail. */
	if (WARN_ON(trans_pcie->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
				   scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
				  sizeof(struct iwl_txq), GFP_KERNEL);
	if (!trans_pcie->txq) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
					  slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	iwl_trans_pcie_tx_free(trans);

	return ret;
}
static int iwl_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	unsigned long flags;
	bool alloc = false;

	if (!trans_pcie->txq) {
		ret = iwl_trans_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl_write_prph(trans, SCD_TXFACT, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
					 slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_trans_pcie_tx_free(trans);
	return ret;
}

static void iwl_pcie_set_pwr_vmain(struct iwl_trans *trans)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

		if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
			iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					       ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
			       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041
#define PCI_CFG_LINK_CTRL_VAL_L0S_EN	0x01
#define PCI_CFG_LINK_CTRL_VAL_L1_EN	0x02

static void iwl_pcie_apm_config(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 lctl;

	/*
	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 *    costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);

	if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
				PCI_CFG_LINK_CTRL_VAL_L1_EN) {
		/* L1-ASPM enabled; disable(!) L0S */
		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_printk(KERN_INFO, trans->dev,
			   "L1 Enabled; Disabling L0S\n");
	} else {
		/* L1-ASPM disabled; enable(!) L0S */
		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_printk(KERN_INFO, trans->dev,
			   "L1 Disabled; Enabling L0S\n");
	}
	trans->pm_support = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
}

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = 0;
	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 *  don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg_val)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG,
			    trans->cfg->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks.  This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active */
	iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);

out:
	return ret;
}

static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(trans, CSR_RESET,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");

	return ret;
}

static void iwl_pcie_apm_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	/* Reset the entire device */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}

static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	/* nic_init */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_pcie_apm_init(trans);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_pcie_set_pwr_vmain(trans);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_pcie_rx_init(trans);

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_tx_init(trans))
		return -ENOMEM;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int t = 0;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_pcie_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	/* If HW is not ready, prepare the conditions to check again */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PREPARE);

	do {
		ret = iwl_pcie_set_hw_ready(trans);
		if (ret >= 0)
			return 0;

		usleep_range(200, 1000);
		t += 200;
	} while (t < 150000);
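	/*
	 * The loop above polls for a budget of 150000 us (t advances by
	 * 200 per pass), i.e. roughly 150-750 ms of wall-clock time with
	 * usleep_range(200, 1000), before giving up.
	 */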

	return ret;
}

/*
 * ucode
 */
static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
				   dma_addr_t phy_addr, u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	trans_pcie->ucode_write_complete = false;

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write_direct32(trans,
			   FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
			   dst_addr);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
			   phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
			   (iwl_get_dma_hi_addr(phy_addr)
				<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
			   FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE	|
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE	|
			   FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Failed to load firmware chunk!\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
			    const struct fw_desc *section)
{
	u8 *v_addr;
	dma_addr_t p_addr;
	u32 offset;
	int ret = 0;

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);

	v_addr = dma_alloc_coherent(trans->dev, PAGE_SIZE, &p_addr, GFP_KERNEL);
	if (!v_addr)
		return -ENOMEM;

	for (offset = 0; offset < section->len; offset += PAGE_SIZE) {
		u32 copy_size;

		copy_size = min_t(u32, PAGE_SIZE, section->len - offset);

		memcpy(v_addr, (u8 *)section->data + offset, copy_size);
		ret = iwl_pcie_load_firmware_chunk(trans,
						   section->offset + offset,
						   p_addr, copy_size);
		if (ret) {
			IWL_ERR(trans,
				"Could not load the [%d] uCode section\n",
				section_num);
			break;
		}
	}

	dma_free_coherent(trans->dev, PAGE_SIZE, v_addr, p_addr);
	return ret;
}
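
/*
 * Illustrative numbers: with 4 KiB pages, a 13000-byte uCode section is
 * pushed through the bounce buffer above in four chunks of 4096, 4096,
 * 4096 and 712 bytes, each one DMA'd to the device before the next
 * memcpy() reuses the buffer.
 */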

static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
				const struct fw_img *image)
{
	int i, ret = 0;

	for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
		if (!image->sec[i].data)
			break;

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	/* Remove all resets to allow NIC to operate */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}

static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	bool hw_rfkill;

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	clear_bit(STATUS_FW_ERROR, &trans_pcie->status);

	iwl_enable_rfkill_int(trans);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_is_rfkill_set(trans);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
	if (hw_rfkill)
		return -ERFKILL;

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	return iwl_pcie_load_given_ucode(trans, fw);
}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
 */
static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write_prph(trans, SCD_TXFACT, mask);
}

static void iwl_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 a;
	int chan;
	u32 reg_val;

	/* make sure all queue are not stopped/used */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);

	WARN_ON(scd_base_addr != 0 &&
		scd_base_addr != trans_pcie->scd_base_addr);

	a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
	/* reset context data memory */
	for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(trans, a, 0);
	/* reset tx status memory */
	for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(trans, a, 0);
	for (; a < trans_pcie->scd_base_addr +
	       SCD_TRANS_TBL_OFFSET_QUEUE(
				trans->cfg->base_params->num_of_queues);
	       a += 4)
		iwl_write_targ_mem(trans, a, 0);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* The chain extension of the SCD doesn't work well. This feature is
	 * enabled by default by the HW, so we need to disable it manually.
	 */
	iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
				trans_pcie->cmd_fifo);

	/* Activate all Tx DMA/FIFO channels */
	iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
	iwl_tx_start(trans, scd_addr);
}

/*
 * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
 */
static int iwl_trans_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ch, txq_id, ret;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	iwl_trans_txq_set_sched(trans, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(trans,
				   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
			FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
		if (ret < 0)
			IWL_ERR(trans,
				"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
				ch,
				iwl_read_direct32(trans,
						  FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	if (!trans_pcie->txq) {
		IWL_WARN(trans,
			 "Stopping tx queues that aren't allocated...\n");
		return 0;
	}

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_pcie_txq_unmap(trans, txq_id);

	return 0;
}

static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* device going down, Stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
		iwl_trans_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		iwl_write_prph(trans, APMG_CLK_DIS_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_apm_stop(trans);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_enable_rfkill_int(trans);

	/* wait to make sure we flush pending tasklet */
	synchronize_irq(trans_pcie->irq);
	tasklet_kill(&trans_pcie->irq_tasklet);

	cancel_work_sync(&trans_pcie->rx_replenish);

	/* stop and reset the on-board processor */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* clear all status bits */
	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
	clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
	clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
	clear_bit(STATUS_RFKILL, &trans_pcie->status);
}

static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
{
	/* let the ucode operate on its own */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_SET,
		    CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);

	iwl_disable_interrupts(trans);
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}

static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
			     struct iwl_device_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq;
	struct iwl_queue *q;
	dma_addr_t phys_addr = 0;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	u8 wait_write_ptr = 0;
	__le16 fc = hdr->frame_control;
	u8 hdr_len = ieee80211_hdrlen(fc);
	u16 __maybe_unused wifi_seq;

	txq = &trans_pcie->txq[txq_id];
	q = &txq->q;

	if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	spin_lock(&txq->lock);

	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirements to help the SCD to parse
	 * the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
#ifdef CONFIG_IWLWIFI_DEBUG
	wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) &&
		  ((wifi_seq & 0xff) != q->write_ptr),
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, q->write_ptr);
#endif

	/* Set up driver data for this TFD */
	txq->entries[q->write_ptr].skb = skb;
	txq->entries[q->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.cmd = REPLY_TX;
	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(q->write_ptr)));

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[q->write_ptr].meta;

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (firstlen != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
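	/*
	 * Worked example (illustrative sizes only): if len came to 58,
	 * firstlen = (58 + 3) & ~3 == 60 and the two pad bytes are
	 * signalled to the device via the flag above.
	 */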

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = dma_map_single(trans->dev,
				    &dev_cmd->hdr, firstlen,
				    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
		goto out_err;
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);

	if (!ieee80211_has_morefrags(fc)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
					   secondlen, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
			dma_unmap_single(trans->dev,
					 dma_unmap_addr(out_meta, mapping),
					 dma_unmap_len(out_meta, len),
					 DMA_BIDIRECTIONAL);
			goto out_err;
		}
	}

	/* Attach buffers to TFD */
	iwl_pcie_tx_build_tfd(trans, txq, txcmd_phys, firstlen, 1);
	if (secondlen > 0)
		iwl_pcie_tx_build_tfd(trans, txq, phys_addr, secondlen, 0);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
				DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
		     le16_to_cpu(dev_cmd->hdr.sequence));
	IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));

	dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
				   DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     &txq->tfds[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);
	trace_iwlwifi_dev_tx_data(trans->dev, skb,
				  skb->data + hdr_len, secondlen);

	/* start timer if queue currently empty */
	if (txq->need_update && q->read_ptr == q->write_ptr &&
	    trans_pcie->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_pcie_txq_inc_wr_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */
	if (iwl_queue_space(q) < q->high_mark) {
		if (wait_write_ptr) {
			txq->need_update = 1;
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
		} else {
			iwl_stop_queue(trans, txq);
		}
	}
	spin_unlock(&txq->lock);
	return 0;
 out_err:
	spin_unlock(&txq->lock);
	return -1;
}

static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int err;
	bool hw_rfkill;

	trans_pcie->inta_mask = CSR_INI_SET_MASK;

	if (!trans_pcie->irq_requested) {
		tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
			iwl_pcie_tasklet, (unsigned long)trans);

		iwl_pcie_alloc_ict(trans);

		err = request_irq(trans_pcie->irq, iwl_pcie_isr_ict,
				  IRQF_SHARED, DRV_NAME, trans);
		if (err) {
			IWL_ERR(trans, "Error allocating IRQ %d\n",
				trans_pcie->irq);
			goto error;
		}

		trans_pcie->irq_requested = true;
	}

	err = iwl_pcie_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
		goto err_free_irq;
	}

	iwl_pcie_apm_init(trans);

	/* From now on, the op_mode will be kept updated about RF kill state */
	iwl_enable_rfkill_int(trans);

	hw_rfkill = iwl_is_rfkill_set(trans);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

	return err;

err_free_irq:
	trans_pcie->irq_requested = false;
	free_irq(trans_pcie->irq, trans);
error:
	iwl_pcie_free_ict(trans);
	tasklet_kill(&trans_pcie->irq_tasklet);
	return err;
}

static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
				   bool op_mode_leaving)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_pcie_apm_stop(trans);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	if (!op_mode_leaving) {
		/*
		 * Even if we stop the HW, we still want the RF kill
		 * interrupt
		 */
		iwl_enable_rfkill_int(trans);

		/*
		 * Check again since the RF kill state may have changed while
		 * all the interrupts were disabled, in this case we couldn't
		 * receive the RF kill interrupt and update the state in the
		 * op_mode.
		 */
		hw_rfkill = iwl_is_rfkill_set(trans);
		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
	}
}

static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
				   struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	/* n_bd is usually 256 => n_bd - 1 = 0xff */
	int tfd_num = ssn & (txq->q.n_bd - 1);

	spin_lock(&txq->lock);

	if (txq->q.read_ptr != tfd_num) {
		IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
				   txq_id, txq->q.read_ptr, tfd_num, ssn);
		iwl_pcie_txq_reclaim(trans, txq_id, tfd_num, skbs);
		if (iwl_queue_space(&txq->q) > txq->q.low_mark)
			iwl_wake_queue(trans, txq);
	}

	spin_unlock(&txq->lock);
}

static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_configure(struct iwl_trans *trans,
				     const struct iwl_trans_config *trans_cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->cmd_queue = trans_cfg->cmd_queue;
	trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
		trans_pcie->n_no_reclaim_cmds = 0;
	else
		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
	if (trans_pcie->n_no_reclaim_cmds)
		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));

	trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
	if (trans_pcie->rx_buf_size_8k)
		trans_pcie->rx_page_order = get_order(8 * 1024);
	else
		trans_pcie->rx_page_order = get_order(4 * 1024);
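	/* On 4 KiB-page systems get_order(8 * 1024) == 1 (an order-1,
	 * two-page allocation) and get_order(4 * 1024) == 0. */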

	trans_pcie->wd_timeout =
		msecs_to_jiffies(trans_cfg->queue_watchdog_timeout);

	trans_pcie->command_names = trans_cfg->command_names;
}

void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_trans_pcie_tx_free(trans);
	iwl_pcie_rx_free(trans);

	if (trans_pcie->irq_requested) {
		free_irq(trans_pcie->irq, trans);
		iwl_pcie_free_ict(trans);
	}

	pci_disable_msi(trans_pcie->pci_dev);
	iounmap(trans_pcie->hw_base);
	pci_release_regions(trans_pcie->pci_dev);
	pci_disable_device(trans_pcie->pci_dev);
	kmem_cache_destroy(trans->dev_cmd_pool);

	kfree(trans);
}

static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
}

#ifdef CONFIG_PM_SLEEP
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
	return 0;
}

static int iwl_trans_pcie_resume(struct iwl_trans *trans)
{
	bool hw_rfkill;

	iwl_enable_rfkill_int(trans);

	hw_rfkill = iwl_is_rfkill_set(trans);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

	if (!hw_rfkill)
		iwl_enable_interrupts(trans);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

#define IWL_FLUSH_WAIT_MS	2000

static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	struct iwl_queue *q;
	int cnt;
	unsigned long now = jiffies;
	int ret = 0;

	/* waiting for all the tx frames complete might take a while */
	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		if (cnt == trans_pcie->cmd_queue)
			continue;
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		while (q->read_ptr != q->write_ptr && !time_after(jiffies,
		       now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
			msleep(1);

		if (q->read_ptr != q->write_ptr) {
			IWL_ERR(trans, "fail to flush all tx fifo queues\n");
			ret = -ETIMEDOUT;
			break;
		}
	}
	return ret;
}

static const char *get_fh_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
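/*
 * Stringification helper, e.g. IWL_CMD(FH_TSSR_TX_STATUS_REG) expands to:
 *	case FH_TSSR_TX_STATUS_REG: return "FH_TSSR_TX_STATUS_REG";
 */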
	switch (cmd) {
	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
	IWL_CMD(FH_RSCSR_CHNL0_WPTR);
	IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
	IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
	IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
	IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
	IWL_CMD(FH_TSSR_TX_STATUS_REG);
	IWL_CMD(FH_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}

int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf)
{
	int i;
	static const u32 fh_tbl[] = {
		FH_RSCSR_CHNL0_STTS_WPTR_REG,
		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH_RSCSR_CHNL0_WPTR,
		FH_MEM_RCSR_CHNL0_CONFIG_REG,
		FH_MEM_RSSR_SHARED_CTRL_REG,
		FH_MEM_RSSR_RX_STATUS_REG,
		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH_TSSR_TX_STATUS_REG,
		FH_TSSR_TX_ERROR_REG
	};

#ifdef CONFIG_IWLWIFI_DEBUGFS
	if (buf) {
		int pos = 0;
		size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
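		/* i.e. 9 registers at up to 48 chars per line, plus 40
		 * bytes of headroom for the heading */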

		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;

		pos += scnprintf(*buf + pos, bufsz - pos,
				"FH register values:\n");

		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
			pos += scnprintf(*buf + pos, bufsz - pos,
				"  %34s: 0X%08x\n",
				get_fh_string(fh_tbl[i]),
				iwl_read_direct32(trans, fh_tbl[i]));

		return pos;
	}
#endif

	IWL_ERR(trans, "FH register values:\n");
	for (i = 0; i <  ARRAY_SIZE(fh_tbl); i++)
		IWL_ERR(trans, "  %34s: 0X%08x\n",
			get_fh_string(fh_tbl[i]),
			iwl_read_direct32(trans, fh_tbl[i]));

	return 0;
}

static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}

void iwl_pcie_dump_csr(struct iwl_trans *trans)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};
	IWL_ERR(trans, "CSR values:\n");
	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i <  ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(trans, "  %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
1600
			iwl_read32(trans, csr_tbl[i]));
1601 1602 1603
	}
}

#ifdef CONFIG_IWLWIFI_DEBUGFS
/* create and remove of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, trans,		\
				 &iwl_dbgfs_##name##_ops))		\
		goto err;						\
} while (0)

/* file operation */
#define DEBUGFS_READ_FUNC(name)                                         \
static ssize_t iwl_dbgfs_##name##_read(struct file *file,               \
					char __user *user_buf,          \
					size_t count, loff_t *ppos);

#define DEBUGFS_WRITE_FUNC(name)                                        \
static ssize_t iwl_dbgfs_##name##_write(struct file *file,              \
					const char __user *user_buf,    \
					size_t count, loff_t *ppos);


#define DEBUGFS_READ_FILE_OPS(name)					\
	DEBUGFS_READ_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
1628
	.open = simple_open,						\
1629 1630 1631
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)                                    \
	DEBUGFS_WRITE_FUNC(name);                                       \
static const struct file_operations iwl_dbgfs_##name##_ops = {          \
	.write = iwl_dbgfs_##name##_write,                              \
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
	DEBUGFS_READ_FUNC(name);					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};
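
/*
 * For documentation purposes: DEBUGFS_READ_WRITE_FILE_OPS(interrupt)
 * declares iwl_dbgfs_interrupt_read()/iwl_dbgfs_interrupt_write() and
 * emits the matching iwl_dbgfs_interrupt_ops struct, which
 * DEBUGFS_ADD_FILE(interrupt, parent, mode) then registers.
 */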

static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	struct iwl_queue *q;
	char *buf;
	int pos = 0;
	int cnt;
	int ret;
	size_t bufsz;

	bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues;

	if (!trans_pcie->txq)
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
				"hwq %.2d: read=%u write=%u use=%d stop=%d\n",
				cnt, q->read_ptr, q->write_ptr,
				!!test_bit(cnt, trans_pcie->queue_used),
				!!test_bit(cnt, trans_pcie->queue_stopped));
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	char buf[256];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
						rxq->read);
	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
						rxq->write);
	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
						rxq->free_count);
	if (rxq->rb_stts) {
		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
			 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
	} else {
		pos += scnprintf(buf + pos, bufsz - pos,
					"closed_rb_num: Not Allocated\n");
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}

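/* Report the interrupt statistics gathered in trans_pcie->isr_stats. */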
static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos,
			"Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
		isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
		isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
			"\tLast Restarting Code:  0x%X\n",
			isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
		isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
		isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
		"HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
		isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
		isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
		"Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
		isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
		isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

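/*
 * Writing the value 0 (e.g. "echo 0 > interrupt") clears the interrupt
 * statistics; any other value is parsed but leaves them untouched.
 */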
static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	char buf[8];
	int buf_size;
	u32 reset_flag;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%x", &reset_flag) != 1)
		return -EFAULT;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}

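/*
 * The written value is parsed (and validated) as a decimal number, but
 * iwl_pcie_dump_csr() currently ignores it and dumps all CSR registers.
 */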
static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int csr;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &csr) != 1)
		return -EFAULT;

	iwl_pcie_dump_csr(trans);

	return count;
}

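/*
 * iwl_pcie_dump_fh() allocates and fills a buffer with the FH register
 * dump; copy it to userspace and free it.
 */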
static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf = NULL;
	int pos = 0;
	ssize_t ret = -EFAULT;

	ret = pos = iwl_pcie_dump_fh(trans, &buf);
	if (buf) {
		ret = simple_read_from_buffer(user_buf,
					      count, ppos, buf, pos);
		kfree(buf);
	}

	return ret;
}

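/*
 * Any write injects a NIC error into the op_mode, mimicking a real
 * device error and thereby forcing a firmware restart.
 */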
static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;

	if (!trans->op_mode)
		return -EAGAIN;

	local_bh_disable();
	iwl_op_mode_nic_error(trans->op_mode);
	local_bh_enable();

	return count;
}

DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_WRITE_FILE_OPS(fw_restart);

/*
 * Create the debugfs files and directories
 */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR);
	return 0;

err:
	IWL_ERR(trans, "failed to create the trans debugfs entry\n");
	return -ENOMEM;
}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	return 0;
}
#endif /* CONFIG_IWLWIFI_DEBUGFS */

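/* PCIe implementation of the transport ops; see iwl-trans.h for the API */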
static const struct iwl_trans_ops trans_ops_pcie = {
	.start_hw = iwl_trans_pcie_start_hw,
	.stop_hw = iwl_trans_pcie_stop_hw,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	.wowlan_suspend = iwl_trans_pcie_wowlan_suspend,

	.send_cmd = iwl_pcie_send_cmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.txq_disable = iwl_pcie_txq_disable,
	.txq_enable = iwl_pcie_txq_enable,

	.dbgfs_register = iwl_trans_pcie_dbgfs_register,

	.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,

#ifdef CONFIG_PM_SLEEP
	.suspend = iwl_trans_pcie_suspend,
	.resume = iwl_trans_pcie_resume,
#endif
	.write8 = iwl_trans_pcie_write8,
	.write32 = iwl_trans_pcie_write32,
	.read32 = iwl_trans_pcie_read32,
	.configure = iwl_trans_pcie_configure,
	.set_pmi = iwl_trans_pcie_set_pmi,
};

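/*
 * Allocate the PCIe transport and take ownership of the PCI device:
 * enable it, set the DMA masks, map BAR0 and enable MSI.
 * Returns NULL on any failure.
 */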
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg)
{
	struct iwl_trans_pcie *trans_pcie;
	struct iwl_trans *trans;
	u16 pci_cmd;
	int err;

	trans = kzalloc(sizeof(struct iwl_trans) +
			sizeof(struct iwl_trans_pcie), GFP_KERNEL);
	if (!trans)
		return NULL;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans->ops = &trans_ops_pcie;
	trans->cfg = cfg;
	trans_pcie->trans = trans;
	spin_lock_init(&trans_pcie->irq_lock);
	init_waitqueue_head(&trans_pcie->ucode_write_waitq);

	/* W/A - seems to solve weird behavior. We need to remove this if we
	 * don't want to stay in L1 all the time. This wastes a lot of power */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
			       PCIE_LINK_STATE_CLKPM);

	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_no_pci;
	}

	pci_set_master(pdev);

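	/* Prefer a 36-bit DMA mask; fall back to 32-bit if unavailable. */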
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (err) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "No suitable DMA available.\n");
			goto out_pci_disable_device;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "pci_request_regions failed\n");
		goto out_pci_disable_device;
	}

	trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
	if (!trans_pcie->hw_base) {
		dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed\n");
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	err = pci_enable_msi(pdev);
	if (err) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "pci_enable_msi failed(0X%x)\n", err);
		/* enable rfkill interrupt: hw bug w/a */
		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
		}
	}

	trans->dev = &pdev->dev;
	trans_pcie->irq = pdev->irq;
	trans_pcie->pci_dev = pdev;
	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans_pcie->wait_command_queue);
	spin_lock_init(&trans->reg_lock);

	snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
		 "iwl_cmd_pool:%s", dev_name(trans->dev));

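	/* Per-transport slab cache from which device commands are allocated */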
	trans->dev_cmd_headroom = 0;
	trans->dev_cmd_pool =
		kmem_cache_create(trans->dev_cmd_pool_name,
				  sizeof(struct iwl_device_cmd)
				  + trans->dev_cmd_headroom,
				  sizeof(void *),
				  SLAB_HWCACHE_ALIGN,
				  NULL);

	if (!trans->dev_cmd_pool)
		goto out_pci_disable_msi;

	return trans;

out_pci_disable_msi:
	pci_disable_msi(pdev);
out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_no_pci:
	kfree(trans);
	return NULL;
}