/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-scd.h"
#include "iwl-agn-hw.h"
#include "iwl-fw-error-dump.h"
#include "internal.h"
#include "iwl-fh.h"

/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START	0x40000
#define IWL_FW_MEM_EXTENDED_END		0x57FFF

static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->fw_mon_page)
		return;

	dma_unmap_page(trans->dev, trans_pcie->fw_mon_phys,
		       trans_pcie->fw_mon_size, DMA_FROM_DEVICE);
	__free_pages(trans_pcie->fw_mon_page,
		     get_order(trans_pcie->fw_mon_size));
	trans_pcie->fw_mon_page = NULL;
	trans_pcie->fw_mon_phys = 0;
	trans_pcie->fw_mon_size = 0;
}

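/*
 * Allocate a buffer for the firmware monitor. The requested size is
 * 2^(11 + max_power) bytes, capped at 2^26 (64 MB); if that allocation
 * or its DMA mapping fails, progressively smaller powers of two are
 * tried, down to 2^11 bytes, so the monitor gets the largest buffer
 * available rather than failing outright.
 */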
static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct page *page = NULL;
	dma_addr_t phys;
	u32 size = 0;
	u8 power;

	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

	if (WARN(max_power > 26,
		 "External buffer size for monitor is too big %d, check the FW TLV\n",
		 max_power))
		return;

	if (trans_pcie->fw_mon_page) {
		dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys,
					   trans_pcie->fw_mon_size,
					   DMA_FROM_DEVICE);
		return;
	}

	phys = 0;
	for (power = max_power; power >= 11; power--) {
		int order;

		size = BIT(power);
		order = get_order(size);
		page = alloc_pages(__GFP_COMP | __GFP_NOWARN | __GFP_ZERO,
				   order);
		if (!page)
			continue;

		phys = dma_map_page(trans->dev, page, 0, PAGE_SIZE << order,
				    DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, phys)) {
			__free_pages(page, order);
			page = NULL;
			continue;
		}
		IWL_INFO(trans,
			 "Allocated 0x%08x bytes (order %d) for firmware monitor.\n",
			 size, order);
		break;
	}

	if (WARN_ON_ONCE(!page))
		return;

	if (power != max_power)
		IWL_ERR(trans,
			"Sorry - debug buffer is only %luK while you requested %luK\n",
			(unsigned long)BIT(power - 10),
			(unsigned long)BIT(max_power - 10));

	trans_pcie->fw_mon_page = page;
	trans_pcie->fw_mon_phys = phys;
	trans_pcie->fw_mon_size = size;
}

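/*
 * Indirect access to the shared (SHR) register block. The target
 * address goes into the HEEP control word together with what appears
 * to be an opcode in bits 31:28 (2 for a read, 3 for a write); the
 * data itself is transferred through the HEEP data register.
 */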
static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (2 << 28)));
	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}

static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (3 << 28)));
}

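/*
 * Select the device's power source: VAUX if requested and the device
 * can wake the system from D3cold, VMAIN otherwise. Only meaningful
 * on devices that have the APMG block.
 */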
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
	if (trans->cfg->apmg_not_supported)
		return;

	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
	else
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041

static void iwl_pcie_apm_config(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 lctl;
	u16 cap;

	/*
	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 *    costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
	if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
	else
		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
	trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
	dev_info(trans->dev, "L1 %sabled - LTR %sabled\n",
		 (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
		 trans->ltr_enabled ? "En" : "Dis");
}

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
	int ret = 0;
	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
		iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
			    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 *  don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg_val)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG,
			    trans->cfg->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
		goto out;
	}

	if (trans->cfg->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse - the workaround below is needed
		 * for 7260 / 3160 only, so we check
		 * host_interrupt_operation_mode even though the workaround
		 * itself is not related to host_interrupt_operation_mode.
		 *
		 * Enable the oscillator to count wake up time for L1 exit. This
		 * consumes slightly more power (100uA) - but allows to be sure
		 * that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard the
		 * value, set a bit, and yet again, read that same register
		 * just to discard the value. But that's the way the hardware
		 * seems to like it.
		 */
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
		iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
	 * bits do not disable clocks.  This preserves any hardware
	 * bits already set by default in "CLK_CTRL_REG" after reset.
	 */
	if (!trans->cfg->apmg_not_supported) {
		iwl_write_prph(trans, APMG_CLK_EN_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(20);

		/* Disable L1-Active */
		iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
			       APMG_RTC_INT_STT_RFKILL);
	}

	set_bit(STATUS_DEVICE_ENABLED, &trans->status);

out:
	return ret;
}

/*
 * Enable LP XTAL to avoid HW bug where device may consume much power if
 * FW is not loaded after device reset. LP XTAL is disabled by default
 * after device HW reset. Do it only if XTAL is fed by internal source.
 * Configure device's "persistence" mode to avoid resetting XTAL again when
 * SHRD_HW_RST occurs in S3.
 */
static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
{
	int ret;
	u32 apmg_gp1_reg;
	u32 apmg_xtal_cfg_reg;
	u32 dl_cfg_reg;

	/* Force XTAL ON */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);

	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is possible.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   25000);
	if (WARN_ON(ret < 0)) {
		IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n");
		/* Release XTAL ON request */
		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
		return;
	}

	/*
	 * Clear "disable persistence" to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
				    APMG_PCIDEV_STT_VAL_PERSIST_DIS);

	/*
	 * Force APMG XTAL to be active to prevent its disabling by HW
	 * caused by APMG idle state.
	 */
	apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
						    SHR_APMG_XTAL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg |
				 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);

	/*
	 * Reset entire device again - do controller reset (results in
	 * SHRD_HW_RST). Turn MAC off before proceeding.
	 */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/* Enable LP XTAL by indirect access through CSR */
	apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
				 SHR_APMG_GP1_WF_XTAL_LP_EN |
				 SHR_APMG_GP1_CHICKEN_BIT_SELECT);

	/* Clear delay line clock power up */
	dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
				 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);

	/*
	 * Enable persistence mode to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* Activates XTAL resources monitor */
	__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
				 CSR_MONITOR_XTAL_RESOURCES);

	/* Release XTAL ON request */
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
	udelay(10);

	/* Release APMG XTAL */
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg &
				 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
}

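/*
 * Stop bus-master DMA: request STOP_MASTER and poll up to 100 usec
 * for the MASTER_DISABLED acknowledgement from the device.
 */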
static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(trans, CSR_RESET,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret < 0)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");

	return ret;
}

static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	if (op_mode_leave) {
		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_pcie_apm_init(trans);

		/* inform ME that we are leaving */
		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
					  APMG_PCIDEV_STT_VAL_WAKE_ME);
		else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
				    CSR_HW_IF_CONFIG_REG_PREPARE |
				    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
		mdelay(5);
	}

	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	if (trans->cfg->lp_xtal_workaround) {
		iwl_pcie_apm_lp_xtal_enable(trans);
		return;
	}

	/* Reset the entire device */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}

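/*
 * Bring the NIC up to a working state: APM init (under irq_lock),
 * power source selection, op_mode specific NIC configuration, then
 * RX/TX queue setup and, where supported, shadow registers.
 */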
static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* nic_init */
	spin_lock(&trans_pcie->irq_lock);
	iwl_pcie_apm_init(trans);

	spin_unlock(&trans_pcie->irq_lock);

	iwl_pcie_set_pwr(trans, false);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_pcie_rx_init(trans);

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_pcie_tx_init(trans))
		return -ENOMEM;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	if (ret >= 0)
		iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int t = 0;
	int iter;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_pcie_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	for (iter = 0; iter < 10; iter++) {
		/* If HW is not ready, prepare the conditions to check again */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PREPARE);

		do {
			ret = iwl_pcie_set_hw_ready(trans);
			if (ret >= 0)
				return 0;

			usleep_range(200, 1000);
			t += 200;
		} while (t < 150000);
		msleep(25);
	}

	IWL_ERR(trans, "Couldn't prepare the card\n");

	return ret;
}

/*
 * ucode
 */
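/*
 * DMA one chunk of ucode into device SRAM through the FH service
 * channel: pause the channel, program the SRAM destination, the DRAM
 * source address and byte count, re-enable the channel, then wait up
 * to 5 seconds for the "ucode write complete" interrupt.
 */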
static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
				   dma_addr_t phy_addr, u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	trans_pcie->ucode_write_complete = false;

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write_direct32(trans,
			   FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
			   dst_addr);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
			   phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
			   (iwl_get_dma_hi_addr(phy_addr)
				<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
			   FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE	|
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE	|
			   FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Failed to load firmware chunk!\n");
		return -ETIMEDOUT;
	}

	return 0;
}

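/*
 * Load a single ucode section: allocate a coherent bounce buffer (the
 * full section size up to FH_MEM_TB_MAX_LENGTH, falling back to
 * PAGE_SIZE chunks if memory is tight), copy and DMA the section
 * chunk by chunk, and wrap chunks that target the extended SRAM range
 * with the LMPM_CHICK extended-address-space bit.
 */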
static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
			    const struct fw_desc *section)
{
	u8 *v_addr;
	dma_addr_t p_addr;
	u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);

	v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
				    GFP_KERNEL | __GFP_NOWARN);
	if (!v_addr) {
		IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
		chunk_sz = PAGE_SIZE;
		v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
					    &p_addr, GFP_KERNEL);
		if (!v_addr)
			return -ENOMEM;
	}

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		u32 copy_size, dst_addr;
		bool extended_addr = false;

		copy_size = min_t(u32, chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWL_FW_MEM_EXTENDED_END)
			extended_addr = true;

		if (extended_addr)
			iwl_set_bits_prph(trans, LMPM_CHICK,
					  LMPM_CHICK_EXTENDED_ADDR_SPACE);

		memcpy(v_addr, (u8 *)section->data + offset, copy_size);
		ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
						   copy_size);

		if (extended_addr)
			iwl_clear_bits_prph(trans, LMPM_CHICK,
					    LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			IWL_ERR(trans,
				"Could not load the [%d] uCode section\n",
				section_num);
			break;
		}
	}

	dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
	return ret;
}

/*
 * Driver takes ownership of the secure machine before FW load
 * to prevent a race with the BT load.
 * W/A for a ROM bug. (should be removed in the next Si step)
 */
static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
{
	u32 val, loop = 1000;

	/*
	 * Check the RSA semaphore is accessible.
	 * If the HW isn't locked and the rsa semaphore isn't accessible,
	 * we are in trouble.
	 */
	val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
	if (val & (BIT(1) | BIT(17))) {
		IWL_INFO(trans,
			 "can't access the RSA semaphore it is write protected\n");
		return 0;
	}

	/* take ownership on the AUX IF */
	iwl_write_prph(trans, WFPM_CTRL_REG, WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK);
	iwl_write_prph(trans, AUX_MISC_MASTER1_EN, AUX_MISC_MASTER1_EN_SBE_MSK);

	do {
		iwl_write_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS, 0x1);
		val = iwl_read_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS);
		if (val == 0x1) {
			iwl_write_prph(trans, RSA_ENABLE, 0);
			return 0;
		}

		udelay(10);
		loop--;
	} while (loop > 0);

	IWL_ERR(trans, "Failed to take ownership on secure machine\n");
	return -EIO;
}

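/*
 * Family-8000 variant of the CPU section loader. After each section
 * the ucode is notified through FH_UCODE_LOAD_STATUS with a growing
 * bitmask (sec_num, shifted by 16 for CPU2), and loading is finalized
 * by writing 0xFFFF (CPU1) or 0xFFFFFFFF (CPU2) to the same register.
 */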
static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
					   const struct fw_img *image,
					   int cpu,
					   int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	u32 val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;

		/* Notify the ucode of the loaded section number and status */
		val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
		val = val | (sec_num << shift_param);
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
		sec_num = (sec_num << 1) | 0x1;
	}

	*first_ucode_section = last_read_idx;

	if (cpu == 1)
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFF);
	else
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);

800 801
	return 0;
}

static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
				      const struct fw_img *image,
				      int cpu,
				      int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0;
	u32 last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		iwl_set_bits_prph(trans,
				  CSR_UCODE_LOAD_STATUS_ADDR,
				  (LMPM_CPU_UCODE_LOADING_COMPLETED |
				   LMPM_CPU_HDRS_LOADING_COMPLETED |
				   LMPM_CPU_UCODE_LOADING_STARTED) <<
					shift_param);

	*first_ucode_section = last_read_idx;

	return 0;
}

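/*
 * Apply the debug destination TLV that came with the firmware file:
 * allocate an external monitor buffer if that mode is requested,
 * replay the TLV's register operations (CSR/PRPH writes and bit
 * flips), then point the monitor base/end registers at the buffer.
 */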
static void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv;
	int i;

	if (dest->version)
		IWL_ERR(trans,
			"DBG DEST version is %d - expect issues\n",
			dest->version);

	IWL_INFO(trans, "Applying debug destination %s\n",
		 get_fw_dbg_mode_string(dest->monitor_mode));

	if (dest->monitor_mode == EXTERNAL_MODE)
		iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
	else
		IWL_WARN(trans, "PCI should have external buffer debug\n");

	for (i = 0; i < trans->dbg_dest_reg_num; i++) {
		u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
		u32 val = le32_to_cpu(dest->reg_ops[i].val);

		switch (dest->reg_ops[i].op) {
		case CSR_ASSIGN:
			iwl_write32(trans, addr, val);
			break;
		case CSR_SETBIT:
			iwl_set_bit(trans, addr, BIT(val));
			break;
		case CSR_CLEARBIT:
			iwl_clear_bit(trans, addr, BIT(val));
			break;
		case PRPH_ASSIGN:
			iwl_write_prph(trans, addr, val);
			break;
		case PRPH_SETBIT:
			iwl_set_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_CLEARBIT:
			iwl_clear_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_BLOCKBIT:
			if (iwl_read_prph(trans, addr) & BIT(val)) {
				IWL_ERR(trans,
					"BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
					val, addr);
				goto monitor;
			}
			break;
		default:
			IWL_ERR(trans, "FW debug - unknown OP %d\n",
				dest->reg_ops[i].op);
			break;
		}
	}

monitor:
	if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
		iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
			       trans_pcie->fw_mon_phys >> dest->base_shift);
		iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
			       (trans_pcie->fw_mon_phys +
				trans_pcie->fw_mon_size) >> dest->end_shift);
	}
}

static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
				const struct fw_img *image)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
		iwl_write_prph(trans,
			       LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
			       LMPM_SECURE_CPU2_HDR_MEM_SPACE);

		/* load to FW the binary sections of CPU2 */
		ret = iwl_pcie_load_cpu_sections(trans, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	/* supported for 7000 only for the moment */
	if (iwlwifi_mod_params.fw_monitor &&
	    trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
		iwl_pcie_alloc_fw_monitor(trans, 0);

		if (trans_pcie->fw_mon_size) {
			iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
				       trans_pcie->fw_mon_phys >> 4);
			iwl_write_prph(trans, MON_BUFF_END_ADDR,
				       (trans_pcie->fw_mon_phys +
					trans_pcie->fw_mon_size) >> 4);
		}
	} else if (trans->dbg_dest_tlv) {
		iwl_pcie_apply_destination(trans);
	}

	/* release CPU reset */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}

static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
					  const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	if (trans->dbg_dest_tlv)
		iwl_pcie_apply_destination(trans);

	/* TODO: remove in the next Si step */
	ret = iwl_pcie_rsa_race_bug_wa(trans);
	if (ret)
		return ret;

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);

	/* load to FW the binary Secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
					      &first_ucode_section);
	if (ret)
		return ret;

	/* load to FW the binary sections of CPU2 */
	return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
					       &first_ucode_section);
}

static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw, bool run_in_rfkill)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int ret;

	mutex_lock(&trans_pcie->mutex);

	/* Someone called stop_device, don't try to start_fw */
	if (trans_pcie->is_down) {
		IWL_WARN(trans,
			 "Can't start_fw since the HW hasn't been started\n");
		ret = -EIO;
		goto out;
	}

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		ret = -EIO;
		goto out;
	}

	iwl_enable_rfkill_int(trans);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans->status);
	else
		clear_bit(STATUS_RFKILL, &trans->status);
	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
	if (hw_rfkill && !run_in_rfkill) {
		ret = -ERFKILL;
		goto out;
	}

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		goto out;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		ret = iwl_pcie_load_given_ucode_8000(trans, fw);
	else
		ret = iwl_pcie_load_given_ucode(trans, fw);

out:
	mutex_unlock(&trans_pcie->mutex);
	return ret;
}

static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
	iwl_pcie_tx_start(trans, scd_addr);
}

static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill, was_hw_rfkill;

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->is_down)
		return;

	trans_pcie->is_down = true;

	was_hw_rfkill = iwl_is_rfkill_set(trans);

	/* tell the device to stop sending interrupts */
	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);

	/* device going down, Stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
		IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n");
		iwl_pcie_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		if (!trans->cfg->apmg_not_supported) {
			iwl_write_prph(trans, APMG_CLK_DIS_REG,
				       APMG_CLK_VAL_DMA_CLK_RQT);
			udelay(5);
		}
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_apm_stop(trans, false);

	/* stop and reset the on-board processor */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
	udelay(20);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain verions of the hardware.
	 * Certain devices also keep sending HW RF kill interrupt all
	 * the time, unless the interrupt is ACKed even if the interrupt
	 * should be masked. Re-ACK all the interrupts here.
	 */
	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);


	/* clear all status bits */
	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	clear_bit(STATUS_INT_ENABLED, &trans->status);
	clear_bit(STATUS_TPOWER_PMI, &trans->status);
	clear_bit(STATUS_RFKILL, &trans->status);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwl_enable_rfkill_int(trans);

	/*
	 * Check again since the RF kill state may have changed while
	 * all the interrupts were disabled, in this case we couldn't
	 * receive the RF kill interrupt and update the state in the
	 * op_mode.
	 * Don't call the op_mode if the rfkill state hasn't changed.
	 * This allows the op_mode to call stop_device from the rfkill
	 * notification without endless recursion. Under very rare
	 * circumstances, we might have a small recursion if the rfkill
	 * state changed exactly now while we were called from stop_device.
	 * This is very unlikely but can happen and is supported.
	 */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans->status);
	else
		clear_bit(STATUS_RFKILL, &trans->status);
	if (hw_rfkill != was_hw_rfkill)
		iwl_trans_pcie_rf_kill(trans, hw_rfkill);

	/* re-take ownership to prevent other users from stealing the device */
	iwl_pcie_prepare_card_hw(trans);
}

static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	mutex_lock(&trans_pcie->mutex);
	_iwl_trans_pcie_stop_device(trans, low_power);
	mutex_unlock(&trans_pcie->mutex);
}

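/*
 * Propagate an RF-kill state change to the op_mode; if the op_mode
 * asks for the HW to go down (returns true), stop the device. Must be
 * called with trans_pcie->mutex held.
 */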
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
		_iwl_trans_pcie_stop_device(trans, true);
}

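/*
 * Enter D3 (WoWLAN) suspend: optionally enable persistence mode for
 * d0i3, mask interrupts, and - unless in test mode - release the MAC
 * access request, reset the TX queues (some of their registers are
 * lost in S3) and switch the device to the VAUX power source.
 */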
static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans->wowlan_d0i3) {
		/* Enable persistence mode to avoid reset */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
	}

	iwl_disable_interrupts(trans);

	/*
	 * in testing mode, the host stays awake and the
	 * hardware won't be reset (not even partially)
	 */
	if (test)
		return;

	iwl_pcie_disable_ict(trans);

	synchronize_irq(trans_pcie->pci_dev->irq);

	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	if (!trans->wowlan_d0i3) {
		/*
		 * reset TX queues -- some of their registers reset during S3
		 * so if we don't reset everything here the D3 image would try
		 * to execute some invalid memory upon resume
		 */
		iwl_trans_pcie_tx_reset(trans);
	}

	iwl_pcie_set_pwr(trans, true);
}

static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
				    enum iwl_d3_status *status,
				    bool test)
{
	u32 val;
	int ret;

	if (test) {
		iwl_enable_interrupts(trans);
		*status = IWL_D3_STATUS_ALIVE;
		return 0;
	}

	/*
	 * Also enables interrupts - none will happen as the device doesn't
	 * know we're waking it up, only when the opmode actually tells it
	 * after this call.
	 */
	iwl_pcie_reset_ict(trans);

	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		udelay(2);

	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   25000);
	if (ret < 0) {
		IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
		return ret;
	}

	iwl_pcie_set_pwr(trans, false);

	if (trans->wowlan_d0i3) {
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	} else {
		iwl_trans_pcie_tx_reset(trans);

		ret = iwl_pcie_rx_init(trans);
		if (ret) {
			IWL_ERR(trans,
				"Failed to resume the device (RX reset)\n");
			return ret;
		}
	}

	val = iwl_read32(trans, CSR_RESET);
	if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
		*status = IWL_D3_STATUS_RESET;
	else
		*status = IWL_D3_STATUS_ALIVE;

	return 0;
}

static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int err;

	lockdep_assert_held(&trans_pcie->mutex);

	err = iwl_pcie_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
		return err;
	}

	/* Reset the entire device */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	usleep_range(10, 15);

	iwl_pcie_apm_init(trans);

	/* From now on, the op_mode will be kept updated about RF kill state */
	iwl_enable_rfkill_int(trans);

	/* Set is_down to false here so that...*/
	trans_pcie->is_down = false;

	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans->status);
	else
		clear_bit(STATUS_RFKILL, &trans->status);
	/* ... rfkill can call stop_device and set it false if needed */
	iwl_trans_pcie_rf_kill(trans, hw_rfkill);

	return 0;
}

static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	mutex_lock(&trans_pcie->mutex);
	ret = _iwl_trans_pcie_start_hw(trans, low_power);
	mutex_unlock(&trans_pcie->mutex);

	return ret;
}

static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	mutex_lock(&trans_pcie->mutex);

	/* disable interrupts - don't enable HW RF kill interrupt */
	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);

	iwl_pcie_apm_stop(trans, true);

	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);

	iwl_pcie_disable_ict(trans);

	mutex_unlock(&trans_pcie->mutex);

	synchronize_irq(trans_pcie->pci_dev->irq);
}

static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
{
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
			       ((reg & 0x000FFFFF) | (3 << 24)));
	return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
}

static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
				      u32 val)
{
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
			       ((addr & 0x000FFFFF) | (3 << 24)));
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}

static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
{
	WARN_ON(1);
	return 0;
}

static void iwl_trans_pcie_configure(struct iwl_trans *trans,
				     const struct iwl_trans_config *trans_cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->cmd_queue = trans_cfg->cmd_queue;
	trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
	trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
		trans_pcie->n_no_reclaim_cmds = 0;
	else
		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
	if (trans_pcie->n_no_reclaim_cmds)
		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));

	trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
	if (trans_pcie->rx_buf_size_8k)
		trans_pcie->rx_page_order = get_order(8 * 1024);
	else
		trans_pcie->rx_page_order = get_order(4 * 1024);

	trans_pcie->wide_cmd_header = trans_cfg->wide_cmd_header;
	trans_pcie->command_names = trans_cfg->command_names;
	trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
	trans_pcie->scd_set_active = trans_cfg->scd_set_active;

	/* init ref_count to 1 (should be cleared when ucode is loaded) */
	trans_pcie->ref_count = 1;

	/* Initialize NAPI here - it should be before registering to mac80211
	 * in the opmode but after the HW struct is allocated.
	 * As this function may be called again in some corner cases don't
	 * do anything if NAPI was already initialized.
	 */
	if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) {
		init_dummy_netdev(&trans_pcie->napi_dev);
		iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi,
				     &trans_pcie->napi_dev,
				     iwl_pcie_dummy_napi_poll, 64);
	}
}

void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	synchronize_irq(trans_pcie->pci_dev->irq);

	iwl_pcie_tx_free(trans);
	iwl_pcie_rx_free(trans);

	free_irq(trans_pcie->pci_dev->irq, trans);
	iwl_pcie_free_ict(trans);

	pci_disable_msi(trans_pcie->pci_dev);
	iounmap(trans_pcie->hw_base);
	pci_release_regions(trans_pcie->pci_dev);
	pci_disable_device(trans_pcie->pci_dev);

	if (trans_pcie->napi.poll)
		netif_napi_del(&trans_pcie->napi);

	iwl_pcie_free_fw_monitor(trans);

	iwl_trans_free(trans);
}

static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans->status);
}

static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
						unsigned long *flags)
{
	int ret;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans_pcie->reg_lock, *flags);

	if (trans_pcie->cmd_hold_nic_awake)
		goto out;

	/* this bit wakes up the NIC */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		udelay(2);

	/*
	 * These bits say the device is running, and should keep running for
	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
	 * but they do not indicate that embedded SRAM is restored yet;
	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
	 * to/from host DRAM when sleeping/waking for power-saving.
	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
	 * series of register accesses are expected (e.g. reading Event Log),
	 * to keep device from sleeping.
	 *
	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
	 * SRAM is okay/restored.  We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
	 *
	 * 5000 series and later (including 1000 series) have non-volatile SRAM,
	 * and do not save/restore SRAM when power cycling.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (unlikely(ret < 0)) {
		iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
		if (!silent) {
			u32 val = iwl_read32(trans, CSR_GP_CNTRL);
			WARN_ONCE(1,
				  "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
				  val);
			spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
			return false;
		}
	}

out:
	/*
	 * Fool sparse by faking we release the lock - sparse will
	 * track nic_access anyway.
	 */
	__release(&trans_pcie->reg_lock);
	return true;
}

static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
					      unsigned long *flags)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	/*
	 * Fool sparse by faking we acquire the lock - sparse will
	 * track nic_access anyway.
	 */
	__acquire(&trans_pcie->reg_lock);

	if (trans_pcie->cmd_hold_nic_awake)
		goto out;

	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	/*
	 * Above we read the CSR_GP_CNTRL register, which will flush
	 * any previous writes, but we need the write that clears the
	 * MAC_ACCESS_REQ bit to be performed before any other writes
	 * scheduled on different CPUs (after we drop reg_lock).
	 */
	mmiowb();
out:
	spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
}

static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
				   void *buf, int dwords)
{
	unsigned long flags;
	int offs, ret = 0;
	u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans, false, &flags)) {
		iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		ret = -EBUSY;
	}
	return ret;
}

static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
				    const void *buf, int dwords)
{
	unsigned long flags;
	int offs, ret = 0;
	const u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans, false, &flags)) {
		iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			iwl_write32(trans, HBUS_TARG_MEM_WDAT,
				    vals ? vals[offs] : 0);
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		ret = -EBUSY;
	}
	return ret;
}

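/*
 * Freeze or wake the stuck-queue timer of every TX queue set in
 * @txqs. Freezing a non-empty queue remembers how long the timer had
 * left and deletes it; waking re-arms the timer with that saved
 * remainder.
 */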
static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
					    unsigned long txqs,
					    bool freeze)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int queue;

	for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
		struct iwl_txq *txq = &trans_pcie->txq[queue];
		unsigned long now;

		spin_lock_bh(&txq->lock);

		now = jiffies;

		if (txq->frozen == freeze)
			goto next_queue;

		IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
				    freeze ? "Freezing" : "Waking", queue);

		txq->frozen = freeze;

		if (txq->q.read_ptr == txq->q.write_ptr)
			goto next_queue;

		if (freeze) {
			if (unlikely(time_after(now,
						txq->stuck_timer.expires))) {
				/*
				 * The timer should have fired, maybe it is
				 * spinning right now on the lock.
				 */
				goto next_queue;
			}
			/* remember how long until the timer fires */
			txq->frozen_expiry_remainder =
				txq->stuck_timer.expires - now;
			del_timer(&txq->stuck_timer);
			goto next_queue;
		}

		/*
		 * Wake a non-empty queue -> arm timer with the
		 * remainder before it froze
		 */
		mod_timer(&txq->stuck_timer,
			  now + txq->frozen_expiry_remainder);

next_queue:
		spin_unlock_bh(&txq->lock);
	}
}

#define IWL_FLUSH_WAIT_MS	2000

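/*
 * Wait up to IWL_FLUSH_WAIT_MS per queue for the TX queues selected
 * in @txq_bm to drain. On timeout, dump the scheduler state (SCD SRAM
 * queue status, FH TRB registers, per-queue FIFO mappings) to help
 * debug the stuck queue.
 */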
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	struct iwl_queue *q;
	int cnt;
	unsigned long now = jiffies;
	u32 scd_sram_addr;
	u8 buf[16];
	int ret = 0;

	/* waiting for all the tx frames complete might take a while */
	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		u8 wr_ptr;

		if (cnt == trans_pcie->cmd_queue)
			continue;
		if (!test_bit(cnt, trans_pcie->queue_used))
			continue;
		if (!(BIT(cnt) & txq_bm))
			continue;

		IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		wr_ptr = ACCESS_ONCE(q->write_ptr);

		while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
		       !time_after(jiffies,
				   now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
			u8 write_ptr = ACCESS_ONCE(q->write_ptr);

			if (WARN_ONCE(wr_ptr != write_ptr,
				      "WR pointer moved while flushing %d -> %d\n",
				      wr_ptr, write_ptr))
				return -ETIMEDOUT;
			msleep(1);
		}

		if (q->read_ptr != q->write_ptr) {
			IWL_ERR(trans,
				"failed to flush all tx fifo queues Q %d\n", cnt);
			ret = -ETIMEDOUT;
			break;
		}
		IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
	}

	if (!ret)
		return 0;

	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->q.read_ptr, txq->q.write_ptr);

	scd_sram_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
	iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

	iwl_print_hex_error(trans, buf, sizeof(buf));

	for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
			iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
		u32 tbl_dw =
			iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
					     SCD_TRANS_TBL_OFFSET_QUEUE(cnt));

		if (cnt & 0x1)
			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
		else
			tbl_dw = tbl_dw & 0x0000FFFF;

		IWL_ERR(trans,
			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
			cnt, active ? "" : "in", fifo, tbl_dw,
			iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
				(TFD_QUEUE_SIZE_MAX - 1),
			iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
	}

	return ret;
}

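/* atomically apply @mask/@value to CSR register @reg, under reg_lock */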
static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
					 u32 mask, u32 value)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}

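/*
 * Runtime PM (D0i3) reference counting helpers. Both are no-ops when D0i3
 * is disabled through the d0i3_disable module parameter; the counter itself
 * is protected by ref_lock.
 */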
void iwl_trans_pcie_ref(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	if (iwlwifi_mod_params.d0i3_disable)
		return;

	spin_lock_irqsave(&trans_pcie->ref_lock, flags);
	IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
	trans_pcie->ref_count++;
	spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
}

void iwl_trans_pcie_unref(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	if (iwlwifi_mod_params.d0i3_disable)
		return;

	spin_lock_irqsave(&trans_pcie->ref_lock, flags);
	IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
	if (WARN_ON_ONCE(trans_pcie->ref_count == 0)) {
		spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
		return;
	}
	trans_pcie->ref_count--;
	spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
}

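/* map a CSR register offset to its name, for the dump below */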
static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_MONITOR_STATUS_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}

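/* log the current value of each CSR register listed in csr_tbl */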
void iwl_pcie_dump_csr(struct iwl_trans *trans)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_MONITOR_STATUS_REG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};
	IWL_ERR(trans, "CSR values:\n");
	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(trans, "  %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(trans, csr_tbl[i]));
	}
}

#ifdef CONFIG_IWLWIFI_DEBUGFS
/* creation and removal of debugfs files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, trans,		\
				 &iwl_dbgfs_##name##_ops))		\
		goto err;						\
} while (0)

/* file operation */
#define DEBUGFS_READ_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

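/* debugfs read handler: one line of state (pointers and flags) per TX queue */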
static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	struct iwl_queue *q;
	char *buf;
	int pos = 0;
	int cnt;
	int ret;
	size_t bufsz;

	bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues;

	if (!trans_pcie->txq)
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
				"hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
				cnt, q->read_ptr, q->write_ptr,
				!!test_bit(cnt, trans_pcie->queue_used),
				!!test_bit(cnt, trans_pcie->queue_stopped),
				txq->need_update, txq->frozen,
				(cnt == trans_pcie->cmd_queue ? " HCMD" : ""));
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

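/* debugfs read handler: RX queue pointers, free count and closed_rb_num */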
static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	char buf[256];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
						rxq->read);
	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
						rxq->write);
	pos += scnprintf(buf + pos, bufsz - pos, "write_actual: %u\n",
						rxq->write_actual);
	pos += scnprintf(buf + pos, bufsz - pos, "need_update: %d\n",
						rxq->need_update);
	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
						rxq->free_count);
	if (rxq->rb_stts) {
		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
			 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
	} else {
		pos += scnprintf(buf + pos, bufsz - pos,
					"closed_rb_num: Not Allocated\n");
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}

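/*
 * debugfs handlers for the interrupt statistics: reading produces a report,
 * writing the value 0 resets all counters.
 */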
static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos,
			"Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
		isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
		isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
			"\tLast Restarting Code:  0x%X\n",
			isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
		isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
		isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
		"HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
		isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
		isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
		"Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
		isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
		isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	char buf[8];
	int buf_size;
	u32 reset_flag;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%x", &reset_flag) != 1)
		return -EFAULT;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}

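/*
 * debugfs write handler: any number written triggers a CSR dump to the log;
 * the parsed value itself is not used beyond input validation.
 */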
static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int csr;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &csr) != 1)
		return -EFAULT;

	iwl_pcie_dump_csr(trans);

	return count;
}

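/* debugfs read handler: dump of the FH (flow handler) registers */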
static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf = NULL;
	ssize_t ret;

	ret = iwl_dump_fh(trans, &buf);
	if (ret < 0)
		return ret;
	if (!buf)
		return -EINVAL;
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
	kfree(buf);
	return ret;
}

DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);

/* Create the debugfs files and directories */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
	return 0;

err:
	IWL_ERR(trans, "failed to create the trans debugfs entry\n");
	return -ENOMEM;
}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	return 0;
}
#endif /* CONFIG_IWLWIFI_DEBUGFS */

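/* sum the lengths of a TFD's transfer buffers to get the total command length */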
static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd)
{
	u32 cmdlen = 0;
	int i;

	for (i = 0; i < IWL_NUM_OF_TBS; i++)
		cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i);

	return cmdlen;
}

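/*
 * Periphery register ranges captured in firmware error dumps. Both the
 * start and the end address of every range are included in the dump.
 */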
static const struct {
	u32 start, end;
} iwl_prph_dump_addr[] = {
	{ .start = 0x00a00000, .end = 0x00a00000 },
	{ .start = 0x00a0000c, .end = 0x00a00024 },
	{ .start = 0x00a0002c, .end = 0x00a0003c },
	{ .start = 0x00a00410, .end = 0x00a00418 },
	{ .start = 0x00a00420, .end = 0x00a00420 },
	{ .start = 0x00a00428, .end = 0x00a00428 },
	{ .start = 0x00a00430, .end = 0x00a0043c },
	{ .start = 0x00a00444, .end = 0x00a00444 },
	{ .start = 0x00a004c0, .end = 0x00a004cc },
	{ .start = 0x00a004d8, .end = 0x00a004d8 },
	{ .start = 0x00a004e0, .end = 0x00a004f0 },
	{ .start = 0x00a00840, .end = 0x00a00840 },
	{ .start = 0x00a00850, .end = 0x00a00858 },
	{ .start = 0x00a01004, .end = 0x00a01008 },
	{ .start = 0x00a01010, .end = 0x00a01010 },
	{ .start = 0x00a01018, .end = 0x00a01018 },
	{ .start = 0x00a01024, .end = 0x00a01024 },
	{ .start = 0x00a0102c, .end = 0x00a01034 },
	{ .start = 0x00a0103c, .end = 0x00a01040 },
	{ .start = 0x00a01048, .end = 0x00a01094 },
	{ .start = 0x00a01c00, .end = 0x00a01c20 },
	{ .start = 0x00a01c58, .end = 0x00a01c58 },
	{ .start = 0x00a01c7c, .end = 0x00a01c7c },
	{ .start = 0x00a01c28, .end = 0x00a01c54 },
	{ .start = 0x00a01c5c, .end = 0x00a01c5c },
	{ .start = 0x00a01c60, .end = 0x00a01cdc },
	{ .start = 0x00a01ce0, .end = 0x00a01d0c },
	{ .start = 0x00a01d18, .end = 0x00a01d20 },
	{ .start = 0x00a01d2c, .end = 0x00a01d30 },
	{ .start = 0x00a01d40, .end = 0x00a01d5c },
	{ .start = 0x00a01d80, .end = 0x00a01d80 },
	{ .start = 0x00a01d98, .end = 0x00a01d9c },
	{ .start = 0x00a01da8, .end = 0x00a01da8 },
	{ .start = 0x00a01db8, .end = 0x00a01df4 },
	{ .start = 0x00a01dc0, .end = 0x00a01dfc },
	{ .start = 0x00a01e00, .end = 0x00a01e2c },
	{ .start = 0x00a01e40, .end = 0x00a01e60 },
	{ .start = 0x00a01e68, .end = 0x00a01e6c },
	{ .start = 0x00a01e74, .end = 0x00a01e74 },
	{ .start = 0x00a01e84, .end = 0x00a01e90 },
	{ .start = 0x00a01e9c, .end = 0x00a01ec4 },
	{ .start = 0x00a01ed0, .end = 0x00a01ee0 },
	{ .start = 0x00a01f00, .end = 0x00a01f1c },
	{ .start = 0x00a01f44, .end = 0x00a01ffc },
	{ .start = 0x00a02000, .end = 0x00a02048 },
	{ .start = 0x00a02068, .end = 0x00a020f0 },
	{ .start = 0x00a02100, .end = 0x00a02118 },
	{ .start = 0x00a02140, .end = 0x00a0214c },
	{ .start = 0x00a02168, .end = 0x00a0218c },
	{ .start = 0x00a021c0, .end = 0x00a021c0 },
	{ .start = 0x00a02400, .end = 0x00a02410 },
	{ .start = 0x00a02418, .end = 0x00a02420 },
	{ .start = 0x00a02428, .end = 0x00a0242c },
	{ .start = 0x00a02434, .end = 0x00a02434 },
	{ .start = 0x00a02440, .end = 0x00a02460 },
	{ .start = 0x00a02468, .end = 0x00a024b0 },
	{ .start = 0x00a024c8, .end = 0x00a024cc },
	{ .start = 0x00a02500, .end = 0x00a02504 },
	{ .start = 0x00a0250c, .end = 0x00a02510 },
	{ .start = 0x00a02540, .end = 0x00a02554 },
	{ .start = 0x00a02580, .end = 0x00a025f4 },
	{ .start = 0x00a02600, .end = 0x00a0260c },
	{ .start = 0x00a02648, .end = 0x00a02650 },
	{ .start = 0x00a02680, .end = 0x00a02680 },
	{ .start = 0x00a026c0, .end = 0x00a026d0 },
	{ .start = 0x00a02700, .end = 0x00a0270c },
	{ .start = 0x00a02804, .end = 0x00a02804 },
	{ .start = 0x00a02818, .end = 0x00a0281c },
	{ .start = 0x00a02c00, .end = 0x00a02db4 },
	{ .start = 0x00a02df4, .end = 0x00a02fb0 },
	{ .start = 0x00a03000, .end = 0x00a03014 },
	{ .start = 0x00a0301c, .end = 0x00a0302c },
	{ .start = 0x00a03034, .end = 0x00a03038 },
	{ .start = 0x00a03040, .end = 0x00a03048 },
	{ .start = 0x00a03060, .end = 0x00a03068 },
	{ .start = 0x00a03070, .end = 0x00a03074 },
	{ .start = 0x00a0307c, .end = 0x00a0307c },
	{ .start = 0x00a03080, .end = 0x00a03084 },
	{ .start = 0x00a0308c, .end = 0x00a03090 },
	{ .start = 0x00a03098, .end = 0x00a03098 },
	{ .start = 0x00a030a0, .end = 0x00a030a0 },
	{ .start = 0x00a030a8, .end = 0x00a030b4 },
	{ .start = 0x00a030bc, .end = 0x00a030bc },
	{ .start = 0x00a030c0, .end = 0x00a0312c },
	{ .start = 0x00a03c00, .end = 0x00a03c5c },
	{ .start = 0x00a04400, .end = 0x00a04454 },
	{ .start = 0x00a04460, .end = 0x00a04474 },
	{ .start = 0x00a044c0, .end = 0x00a044ec },
	{ .start = 0x00a04500, .end = 0x00a04504 },
	{ .start = 0x00a04510, .end = 0x00a04538 },
	{ .start = 0x00a04540, .end = 0x00a04548 },
	{ .start = 0x00a04560, .end = 0x00a0457c },
	{ .start = 0x00a04590, .end = 0x00a04598 },
	{ .start = 0x00a045c0, .end = 0x00a045f4 },
};

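/*
 * Read all ranges listed in iwl_prph_dump_addr into PRPH dump chunks and
 * return the total number of bytes added (0 if NIC access was not granted).
 */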
static u32 iwl_trans_pcie_dump_prph(struct iwl_trans *trans,
				    struct iwl_fw_error_dump_data **data)
{
	struct iwl_fw_error_dump_prph *prph;
	unsigned long flags;
	u32 prph_len = 0, i;

	if (!iwl_trans_grab_nic_access(trans, false, &flags))
		return 0;

	for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
		/* The range includes both boundaries */
		int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
			 iwl_prph_dump_addr[i].start + 4;
		int reg;
		__le32 *val;

		prph_len += sizeof(**data) + sizeof(*prph) + num_bytes_in_chunk;

		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH);
		(*data)->len = cpu_to_le32(sizeof(*prph) +
					num_bytes_in_chunk);
		prph = (void *)(*data)->data;
		prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start);
		val = (void *)prph->data;

		for (reg = iwl_prph_dump_addr[i].start;
		     reg <= iwl_prph_dump_addr[i].end;
		     reg += 4)
			*val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans,
								      reg));
		*data = iwl_fw_error_next_data(*data);
	}

	iwl_trans_release_nic_access(trans, &flags);

	return prph_len;
}

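/*
 * Copy up to @allocated_rb_nums receive buffers into the dump. Each page is
 * unmapped for CPU access, copied, and then remapped for the device.
 */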
static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
				   struct iwl_fw_error_dump_data **data,
				   int allocated_rb_nums)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	u32 i, r, j, rb_len = 0;

	spin_lock(&rxq->lock);

	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;

	for (i = rxq->read, j = 0;
	     i != r && j < allocated_rb_nums;
	     i = (i + 1) & RX_QUEUE_MASK, j++) {
		struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
		struct iwl_fw_error_dump_rb *rb;

		dma_unmap_page(trans->dev, rxb->page_dma, max_len,
			       DMA_FROM_DEVICE);

		rb_len += sizeof(**data) + sizeof(*rb) + max_len;

		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
		(*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
		rb = (void *)(*data)->data;
		rb->index = cpu_to_le32(i);
		memcpy(rb->data, page_address(rxb->page), max_len);
		/* remap the page; the normal unmap/free path expects a valid mapping */
		rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0,
					     max_len, DMA_FROM_DEVICE);

		*data = iwl_fw_error_next_data(*data);
	}

	spin_unlock(&rxq->lock);

	return rb_len;
}
#define IWL_CSR_TO_DUMP (0x250)

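/* snapshot the first IWL_CSR_TO_DUMP bytes of CSR space into the dump */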
static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
				   struct iwl_fw_error_dump_data **data)
{
	u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
	__le32 *val;
	int i;

	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
	(*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
	val = (void *)(*data)->data;

	for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
		*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));

	*data = iwl_fw_error_next_data(*data);

	return csr_len;
}

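/* snapshot the FH register range (FH_MEM_LOWER_BOUND..FH_MEM_UPPER_BOUND) */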
static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
				       struct iwl_fw_error_dump_data **data)
{
	u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
	unsigned long flags;
	__le32 *val;
	int i;

	if (!iwl_trans_grab_nic_access(trans, false, &flags))
		return 0;

	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
	(*data)->len = cpu_to_le32(fh_regs_len);
	val = (void *)(*data)->data;

	for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; i += sizeof(u32))
		*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));

	iwl_trans_release_nic_access(trans, &flags);

	*data = iwl_fw_error_next_data(*data);

	return sizeof(**data) + fh_regs_len;
}

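/*
 * Read the MARBH firmware-monitor buffer word by word through the DMARB
 * read-pointer/data registers and return the number of bytes captured.
 */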
static u32
iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
				 struct iwl_fw_error_dump_fw_mon *fw_mon_data,
				 u32 monitor_len)
{
	u32 buf_size_in_dwords = (monitor_len >> 2);
	u32 *buffer = (u32 *)fw_mon_data->data;
	unsigned long flags;
	u32 i;

	if (!iwl_trans_grab_nic_access(trans, false, &flags))
		return 0;

	__iwl_write_prph(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
	for (i = 0; i < buf_size_in_dwords; i++)
		buffer[i] = __iwl_read_prph(trans, MON_DMARB_RD_DATA_ADDR);
	__iwl_write_prph(trans, MON_DMARB_RD_CTL_ADDR, 0x0);

	iwl_trans_release_nic_access(trans, &flags);

	return monitor_len;
}

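/*
 * iwl_trans_pcie_dump_data - build a complete transport dump: host commands,
 * PRPH/CSR/FH registers, optionally the RBs (on firmware error) and the
 * firmware monitor. The total length is computed up front so that the whole
 * dump can be allocated with a single vzalloc().
 */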
static
struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_fw_error_dump_data *data;
	struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_fw_error_dump_txcmd *txcmd;
	struct iwl_trans_dump_data *dump_data;
	u32 len, num_rbs;
	u32 monitor_len;
	int i, ptr;
	bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status);

	/* transport dump header */
	len = sizeof(*dump_data);

	/* host commands */
	len += sizeof(*data) +
		cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);

	/* CSR registers */
	len += sizeof(*data) + IWL_CSR_TO_DUMP;

	/* PRPH registers */
	for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
		/* The range includes both boundaries */
		int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
			iwl_prph_dump_addr[i].start + 4;

		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_prph) +
			num_bytes_in_chunk;
	}

	/* FH registers */
	len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);

	if (dump_rbs) {
		/* RBs */
		num_rbs = le16_to_cpu(ACCESS_ONCE(
				      trans_pcie->rxq.rb_stts->closed_rb_num))
				      & 0x0FFF;
		num_rbs = (num_rbs - trans_pcie->rxq.read) & RX_QUEUE_MASK;
		len += num_rbs * (sizeof(*data) +
				  sizeof(struct iwl_fw_error_dump_rb) +
				  (PAGE_SIZE << trans_pcie->rx_page_order));
	}

	/* FW monitor */
	if (trans_pcie->fw_mon_page) {
		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
		       trans_pcie->fw_mon_size;
		monitor_len = trans_pcie->fw_mon_size;
	} else if (trans->dbg_dest_tlv) {
		u32 base, end;

		base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
		end = le32_to_cpu(trans->dbg_dest_tlv->end_reg);

		base = iwl_read_prph(trans, base) <<
		       trans->dbg_dest_tlv->base_shift;
		end = iwl_read_prph(trans, end) <<
		      trans->dbg_dest_tlv->end_shift;

		/* Make "end" point to the actual end */
		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 ||
		    trans->dbg_dest_tlv->monitor_mode == MARBH_MODE)
			end += (1 << trans->dbg_dest_tlv->end_shift);
		monitor_len = end - base;
		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
		       monitor_len;
	} else {
		monitor_len = 0;
	}

	dump_data = vzalloc(len);
	if (!dump_data)
		return NULL;

	len = 0;
	data = (void *)dump_data->data;
	data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
	txcmd = (void *)data->data;
	spin_lock_bh(&cmdq->lock);
	ptr = cmdq->q.write_ptr;
	for (i = 0; i < cmdq->q.n_window; i++) {
		u8 idx = get_cmd_index(&cmdq->q, ptr);
		u32 caplen, cmdlen;

		cmdlen = iwl_trans_pcie_get_cmdlen(&cmdq->tfds[ptr]);
		caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);

		if (cmdlen) {
			len += sizeof(*txcmd) + caplen;
			txcmd->cmdlen = cpu_to_le32(cmdlen);
			txcmd->caplen = cpu_to_le32(caplen);
			memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
			txcmd = (void *)((u8 *)txcmd->data + caplen);
		}

		ptr = iwl_queue_dec_wrap(ptr);
	}
	spin_unlock_bh(&cmdq->lock);

	data->len = cpu_to_le32(len);
	len += sizeof(*data);
	data = iwl_fw_error_next_data(data);

	len += iwl_trans_pcie_dump_prph(trans, &data);
	len += iwl_trans_pcie_dump_csr(trans, &data);
	len += iwl_trans_pcie_fh_regs_dump(trans, &data);
	if (dump_rbs)
		len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);

	/* data is already pointing to the next section */
	if ((trans_pcie->fw_mon_page &&
	     trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
	    trans->dbg_dest_tlv) {
		struct iwl_fw_error_dump_fw_mon *fw_mon_data;
		u32 base, write_ptr, wrap_cnt;

		/* If there was a dest TLV - use the values from there */
		if (trans->dbg_dest_tlv) {
			write_ptr =
				le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
			wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
			base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
		} else {
			base = MON_BUFF_BASE_ADDR;
			write_ptr = MON_BUFF_WRPTR;
			wrap_cnt = MON_BUFF_CYCLE_CNT;
		}

		data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
		fw_mon_data = (void *)data->data;
		fw_mon_data->fw_mon_wr_ptr =
			cpu_to_le32(iwl_read_prph(trans, write_ptr));
		fw_mon_data->fw_mon_cycle_cnt =
			cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
		fw_mon_data->fw_mon_base_ptr =
			cpu_to_le32(iwl_read_prph(trans, base));

		len += sizeof(*data) + sizeof(*fw_mon_data);
		if (trans_pcie->fw_mon_page) {
			/*
			 * The firmware is now asserted, it won't write anything
			 * to the buffer. CPU can take ownership to fetch the
			 * data. The buffer will be handed back to the device
			 * before the firmware will be restarted.
			 */
			dma_sync_single_for_cpu(trans->dev,
						trans_pcie->fw_mon_phys,
						trans_pcie->fw_mon_size,
						DMA_FROM_DEVICE);
			memcpy(fw_mon_data->data,
			       page_address(trans_pcie->fw_mon_page),
			       trans_pcie->fw_mon_size);

			monitor_len = trans_pcie->fw_mon_size;
		} else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
			/*
			 * Update pointers to reflect actual values after
			 * shifting
			 */
			base = iwl_read_prph(trans, base) <<
			       trans->dbg_dest_tlv->base_shift;
			iwl_trans_read_mem(trans, base, fw_mon_data->data,
					   monitor_len / sizeof(u32));
		} else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
			monitor_len =
				iwl_trans_pci_dump_marbh_monitor(trans,
								 fw_mon_data,
								 monitor_len);
		} else {
			/* Didn't match anything - output no monitor data */
			monitor_len = 0;
		}

		len += monitor_len;
		data->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
	}

	dump_data->len = len;

	return dump_data;
}

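/* the PCIe implementation of the transport API */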
static const struct iwl_trans_ops trans_ops_pcie = {
	.start_hw = iwl_trans_pcie_start_hw,
	.op_mode_leave = iwl_trans_pcie_op_mode_leave,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	.d3_suspend = iwl_trans_pcie_d3_suspend,
	.d3_resume = iwl_trans_pcie_d3_resume,

	.send_cmd = iwl_trans_pcie_send_hcmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.txq_disable = iwl_trans_pcie_txq_disable,
	.txq_enable = iwl_trans_pcie_txq_enable,

	.dbgfs_register = iwl_trans_pcie_dbgfs_register,

	.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
	.freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,

	.write8 = iwl_trans_pcie_write8,
	.write32 = iwl_trans_pcie_write32,
	.read32 = iwl_trans_pcie_read32,
	.read_prph = iwl_trans_pcie_read_prph,
	.write_prph = iwl_trans_pcie_write_prph,
	.read_mem = iwl_trans_pcie_read_mem,
	.write_mem = iwl_trans_pcie_write_mem,
	.configure = iwl_trans_pcie_configure,
	.set_pmi = iwl_trans_pcie_set_pmi,
	.grab_nic_access = iwl_trans_pcie_grab_nic_access,
	.release_nic_access = iwl_trans_pcie_release_nic_access,
	.set_bits_mask = iwl_trans_pcie_set_bits_mask,

	.ref = iwl_trans_pcie_ref,
	.unref = iwl_trans_pcie_unref,

	.dump_data = iwl_trans_pcie_dump_data,
};

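/*
 * iwl_trans_pcie_alloc - allocate and set up the PCIe transport: enable the
 * PCI device, configure the DMA mask (36 bit, falling back to 32 bit), map
 * BAR0, enable MSI, register the interrupt handler and the ICT table, and
 * read out the hardware revision.
 */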
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg)
{
	struct iwl_trans_pcie *trans_pcie;
	struct iwl_trans *trans;
	u16 pci_cmd;
	int err;

	trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
				&pdev->dev, cfg, &trans_ops_pcie, 0);
	if (!trans)
		return ERR_PTR(-ENOMEM);

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->trans = trans;
	spin_lock_init(&trans_pcie->irq_lock);
	spin_lock_init(&trans_pcie->reg_lock);
	spin_lock_init(&trans_pcie->ref_lock);
	mutex_init(&trans_pcie->mutex);
	init_waitqueue_head(&trans_pcie->ucode_write_waitq);

	err = pci_enable_device(pdev);
	if (err)
		goto out_no_pci;

	if (!cfg->base_params->pcie_l1_allowed) {
		/*
		 * W/A - seems to solve weird behavior. We need to remove this
		 * if we don't want to stay in L1 all the time. This wastes a
		 * lot of power.
		 */
		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
				       PCIE_LINK_STATE_L1 |
				       PCIE_LINK_STATE_CLKPM);
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (err) {
			dev_err(&pdev->dev, "No suitable DMA available\n");
			goto out_pci_disable_device;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto out_pci_disable_device;
	}

	trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
	if (!trans_pcie->hw_base) {
		dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	trans->dev = &pdev->dev;
	trans_pcie->pci_dev = pdev;
	iwl_disable_interrupts(trans);

	err = pci_enable_msi(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
		/* enable rfkill interrupt: hw bug w/a */
		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
		}
	}

	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
	 * changed, and the revision step now also includes bits 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible, we store it in
	 * the old format.
	 */
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
		unsigned long flags;
		int ret;

		trans->hw_rev = (trans->hw_rev & 0xfff0) |
				(CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);

		/*
		 * In order to recognize the C step, the driver must read the
		 * chip version id located in the AUX bus MISC address space.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		udelay(2);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   25000);
		if (ret < 0) {
			IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n");
			goto out_pci_disable_msi;
		}

		if (iwl_trans_grab_nic_access(trans, false, &flags)) {
			u32 hw_step;

			hw_step = __iwl_read_prph(trans, WFPM_CTRL_REG);
			hw_step |= ENABLE_WFPM;
			__iwl_write_prph(trans, WFPM_CTRL_REG, hw_step);
			hw_step = __iwl_read_prph(trans, AUX_MISC_REG);
			hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF;
			if (hw_step == 0x3)
				trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) |
						(SILICON_C_STEP << 2);
			iwl_trans_release_nic_access(trans, &flags);
		}
	}

	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans_pcie->wait_command_queue);

	if (iwl_pcie_alloc_ict(trans))
		goto out_pci_disable_msi;

	err = request_threaded_irq(pdev->irq, iwl_pcie_isr,
				   iwl_pcie_irq_handler,
				   IRQF_SHARED, DRV_NAME, trans);
	if (err) {
		IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
		goto out_free_ict;
	}

	trans_pcie->inta_mask = CSR_INI_SET_MASK;
	trans->d0i3_mode = IWL_D0I3_MODE_ON_SUSPEND;

	return trans;

out_free_ict:
	iwl_pcie_free_ict(trans);
out_pci_disable_msi:
	pci_disable_msi(pdev);
out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_no_pci:
	iwl_trans_free(trans);
	return ERR_PTR(err);
}