/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/pm_runtime.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-scd.h"
#include "iwl-agn-hw.h"
#include "iwl-fw-error-dump.h"
#include "internal.h"
#include "iwl-fh.h"

/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START	0x40000
#define IWL_FW_MEM_EXTENDED_END		0x57FFF

static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->fw_mon_page)
		return;

	dma_unmap_page(trans->dev, trans_pcie->fw_mon_phys,
		       trans_pcie->fw_mon_size, DMA_FROM_DEVICE);
	__free_pages(trans_pcie->fw_mon_page,
		     get_order(trans_pcie->fw_mon_size));
	trans_pcie->fw_mon_page = NULL;
	trans_pcie->fw_mon_phys = 0;
	trans_pcie->fw_mon_size = 0;
}

static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct page *page = NULL;
	dma_addr_t phys;
	u32 size = 0;
	u8 power;

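	/*
	 * The buffer size requested by the firmware TLV is encoded as a
	 * power of two: 0 selects the maximum of 2^26 bytes (64 MB); any
	 * other value v requests 2^(v + 11) bytes, and anything larger
	 * than 2^26 is rejected below with a warning.
	 */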
	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

	if (WARN(max_power > 26,
		 "External buffer size for monitor is too big %d, check the FW TLV\n",
		 max_power))
		return;

	if (trans_pcie->fw_mon_page) {
		dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys,
					   trans_pcie->fw_mon_size,
					   DMA_FROM_DEVICE);
		return;
	}

	phys = 0;
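	/*
	 * Try successively smaller buffers, halving the size on every
	 * iteration, until both the page allocation and the DMA mapping
	 * succeed; 2^11 bytes is the smallest buffer we will accept.
	 */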
	for (power = max_power; power >= 11; power--) {
		int order;

		size = BIT(power);
		order = get_order(size);
		page = alloc_pages(__GFP_COMP | __GFP_NOWARN | __GFP_ZERO,
				   order);
		if (!page)
			continue;

		phys = dma_map_page(trans->dev, page, 0, PAGE_SIZE << order,
				    DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, phys)) {
			__free_pages(page, order);
			page = NULL;
			continue;
		}
		IWL_INFO(trans,
			 "Allocated 0x%08x bytes (order %d) for firmware monitor.\n",
			 size, order);
		break;
	}

	if (WARN_ON_ONCE(!page))
		return;

	if (power != max_power)
		IWL_ERR(trans,
			"Sorry - debug buffer is only %luK while you requested %luK\n",
			(unsigned long)BIT(power - 10),
			(unsigned long)BIT(max_power - 10));

	trans_pcie->fw_mon_page = page;
	trans_pcie->fw_mon_phys = phys;
	trans_pcie->fw_mon_size = size;
}

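/*
 * Indirect access to the shared (SHR) registers: the target address is
 * written to the HEEP control word together with an opcode in bits 31:28
 * (2 = read, 3 = write), and the data travels through the HEEP data
 * register.
 */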
static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (2 << 28)));
	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}

static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (3 << 28)));
}

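/*
 * Select the APMG power source: VAUX if requested and the PCI device can
 * signal PME# from D3cold, otherwise VMAIN.
 */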
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
	if (trans->cfg->apmg_not_supported)
		return;

	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
	else
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041

static void iwl_pcie_apm_config(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 lctl;
	u16 cap;

	/*
	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 *    costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
	if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
	else
		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
	trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
	dev_info(trans->dev, "L1 %sabled - LTR %sabled\n",
		 (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
		 trans->ltr_enabled ? "En" : "Dis");
}

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
	int ret = 0;
	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
		iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
			    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 *  don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
		goto out;
	}

	if (trans->cfg->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse - this is needed for 7260 / 3160
		 * only, so we check host_interrupt_operation_mode even though
		 * what follows is not strictly related to that flag.
		 *
		 * Enable the oscillator to count wake up time for L1 exit. This
		 * consumes slightly more power (100uA) - but allows to be sure
		 * that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard the
		 * value, set a bit, and yet again, read that same register
		 * just to discard the value. But that's the way the hardware
		 * seems to like it.
		 */
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
		iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
	 * bits do not disable clocks.  This preserves any hardware
	 * bits already set by default in "CLK_CTRL_REG" after reset.
	 */
	if (!trans->cfg->apmg_not_supported) {
		iwl_write_prph(trans, APMG_CLK_EN_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(20);

		/* Disable L1-Active */
		iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
			       APMG_RTC_INT_STT_RFKILL);
	}

	set_bit(STATUS_DEVICE_ENABLED, &trans->status);

out:
	return ret;
}

/*
 * Enable LP XTAL to avoid HW bug where device may consume much power if
 * FW is not loaded after device reset. LP XTAL is disabled by default
 * after device HW reset. Do it only if XTAL is fed by internal source.
 * Configure device's "persistence" mode to avoid resetting XTAL again when
 * SHRD_HW_RST occurs in S3.
 */
static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
{
	int ret;
	u32 apmg_gp1_reg;
	u32 apmg_xtal_cfg_reg;
	u32 dl_cfg_reg;

	/* Force XTAL ON */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);

	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
	usleep_range(1000, 2000);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is possible.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   25000);
	if (WARN_ON(ret < 0)) {
		IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n");
		/* Release XTAL ON request */
		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
		return;
	}

	/*
	 * Clear "disable persistence" to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
				    APMG_PCIDEV_STT_VAL_PERSIST_DIS);

	/*
	 * Force APMG XTAL to be active to prevent its disabling by HW
	 * caused by APMG idle state.
	 */
	apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
						    SHR_APMG_XTAL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg |
				 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);

	/*
	 * Reset entire device again - do controller reset (results in
	 * SHRD_HW_RST). Turn MAC off before proceeding.
	 */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
	usleep_range(1000, 2000);

	/* Enable LP XTAL by indirect access through CSR */
	apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
				 SHR_APMG_GP1_WF_XTAL_LP_EN |
				 SHR_APMG_GP1_CHICKEN_BIT_SELECT);

	/* Clear delay line clock power up */
	dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
				 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);

	/*
	 * Enable persistence mode to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* Activates XTAL resources monitor */
	__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
				 CSR_MONITOR_XTAL_RESOURCES);

	/* Release XTAL ON request */
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
	udelay(10);

	/* Release APMG XTAL */
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg &
				 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
}

static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(trans, CSR_RESET,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret < 0)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");

	return ret;
}

static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	if (op_mode_leave) {
		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_pcie_apm_init(trans);

		/* inform ME that we are leaving */
		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
					  APMG_PCIDEV_STT_VAL_WAKE_ME);
		else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				    CSR_RESET_LINK_PWR_MGMT_DISABLED);
			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
				    CSR_HW_IF_CONFIG_REG_PREPARE |
				    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
			mdelay(1);
			iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				      CSR_RESET_LINK_PWR_MGMT_DISABLED);
		}
		mdelay(5);
	}

	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	if (trans->cfg->lp_xtal_workaround) {
		iwl_pcie_apm_lp_xtal_enable(trans);
		return;
	}

	/* Reset the entire device */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
	usleep_range(1000, 2000);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}

static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* nic_init */
	spin_lock(&trans_pcie->irq_lock);
	iwl_pcie_apm_init(trans);

	spin_unlock(&trans_pcie->irq_lock);

	iwl_pcie_set_pwr(trans, false);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_pcie_rx_init(trans);

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_pcie_tx_init(trans))
		return -ENOMEM;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	if (ret >= 0)
		iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int t = 0;
	int iter;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_pcie_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
		    CSR_RESET_LINK_PWR_MGMT_DISABLED);
	usleep_range(1000, 2000);

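	/*
	 * Keep asserting PREPARE and re-polling for NIC_READY: up to 10
	 * outer attempts, sharing a cumulative polling budget of roughly
	 * 150 ms in 200-1000 usec steps.
	 */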
	for (iter = 0; iter < 10; iter++) {
		/* If HW is not ready, prepare the conditions to check again */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PREPARE);

		do {
			ret = iwl_pcie_set_hw_ready(trans);
			if (ret >= 0)
				return 0;

			usleep_range(200, 1000);
			t += 200;
		} while (t < 150000);
		msleep(25);
	}

	IWL_ERR(trans, "Couldn't prepare the card\n");

	return ret;
}

/*
 * ucode
 */
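/*
 * Program the legacy FH service DMA channel to copy one chunk from host
 * DRAM (phy_addr) to device SRAM (dst_addr): pause the channel, set up
 * the source, destination and byte count, then re-enable it.
 */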
static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans,
					    u32 dst_addr, dma_addr_t phy_addr,
					    u32 byte_cnt)
{
	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
		    dst_addr);

	iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
		    phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
		    (iwl_get_dma_hi_addr(phy_addr)
			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
		    FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
}

static void iwl_pcie_load_firmware_chunk_tfh(struct iwl_trans *trans,
					     u32 dst_addr, dma_addr_t phy_addr,
					     u32 byte_cnt)
{
	/* Stop DMA channel */
	iwl_write32(trans, TFH_SRV_DMA_CHNL0_CTRL, 0);

	/* Configure SRAM address */
	iwl_write32(trans, TFH_SRV_DMA_CHNL0_SRAM_ADDR,
		    dst_addr);

	/* Configure DRAM address - 64 bit */
	iwl_write64(trans, TFH_SRV_DMA_CHNL0_DRAM_ADDR, phy_addr);

	/* Configure byte count to transfer */
	iwl_write32(trans, TFH_SRV_DMA_CHNL0_BC, byte_cnt);

	/* Enable the DRAM2SRAM to start */
	iwl_write32(trans, TFH_SRV_DMA_CHNL0_CTRL, TFH_SRV_DMA_SNOOP |
						   TFH_SRV_DMA_TO_DRIVER |
						   TFH_SRV_DMA_START);
}

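/*
 * Kick off a single DMA transfer (legacy FH or TFH, depending on the
 * device) and sleep until the "uCode load" interrupt reports completion,
 * with a five second timeout.
 */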
static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
					u32 dst_addr, dma_addr_t phy_addr,
					u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;
	int ret;

	trans_pcie->ucode_write_complete = false;

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return -EIO;

	if (trans->cfg->use_tfh)
		iwl_pcie_load_firmware_chunk_tfh(trans, dst_addr, phy_addr,
						 byte_cnt);
	else
		iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
						byte_cnt);
	iwl_trans_release_nic_access(trans, &flags);

	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Failed to load firmware chunk!\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
			    const struct fw_desc *section)
{
	u8 *v_addr;
	dma_addr_t p_addr;
	u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);

	v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
				    GFP_KERNEL | __GFP_NOWARN);
	if (!v_addr) {
		IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
		chunk_sz = PAGE_SIZE;
		v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
					    &p_addr, GFP_KERNEL);
		if (!v_addr)
			return -ENOMEM;
	}

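	/*
	 * Copy the section chunk by chunk; chunks that land in the
	 * extended SRAM window need LMPM_CHICK_EXTENDED_ADDR_SPACE set
	 * around the transfer.
	 */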
	for (offset = 0; offset < section->len; offset += chunk_sz) {
		u32 copy_size, dst_addr;
		bool extended_addr = false;

		copy_size = min_t(u32, chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWL_FW_MEM_EXTENDED_END)
			extended_addr = true;

		if (extended_addr)
			iwl_set_bits_prph(trans, LMPM_CHICK,
					  LMPM_CHICK_EXTENDED_ADDR_SPACE);

		memcpy(v_addr, (u8 *)section->data + offset, copy_size);
		ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
						   copy_size);

		if (extended_addr)
			iwl_clear_bits_prph(trans, LMPM_CHICK,
					    LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			IWL_ERR(trans,
				"Could not load the [%d] uCode section\n",
				section_num);
			break;
		}
	}

	dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
	return ret;
}

/*
 * The driver takes ownership of the secure machine before FW load
 * to prevent a race with the BT load.
 * W/A for ROM bug. (should be removed in the next Si step)
 */
static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
{
	u32 val, loop = 1000;

	/*
	 * Check the RSA semaphore is accessible.
	 * If the HW isn't locked and the rsa semaphore isn't accessible,
	 * we are in trouble.
	 */
	val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
	if (val & (BIT(1) | BIT(17))) {
		IWL_DEBUG_INFO(trans,
			       "can't access the RSA semaphore it is write protected\n");
		return 0;
	}

	/* take ownership on the AUX IF */
	iwl_write_prph(trans, WFPM_CTRL_REG, WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK);
	iwl_write_prph(trans, AUX_MISC_MASTER1_EN, AUX_MISC_MASTER1_EN_SBE_MSK);

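	/* Poll the semaphore: writing 1 and reading 1 back means we own it */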
	do {
		iwl_write_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS, 0x1);
		val = iwl_read_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS);
		if (val == 0x1) {
			iwl_write_prph(trans, RSA_ENABLE, 0);
			return 0;
		}

		udelay(10);
		loop--;
	} while (loop > 0);

	IWL_ERR(trans, "Failed to take ownership on secure machine\n");
	return -EIO;
}

static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
					   const struct fw_img *image,
					   int cpu,
					   int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	u32 val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < image->num_sec; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the CPU1
		 * sections from the CPU2 sections.
		 * PAGING_SEPARATOR_SECTION delimiter - separates the CPU2
		 * non-paged sections from the CPU2 paging sections.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;

		/* Notify ucode of loaded section number and status */
		if (trans->cfg->use_tfh) {
			val = iwl_read_prph(trans, UREG_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, val);
		} else {
			val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
		}
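		/* one more bit is set in the mask per section loaded */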
		sec_num = (sec_num << 1) | 0x1;
	}

	*first_ucode_section = last_read_idx;

	iwl_enable_interrupts(trans);

	if (trans->cfg->use_tfh) {
		if (cpu == 1)
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
				       0xFFFF);
		else
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
				       0xFFFFFFFF);
	} else {
		if (cpu == 1)
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
					   0xFFFF);
		else
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
					   0xFFFFFFFF);
	}

	return 0;
}

static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
				      const struct fw_img *image,
				      int cpu,
				      int *first_ucode_section)
{
	int i, ret = 0;
	u32 last_read_idx = 0;

	if (cpu == 1)
		*first_ucode_section = 0;
	else
		(*first_ucode_section)++;

	for (i = *first_ucode_section; i < image->num_sec; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the CPU1
		 * sections from the CPU2 sections.
		 * PAGING_SEPARATOR_SECTION delimiter - separates the CPU2
		 * non-paged sections from the CPU2 paging sections.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	*first_ucode_section = last_read_idx;

	return 0;
}

static void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv;
	int i;

	if (dest->version)
		IWL_ERR(trans,
			"DBG DEST version is %d - expect issues\n",
			dest->version);

	IWL_INFO(trans, "Applying debug destination %s\n",
		 get_fw_dbg_mode_string(dest->monitor_mode));

	if (dest->monitor_mode == EXTERNAL_MODE)
		iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
	else
		IWL_WARN(trans, "PCI should have external buffer debug\n");

	for (i = 0; i < trans->dbg_dest_reg_num; i++) {
		u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
		u32 val = le32_to_cpu(dest->reg_ops[i].val);

		switch (dest->reg_ops[i].op) {
		case CSR_ASSIGN:
			iwl_write32(trans, addr, val);
			break;
		case CSR_SETBIT:
			iwl_set_bit(trans, addr, BIT(val));
			break;
		case CSR_CLEARBIT:
			iwl_clear_bit(trans, addr, BIT(val));
			break;
		case PRPH_ASSIGN:
			iwl_write_prph(trans, addr, val);
			break;
		case PRPH_SETBIT:
			iwl_set_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_CLEARBIT:
			iwl_clear_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_BLOCKBIT:
			if (iwl_read_prph(trans, addr) & BIT(val)) {
				IWL_ERR(trans,
					"BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
					val, addr);
				goto monitor;
			}
			break;
		default:
			IWL_ERR(trans, "FW debug - unknown OP %d\n",
				dest->reg_ops[i].op);
			break;
		}
	}

monitor:
	if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
		iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
			       trans_pcie->fw_mon_phys >> dest->base_shift);
		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (trans_pcie->fw_mon_phys +
					trans_pcie->fw_mon_size - 256) >>
						dest->end_shift);
		else
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (trans_pcie->fw_mon_phys +
					trans_pcie->fw_mon_size) >>
						dest->end_shift);
	}
}

static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
				const struct fw_img *image)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
		iwl_write_prph(trans,
			       LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
			       LMPM_SECURE_CPU2_HDR_MEM_SPACE);

		/* load to FW the binary sections of CPU2 */
		ret = iwl_pcie_load_cpu_sections(trans, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	/* supported for 7000 only for the moment */
	if (iwlwifi_mod_params.fw_monitor &&
	    trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
		iwl_pcie_alloc_fw_monitor(trans, 0);

		if (trans_pcie->fw_mon_size) {
			iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
				       trans_pcie->fw_mon_phys >> 4);
			iwl_write_prph(trans, MON_BUFF_END_ADDR,
				       (trans_pcie->fw_mon_phys +
					trans_pcie->fw_mon_size) >> 4);
		}
	} else if (trans->dbg_dest_tlv) {
		iwl_pcie_apply_destination(trans);
	}

	iwl_enable_interrupts(trans);

	/* release CPU reset */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}

static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
					  const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	if (trans->dbg_dest_tlv)
		iwl_pcie_apply_destination(trans);

	/* TODO: remove in the next Si step */
	ret = iwl_pcie_rsa_race_bug_wa(trans);
	if (ret)
		return ret;

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);

	/* load to FW the binary Secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
					      &first_ucode_section);
	if (ret)
		return ret;

	/* load to FW the binary sections of CPU2 */
	return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
					       &first_ucode_section);
}

static bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans)
{
	bool hw_rfkill = iwl_is_rfkill_set(trans);

	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans->status);
	else
		clear_bit(STATUS_RFKILL, &trans->status);

	iwl_trans_pcie_rf_kill(trans, hw_rfkill);

	return hw_rfkill;
}

static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill, was_hw_rfkill;

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->is_down)
		return;

	trans_pcie->is_down = true;

	was_hw_rfkill = iwl_is_rfkill_set(trans);

	/* tell the device to stop sending interrupts */
	iwl_disable_interrupts(trans);

	/* device going down, stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
		IWL_DEBUG_INFO(trans,
			       "DEVICE_ENABLED bit was set and is now cleared\n");
		iwl_pcie_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		if (!trans->cfg->apmg_not_supported) {
			iwl_write_prph(trans, APMG_CLK_DIS_REG,
				       APMG_CLK_VAL_DMA_CLK_RQT);
			udelay(5);
		}
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_apm_stop(trans, false);

	/* stop and reset the on-board processor */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
	usleep_range(1000, 2000);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain versions of the hardware.
	 * Certain devices also keep sending HW RF kill interrupt all
	 * the time, unless the interrupt is ACKed even if the interrupt
	 * should be masked. Re-ACK all the interrupts here.
	 */
	iwl_disable_interrupts(trans);

	/* clear all status bits */
	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	clear_bit(STATUS_INT_ENABLED, &trans->status);
	clear_bit(STATUS_TPOWER_PMI, &trans->status);
	clear_bit(STATUS_RFKILL, &trans->status);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwl_enable_rfkill_int(trans);

	/*
	 * Check again since the RF kill state may have changed while
	 * all the interrupts were disabled, in this case we couldn't
	 * receive the RF kill interrupt and update the state in the
	 * op_mode.
	 * Don't call the op_mode if the rfkill state hasn't changed.
	 * This allows the op_mode to call stop_device from the rfkill
	 * notification without endless recursion. Under very rare
	 * circumstances, we might have a small recursion if the rfkill
	 * state changed exactly now while we were called from stop_device.
	 * This is very unlikely but can happen and is supported.
	 */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans->status);
	else
		clear_bit(STATUS_RFKILL, &trans->status);
	if (hw_rfkill != was_hw_rfkill)
		iwl_trans_pcie_rf_kill(trans, hw_rfkill);

	/* re-take ownership to prevent other users from stealing the device */
	iwl_pcie_prepare_card_hw(trans);
}

static void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->msix_enabled) {
		int i;

		for (i = 0; i < trans_pcie->alloc_vecs; i++)
			synchronize_irq(trans_pcie->msix_entries[i].vector);
	} else {
		synchronize_irq(trans_pcie->pci_dev->irq);
	}
}

static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw, bool run_in_rfkill)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		ret = -EIO;
		goto out;
	}

	iwl_enable_rfkill_int(trans);

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	/*
	 * We enabled the RF-Kill interrupt and the handler may very
	 * well be running. Disable the interrupts to make sure no other
	 * interrupt can be fired.
	 */
	iwl_disable_interrupts(trans);

	/* Make sure it finished running */
	iwl_pcie_synchronize_irqs(trans);

	mutex_lock(&trans_pcie->mutex);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_trans_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill) {
		ret = -ERFKILL;
		goto out;
	}

	/* Someone called stop_device, don't try to start_fw */
	if (trans_pcie->is_down) {
		IWL_WARN(trans,
			 "Can't start_fw since the HW hasn't been started\n");
		ret = -EIO;
		goto out;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		goto out;
	}

	/*
	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all the interrupt besides the
	 * FH_TX interrupt which is needed to load the firmware). If the
	 * RF-Kill switch is toggled, we will find out after having loaded
	 * the firmware and return the proper value to the caller.
	 */
	iwl_enable_fw_load_int(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		ret = iwl_pcie_load_given_ucode_8000(trans, fw);
	else
		ret = iwl_pcie_load_given_ucode(trans, fw);

	/* re-check RF-Kill state since we may have missed the interrupt */
	hw_rfkill = iwl_trans_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill)
		ret = -ERFKILL;

out:
	mutex_unlock(&trans_pcie->mutex);
	return ret;
}

static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
	iwl_pcie_tx_start(trans, scd_addr);
}

static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	mutex_lock(&trans_pcie->mutex);
	_iwl_trans_pcie_stop_device(trans, low_power);
	mutex_unlock(&trans_pcie->mutex);
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
		_iwl_trans_pcie_stop_device(trans, true);
}

static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
				      bool reset)
{
	if (!reset) {
		/* Enable persistence mode to avoid reset */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
	}

	iwl_disable_interrupts(trans);

	/*
	 * in testing mode, the host stays awake and the
	 * hardware won't be reset (not even partially)
	 */
	if (test)
		return;

	iwl_pcie_disable_ict(trans);

	iwl_pcie_synchronize_irqs(trans);

	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	iwl_pcie_enable_rx_wake(trans, false);

	if (reset) {
		/*
		 * reset TX queues -- some of their registers reset during S3
		 * so if we don't reset everything here the D3 image would try
		 * to execute some invalid memory upon resume
		 */
		iwl_trans_pcie_tx_reset(trans);
	}

	iwl_pcie_set_pwr(trans, true);
}

static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
				    enum iwl_d3_status *status,
				    bool test,  bool reset)
{
	u32 val;
	int ret;

	if (test) {
		iwl_enable_interrupts(trans);
		*status = IWL_D3_STATUS_ALIVE;
		return 0;
	}

	iwl_pcie_enable_rx_wake(trans, true);

	/*
	 * Also enables interrupts - none will happen as the device doesn't
	 * know we're waking it up, only when the opmode actually tells it
	 * after this call.
	 */
	iwl_pcie_reset_ict(trans);
	iwl_enable_interrupts(trans);

	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		udelay(2);

	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   25000);
	if (ret < 0) {
		IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
		return ret;
	}

	iwl_pcie_set_pwr(trans, false);

	if (!reset) {
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	} else {
		iwl_trans_pcie_tx_reset(trans);
1390

1391 1392 1393 1394 1395 1396
		ret = iwl_pcie_rx_init(trans);
		if (ret) {
			IWL_ERR(trans,
				"Failed to resume the device (RX reset)\n");
			return ret;
		}
1397 1398
	}

	val = iwl_read32(trans, CSR_RESET);
	if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
		*status = IWL_D3_STATUS_RESET;
	else
		*status = IWL_D3_STATUS_ALIVE;

	return 0;
}

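/*
 * Non-RX MSI-X interrupt causes: each entry names the cause bit, the CSR
 * mask register it lives in, and its byte offset in the IVAR table.
 */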
struct iwl_causes_list {
	u32 cause_num;
	u32 mask_reg;
	u8 addr;
};

static struct iwl_causes_list causes_list[] = {
	{MSIX_FH_INT_CAUSES_D2S_CH0_NUM,	CSR_MSIX_FH_INT_MASK_AD, 0},
	{MSIX_FH_INT_CAUSES_D2S_CH1_NUM,	CSR_MSIX_FH_INT_MASK_AD, 0x1},
	{MSIX_FH_INT_CAUSES_S2D,		CSR_MSIX_FH_INT_MASK_AD, 0x3},
	{MSIX_FH_INT_CAUSES_FH_ERR,		CSR_MSIX_FH_INT_MASK_AD, 0x5},
	{MSIX_HW_INT_CAUSES_REG_ALIVE,		CSR_MSIX_HW_INT_MASK_AD, 0x10},
	{MSIX_HW_INT_CAUSES_REG_WAKEUP,		CSR_MSIX_HW_INT_MASK_AD, 0x11},
	{MSIX_HW_INT_CAUSES_REG_CT_KILL,	CSR_MSIX_HW_INT_MASK_AD, 0x16},
	{MSIX_HW_INT_CAUSES_REG_RF_KILL,	CSR_MSIX_HW_INT_MASK_AD, 0x17},
	{MSIX_HW_INT_CAUSES_REG_PERIODIC,	CSR_MSIX_HW_INT_MASK_AD, 0x18},
	{MSIX_HW_INT_CAUSES_REG_SW_ERR,		CSR_MSIX_HW_INT_MASK_AD, 0x29},
	{MSIX_HW_INT_CAUSES_REG_SCD,		CSR_MSIX_HW_INT_MASK_AD, 0x2A},
	{MSIX_HW_INT_CAUSES_REG_FH_TX,		CSR_MSIX_HW_INT_MASK_AD, 0x2B},
	{MSIX_HW_INT_CAUSES_REG_HW_ERR,		CSR_MSIX_HW_INT_MASK_AD, 0x2D},
	{MSIX_HW_INT_CAUSES_REG_HAP,		CSR_MSIX_HW_INT_MASK_AD, 0x2E},
};

static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =  IWL_TRANS_GET_PCIE_TRANS(trans);
	int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
	int i;

	/*
	 * Access all non RX causes and map them to the default irq.
	 * In case we are missing at least one interrupt vector,
	 * the first interrupt vector will serve non-RX and FBQ causes.
	 */
	for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
		iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val);
		iwl_clear_bit(trans, causes_list[i].mask_reg,
			      causes_list[i].cause_num);
	}
}

static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 offset =
		trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
	u32 val, idx;

	/*
	 * The first RX queue - fallback queue, which is designated for
	 * management frames, command responses etc, is always mapped to the
	 * first interrupt vector. The other RX queues are mapped to
	 * the other (N - 2) interrupt vectors.
	 */
	val = BIT(MSIX_FH_INT_CAUSES_Q(0));
	for (idx = 1; idx < trans->num_rx_queues; idx++) {
		iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
			   MSIX_FH_INT_CAUSES_Q(idx - offset));
		val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
	}
	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);

	val = MSIX_FH_INT_CAUSES_Q(0);
	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
		val |= MSIX_NON_AUTO_CLEAR_CAUSE;
	iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);

	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
		iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
}

static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
{
	struct iwl_trans *trans = trans_pcie->trans;

	if (!trans_pcie->msix_enabled) {
		if (trans->cfg->mq_rx_supported)
			iwl_write_prph(trans, UREG_CHICK,
				       UREG_CHICK_MSI_ENABLE);
		return;
	}

	iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);

	/*
	 * Each cause from the causes list above and the RX causes is
	 * represented as a byte in the IVAR table. The first nibble
	 * represents the bound interrupt vector of the cause, the second
	 * represents no auto clear for this cause. This will be set if its
	 * interrupt vector is bound to serve other causes.
	 */
	iwl_pcie_map_rx_causes(trans);

	iwl_pcie_map_non_rx_causes(trans);

	trans_pcie->fh_init_mask =
		~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
	trans_pcie->fh_mask = trans_pcie->fh_init_mask;
	trans_pcie->hw_init_mask =
		~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
	trans_pcie->hw_mask = trans_pcie->hw_init_mask;
}

static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
					struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int max_irqs, num_irqs, i, ret, nr_online_cpus;
	u16 pci_cmd;

	if (!trans->cfg->mq_rx_supported)
		goto enable_msi;

	nr_online_cpus = num_online_cpus();
	max_irqs = min_t(u32, nr_online_cpus + 2, IWL_MAX_RX_HW_QUEUES);
	for (i = 0; i < max_irqs; i++)
		trans_pcie->msix_entries[i].entry = i;

	num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
					 MSIX_MIN_INTERRUPT_VECTORS,
					 max_irqs);
	if (num_irqs < 0) {
		IWL_DEBUG_INFO(trans,
			       "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n",
			       num_irqs);
		goto enable_msi;
	}
	trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0;

	IWL_DEBUG_INFO(trans,
		       "MSI-X enabled. %d interrupt vectors were allocated\n",
		       num_irqs);

	/*
	 * In case the OS provides fewer interrupts than requested, different
	 * causes will share the same interrupt vector as follows:
	 * One interrupt less: non rx causes shared with FBQ.
	 * Two interrupts less: non rx causes shared with FBQ and RSS.
	 * More than two interrupts: we will use fewer RSS queues.
	 */
	if (num_irqs <= nr_online_cpus) {
		trans_pcie->trans->num_rx_queues = num_irqs + 1;
		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
			IWL_SHARED_IRQ_FIRST_RSS;
	} else if (num_irqs == nr_online_cpus + 1) {
		trans_pcie->trans->num_rx_queues = num_irqs;
		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
	} else {
		trans_pcie->trans->num_rx_queues = num_irqs - 1;
	}

	trans_pcie->alloc_vecs = num_irqs;
	trans_pcie->msix_enabled = true;
	return;

enable_msi:
	ret = pci_enable_msi(pdev);
	if (ret) {
		dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
		/* enable rfkill interrupt: hw bug w/a */
		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
		}
	}
}

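/*
 * Distribute the RX queue interrupt vectors across the online CPUs, one
 * queue per CPU, skipping any leading vector that serves shared non-RX
 * causes.
 */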
static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
{
	int iter_rx_q, i, ret, cpu, offset;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;
	iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i;
	offset = 1 + i;
	for (; i < iter_rx_q ; i++) {
		/*
		 * Get the cpu prior to the place to search
		 * (i.e. return will be > i - 1).
		 */
		cpu = cpumask_next(i - offset, cpu_online_mask);
		cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
		ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector,
					    &trans_pcie->affinity_mask[i]);
		if (ret)
			IWL_ERR(trans_pcie->trans,
				"Failed to set affinity mask for IRQ %d\n",
				i);
	}
}

static const char *queue_name(struct device *dev,
			      struct iwl_trans_pcie *trans_p, int i)
{
	if (trans_p->shared_vec_mask) {
		int vec = trans_p->shared_vec_mask &
			  IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;

		if (i == 0)
			return DRV_NAME ": shared IRQ";

		return devm_kasprintf(dev, GFP_KERNEL,
				      DRV_NAME ": queue %d", i + vec);
	}
	if (i == 0)
		return DRV_NAME ": default queue";

	if (i == trans_p->alloc_vecs - 1)
		return DRV_NAME ": exception";

	return devm_kasprintf(dev, GFP_KERNEL,
			      DRV_NAME  ": queue %d", i);
}

static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
				      struct iwl_trans_pcie *trans_pcie)
{
	int i;

	for (i = 0; i < trans_pcie->alloc_vecs; i++) {
		int ret;
		struct msix_entry *msix_entry;
		const char *qname = queue_name(&pdev->dev, trans_pcie, i);

		if (!qname)
			return -ENOMEM;

		msix_entry = &trans_pcie->msix_entries[i];
		ret = devm_request_threaded_irq(&pdev->dev,
						msix_entry->vector,
						iwl_pcie_msix_isr,
						(i == trans_pcie->def_irq) ?
						iwl_pcie_irq_msix_handler :
						iwl_pcie_irq_rx_msix_handler,
						IRQF_SHARED,
						qname,
						msix_entry);
		if (ret) {
			IWL_ERR(trans_pcie->trans,
				"Error allocating IRQ %d\n", i);

			return ret;
		}
	}
	iwl_pcie_irq_set_affinity(trans_pcie->trans);

	return 0;
}

static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int err;

	lockdep_assert_held(&trans_pcie->mutex);

	err = iwl_pcie_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
		return err;
	}

	/* Reset the entire device */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
	usleep_range(1000, 2000);

	iwl_pcie_apm_init(trans);

	iwl_pcie_init_msix(trans_pcie);
	/* From now on, the op_mode will be kept updated about RF kill state */
	iwl_enable_rfkill_int(trans);

	/* Set is_down to false here so that...*/
	trans_pcie->is_down = false;

	/* ...rfkill can call stop_device and set it false if needed */
	iwl_trans_check_hw_rf_kill(trans);

	/* Make sure we sync here, because we'll need full access later */
	if (low_power)
		pm_runtime_resume(trans->dev);

J
Johannes Berg 已提交
1691
	return 0;
1692 1693
}

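/* Locked wrapper around _iwl_trans_pcie_start_hw() */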
static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	mutex_lock(&trans_pcie->mutex);
	ret = _iwl_trans_pcie_start_hw(trans, low_power);
	mutex_unlock(&trans_pcie->mutex);

	return ret;
}

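/*
 * Called when the op mode detaches: mask all interrupts, stop the APM
 * and wait for any still-running IRQ handlers to finish.
 */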
static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	mutex_lock(&trans_pcie->mutex);

	/* disable interrupts - don't enable HW RF kill interrupt */
	iwl_disable_interrupts(trans);

	iwl_pcie_apm_stop(trans, true);

	iwl_disable_interrupts(trans);

	iwl_pcie_disable_ict(trans);

	mutex_unlock(&trans_pcie->mutex);

	iwl_pcie_synchronize_irqs(trans);
}

static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

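/*
 * Periphery (PRPH) registers are reached indirectly through the
 * HBUS_TARG_PRPH address/data window.
 */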
static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
{
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
			       ((reg & 0x000FFFFF) | (3 << 24)));
	return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
}

static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
				      u32 val)
{
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
			       ((addr & 0x000FFFFF) | (3 << 24)));
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}

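/* Copy the configuration handed down by the op mode into the transport */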
static void iwl_trans_pcie_configure(struct iwl_trans *trans,
				     const struct iwl_trans_config *trans_cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->cmd_queue = trans_cfg->cmd_queue;
	trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
	trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
		trans_pcie->n_no_reclaim_cmds = 0;
	else
		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
	if (trans_pcie->n_no_reclaim_cmds)
		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));

	trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
	trans_pcie->rx_page_order =
		iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);

	trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
	trans_pcie->scd_set_active = trans_cfg->scd_set_active;
	trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx;

	trans_pcie->page_offs = trans_cfg->cb_data_offs;
	trans_pcie->dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);

	trans->command_groups = trans_cfg->command_groups;
	trans->command_groups_size = trans_cfg->command_groups_size;

	/* Initialize NAPI here - it should be before registering to mac80211
	 * in the opmode but after the HW struct is allocated.
	 * As this function may be called again in some corner cases don't
	 * do anything if NAPI was already initialized.
	 */
	if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY)
		init_dummy_netdev(&trans_pcie->napi_dev);
}

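/*
 * Free the TX/RX resources, drop the IRQ affinity hints and release the
 * per-CPU TSO header pages before freeing the transport itself.
 */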
void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	iwl_pcie_synchronize_irqs(trans);

	iwl_pcie_tx_free(trans);
	iwl_pcie_rx_free(trans);

	if (trans_pcie->msix_enabled) {
		for (i = 0; i < trans_pcie->alloc_vecs; i++) {
			irq_set_affinity_hint(
				trans_pcie->msix_entries[i].vector,
				NULL);
		}

		trans_pcie->msix_enabled = false;
	} else {
		iwl_pcie_free_ict(trans);
	}

	iwl_pcie_free_fw_monitor(trans);

	for_each_possible_cpu(i) {
		struct iwl_tso_hdr_page *p =
			per_cpu_ptr(trans_pcie->tso_hdr_page, i);

		if (p->page)
			__free_page(p->page);
	}

	free_percpu(trans_pcie->tso_hdr_page);
	mutex_destroy(&trans_pcie->mutex);
	iwl_trans_free(trans);
}

static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans->status);
}

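/*
 * Wake the NIC by setting MAC_ACCESS_REQ and keep it awake for a burst
 * of register accesses; returns true with reg_lock held on success.
 */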
static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
					   unsigned long *flags)
{
	int ret;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans_pcie->reg_lock, *flags);

	if (trans_pcie->cmd_hold_nic_awake)
		goto out;

	/* this bit wakes up the NIC */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		udelay(2);

	/*
	 * These bits say the device is running, and should keep running for
	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
	 * but they do not indicate that embedded SRAM is restored yet;
	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
	 * to/from host DRAM when sleeping/waking for power-saving.
	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
	 * series of register accesses are expected (e.g. reading Event Log),
	 * to keep device from sleeping.
	 *
	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
	 * SRAM is okay/restored.  We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
	 *
	 * 5000 series and later (including 1000 series) have non-volatile SRAM,
	 * and do not save/restore SRAM when power cycling.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (unlikely(ret < 0)) {
		iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
		WARN_ONCE(1,
			  "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
			  iwl_read32(trans, CSR_GP_CNTRL));
		spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
		return false;
	}

out:
	/*
	 * Fool sparse by faking we release the lock - sparse will
	 * track nic_access anyway.
	 */
	__release(&trans_pcie->reg_lock);
	return true;
}

static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
					      unsigned long *flags)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	/*
	 * Fool sparse by faking we acquire the lock - sparse will
	 * track nic_access anyway.
	 */
	__acquire(&trans_pcie->reg_lock);

	if (trans_pcie->cmd_hold_nic_awake)
		goto out;

	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	/*
	 * Above we read the CSR_GP_CNTRL register, which will flush
	 * any previous writes, but we need the write that clears the
	 * MAC_ACCESS_REQ bit to be performed before any other writes
	 * scheduled on different CPUs (after we drop reg_lock).
	 */
	mmiowb();
out:
	spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
}

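/*
 * Read/write device SRAM through the HBUS_TARG_MEM address/data window;
 * the target address auto-increments after each data access.
 */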
static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
				   void *buf, int dwords)
{
	unsigned long flags;
	int offs, ret = 0;
	u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans, &flags)) {
		iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		ret = -EBUSY;
	}
	return ret;
}

static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
				    const void *buf, int dwords)
{
	unsigned long flags;
	int offs, ret = 0;
	const u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans, &flags)) {
		iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			iwl_write32(trans, HBUS_TARG_MEM_WDAT,
				    vals ? vals[offs] : 0);
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		ret = -EBUSY;
	}
	return ret;
}

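/*
 * Freeze or wake the stuck-timer of the given TX queues, preserving the
 * time remaining so a woken queue keeps its original deadline.
 */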
static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
					    unsigned long txqs,
					    bool freeze)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int queue;

	for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
		struct iwl_txq *txq = &trans_pcie->txq[queue];
		unsigned long now;

		spin_lock_bh(&txq->lock);

		now = jiffies;

		if (txq->frozen == freeze)
			goto next_queue;

		IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
				    freeze ? "Freezing" : "Waking", queue);

		txq->frozen = freeze;

		if (txq->read_ptr == txq->write_ptr)
			goto next_queue;

		if (freeze) {
			if (unlikely(time_after(now,
						txq->stuck_timer.expires))) {
				/*
				 * The timer should have fired, maybe it is
				 * spinning right now on the lock.
				 */
				goto next_queue;
			}
			/* remember how long until the timer fires */
			txq->frozen_expiry_remainder =
				txq->stuck_timer.expires - now;
			del_timer(&txq->stuck_timer);
			goto next_queue;
		}

		/*
		 * Wake a non-empty queue -> arm timer with the
		 * remainder before it froze
		 */
		mod_timer(&txq->stuck_timer,
			  now + txq->frozen_expiry_remainder);

next_queue:
		spin_unlock_bh(&txq->lock);
	}
}

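/*
 * Block or unblock hardware write-pointer updates for all data queues;
 * on the last unblock, push the pending write pointer to the device.
 */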
static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		struct iwl_txq *txq = &trans_pcie->txq[i];

		if (i == trans_pcie->cmd_queue)
			continue;

		spin_lock_bh(&txq->lock);

		if (!block && !(WARN_ON_ONCE(!txq->block))) {
			txq->block--;
			if (!txq->block) {
				iwl_write32(trans, HBUS_TARG_WRPTR,
					    txq->write_ptr | (i << 8));
			}
		} else if (block) {
			txq->block++;
		}

		spin_unlock_bh(&txq->lock);
	}
}

#define IWL_FLUSH_WAIT_MS	2000

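/* Dump scheduler (SCD) and FH state for a stuck TX queue */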
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 scd_sram_addr;
	u8 buf[16];
	int cnt;

	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->read_ptr, txq->write_ptr);

	if (trans->cfg->use_tfh)
		/* TODO: access new SCD registers and dump them */
		return;

	scd_sram_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq->id);
	iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

	iwl_print_hex_error(trans, buf, sizeof(buf));

	for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
			iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
		u32 tbl_dw =
			iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
					     SCD_TRANS_TBL_OFFSET_QUEUE(cnt));

		if (cnt & 0x1)
			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
		else
			tbl_dw = tbl_dw & 0x0000FFFF;

		IWL_ERR(trans,
			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
			cnt, active ? "" : "in", fifo, tbl_dw,
			iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
				(TFD_QUEUE_SIZE_MAX - 1),
			iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
	}
}

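/*
 * Wait up to IWL_FLUSH_WAIT_MS for the TX queues selected by txq_bm to
 * drain; log the scheduler state and return -ETIMEDOUT if they don't.
 */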
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	int cnt;
	unsigned long now = jiffies;
	int ret = 0;

	/* waiting for all the tx frames complete might take a while */
	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		u8 wr_ptr;

		if (cnt == trans_pcie->cmd_queue)
			continue;
		if (!test_bit(cnt, trans_pcie->queue_used))
			continue;
		if (!(BIT(cnt) & txq_bm))
			continue;

		IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
		txq = &trans_pcie->txq[cnt];
		wr_ptr = ACCESS_ONCE(txq->write_ptr);

		while (txq->read_ptr != ACCESS_ONCE(txq->write_ptr) &&
		       !time_after(jiffies,
				   now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
			u8 write_ptr = ACCESS_ONCE(txq->write_ptr);

			if (WARN_ONCE(wr_ptr != write_ptr,
				      "WR pointer moved while flushing %d -> %d\n",
				      wr_ptr, write_ptr))
				return -ETIMEDOUT;
			usleep_range(1000, 2000);
		}

		if (txq->read_ptr != txq->write_ptr) {
			IWL_ERR(trans,
				"fail to flush all tx fifo queues Q %d\n", cnt);
			ret = -ETIMEDOUT;
			break;
		}
		IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
	}

	if (ret)
		iwl_trans_pcie_log_scd_error(trans, txq);

	return ret;
}

static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
					 u32 mask, u32 value)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}

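/* Runtime PM reference counting; a no-op when d0i3 is disabled */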
static void iwl_trans_pcie_ref(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (iwlwifi_mod_params.d0i3_disable)
		return;

	pm_runtime_get(&trans_pcie->pci_dev->dev);

#ifdef CONFIG_PM
	IWL_DEBUG_RPM(trans, "runtime usage count: %d\n",
		      atomic_read(&trans_pcie->pci_dev->dev.power.usage_count));
#endif /* CONFIG_PM */
}

static void iwl_trans_pcie_unref(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (iwlwifi_mod_params.d0i3_disable)
		return;

	pm_runtime_mark_last_busy(&trans_pcie->pci_dev->dev);
	pm_runtime_put_autosuspend(&trans_pcie->pci_dev->dev);

#ifdef CONFIG_PM
	IWL_DEBUG_RPM(trans, "runtime usage count: %d\n",
		      atomic_read(&trans_pcie->pci_dev->dev.power.usage_count));
#endif /* CONFIG_PM */
}

static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_MONITOR_STATUS_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}

void iwl_pcie_dump_csr(struct iwl_trans *trans)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_MONITOR_STATUS_REG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};
	IWL_ERR(trans, "CSR values:\n");
	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(trans, "  %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(trans, csr_tbl[i]));
	}
}

#ifdef CONFIG_IWLWIFI_DEBUGFS
/* creation and removal of debugfs files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, trans,		\
				 &iwl_dbgfs_##name##_ops))		\
		goto err;						\
} while (0)

/* file operations */
#define DEBUGFS_READ_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	char *buf;
	int pos = 0;
	int cnt;
	int ret;
	size_t bufsz;

	bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues;

	if (!trans_pcie->txq)
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		txq = &trans_pcie->txq[cnt];
		pos += scnprintf(buf + pos, bufsz - pos,
				 "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
				 cnt, txq->read_ptr, txq->write_ptr,
				 !!test_bit(cnt, trans_pcie->queue_used),
				 !!test_bit(cnt, trans_pcie->queue_stopped),
				 txq->need_update, txq->frozen,
				 (cnt == trans_pcie->cmd_queue ? " HCMD" : ""));
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	char *buf;
	int pos = 0, i, ret;
	size_t bufsz = sizeof(char) * 121 * trans->num_rx_queues;

	if (!trans_pcie->rxq)
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n",
				 i);
		pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n",
				 rxq->read);
		pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n",
				 rxq->write);
		pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n",
				 rxq->write_actual);
		pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n",
				 rxq->need_update);
		pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
				 rxq->free_count);
		if (rxq->rb_stts) {
			pos += scnprintf(buf + pos, bufsz - pos,
					 "\tclosed_rb_num: %u\n",
					 le16_to_cpu(rxq->rb_stts->closed_rb_num) &
					 0x0FFF);
		} else {
			pos += scnprintf(buf + pos, bufsz - pos,
					 "\tclosed_rb_num: Not Allocated\n");
		}
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);

	return ret;
}

static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos,
			"Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
		isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
		isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
			"\tLast Restarting Code:  0x%X\n",
			isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
		isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
		isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
		"HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
		isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
		isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
		"Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
		isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
		isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	char buf[8];
	int buf_size;
	u32 reset_flag;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%x", &reset_flag) != 1)
		return -EFAULT;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}

static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int csr;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &csr) != 1)
		return -EFAULT;

	iwl_pcie_dump_csr(trans);

	return count;
}

static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf = NULL;
	ssize_t ret;

	ret = iwl_dump_fh(trans, &buf);
	if (ret < 0)
		return ret;
	if (!buf)
		return -EINVAL;
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
	kfree(buf);
	return ret;
}

DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);

/* Create the debugfs files and directories */
int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
{
	struct dentry *dir = trans->dbgfs_dir;

	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
	return 0;

err:
	IWL_ERR(trans, "failed to create the trans debugfs entry\n");
	return -ENOMEM;
}
#endif /* CONFIG_IWLWIFI_DEBUGFS */

static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 cmdlen = 0;
	int i;

	for (i = 0; i < trans_pcie->max_tbs; i++)
		cmdlen += iwl_pcie_tfd_tb_get_len(trans, tfd, i);

	return cmdlen;
}

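/* Copy the receive buffers (RBs) closed by the firmware into the dump */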
static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
				   struct iwl_fw_error_dump_data **data,
				   int allocated_rb_nums)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	/* Dump RBs is supported only for pre-9000 devices (1 queue) */
	struct iwl_rxq *rxq = &trans_pcie->rxq[0];
	u32 i, r, j, rb_len = 0;

	spin_lock(&rxq->lock);

	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;

	for (i = rxq->read, j = 0;
	     i != r && j < allocated_rb_nums;
	     i = (i + 1) & RX_QUEUE_MASK, j++) {
		struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
		struct iwl_fw_error_dump_rb *rb;

		dma_unmap_page(trans->dev, rxb->page_dma, max_len,
			       DMA_FROM_DEVICE);

		rb_len += sizeof(**data) + sizeof(*rb) + max_len;

		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
		(*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
		rb = (void *)(*data)->data;
		rb->index = cpu_to_le32(i);
		memcpy(rb->data, page_address(rxb->page), max_len);
		/* remap the page for the free benefit */
		rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0,
						     max_len,
						     DMA_FROM_DEVICE);

		*data = iwl_fw_error_next_data(*data);
	}

	spin_unlock(&rxq->lock);

	return rb_len;
}

#define IWL_CSR_TO_DUMP (0x250)

static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
				   struct iwl_fw_error_dump_data **data)
{
	u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
	__le32 *val;
	int i;

	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
	(*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
	val = (void *)(*data)->data;

	for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
		*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));

	*data = iwl_fw_error_next_data(*data);

	return csr_len;
}

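/* Dump the FH register range into the error dump, under NIC access */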
static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
				       struct iwl_fw_error_dump_data **data)
{
	u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
	unsigned long flags;
	__le32 *val;
	int i;

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return 0;

	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
	(*data)->len = cpu_to_le32(fh_regs_len);
	val = (void *)(*data)->data;

	for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; i += sizeof(u32))
		*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));

	iwl_trans_release_nic_access(trans, &flags);

	*data = iwl_fw_error_next_data(*data);

	return sizeof(**data) + fh_regs_len;
}

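/*
 * Read the MARBH monitor buffer out word by word through the DMARB
 * read-control/read-data periphery registers.
 */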
static u32
iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
				 struct iwl_fw_error_dump_fw_mon *fw_mon_data,
				 u32 monitor_len)
{
	u32 buf_size_in_dwords = (monitor_len >> 2);
	u32 *buffer = (u32 *)fw_mon_data->data;
	unsigned long flags;
	u32 i;

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return 0;

	iwl_write_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
	for (i = 0; i < buf_size_in_dwords; i++)
		buffer[i] = iwl_read_prph_no_grab(trans,
				MON_DMARB_RD_DATA_ADDR);
	iwl_write_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0);

	iwl_trans_release_nic_access(trans, &flags);

	return monitor_len;
}

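/* Append the firmware monitor (DRAM, SMEM or MARBH mode) to the dump */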
static u32
iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
			    struct iwl_fw_error_dump_data **data,
			    u32 monitor_len)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 len = 0;

	if ((trans_pcie->fw_mon_page &&
	     trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
	    trans->dbg_dest_tlv) {
		struct iwl_fw_error_dump_fw_mon *fw_mon_data;
		u32 base, write_ptr, wrap_cnt;

		/* If there was a dest TLV - use the values from there */
		if (trans->dbg_dest_tlv) {
			write_ptr =
				le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
			wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
			base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
		} else {
			base = MON_BUFF_BASE_ADDR;
			write_ptr = MON_BUFF_WRPTR;
			wrap_cnt = MON_BUFF_CYCLE_CNT;
		}

		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
		fw_mon_data = (void *)(*data)->data;
		fw_mon_data->fw_mon_wr_ptr =
			cpu_to_le32(iwl_read_prph(trans, write_ptr));
		fw_mon_data->fw_mon_cycle_cnt =
			cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
		fw_mon_data->fw_mon_base_ptr =
			cpu_to_le32(iwl_read_prph(trans, base));

		len += sizeof(**data) + sizeof(*fw_mon_data);
		if (trans_pcie->fw_mon_page) {
			/*
			 * The firmware is now asserted, it won't write anything
			 * to the buffer. CPU can take ownership to fetch the
			 * data. The buffer will be handed back to the device
			 * before the firmware will be restarted.
			 */
			dma_sync_single_for_cpu(trans->dev,
						trans_pcie->fw_mon_phys,
						trans_pcie->fw_mon_size,
						DMA_FROM_DEVICE);
			memcpy(fw_mon_data->data,
			       page_address(trans_pcie->fw_mon_page),
			       trans_pcie->fw_mon_size);

			monitor_len = trans_pcie->fw_mon_size;
		} else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
			/*
			 * Update pointers to reflect actual values after
			 * shifting
			 */
			base = iwl_read_prph(trans, base) <<
			       trans->dbg_dest_tlv->base_shift;
			iwl_trans_read_mem(trans, base, fw_mon_data->data,
					   monitor_len / sizeof(u32));
		} else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
			monitor_len =
				iwl_trans_pci_dump_marbh_monitor(trans,
								 fw_mon_data,
								 monitor_len);
		} else {
			/* Didn't match anything - output no monitor data */
			monitor_len = 0;
		}

		len += monitor_len;
		(*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
	}

	return len;
}

static struct iwl_trans_dump_data
*iwl_trans_pcie_dump_data(struct iwl_trans *trans,
			  const struct iwl_fw_dbg_trigger_tlv *trigger)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_fw_error_dump_data *data;
	struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_fw_error_dump_txcmd *txcmd;
	struct iwl_trans_dump_data *dump_data;
	u32 len, num_rbs;
	u32 monitor_len;
	int i, ptr;
	bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
			!trans->cfg->mq_rx_supported;

	/* transport dump header */
	len = sizeof(*dump_data);

	/* host commands */
	len += sizeof(*data) +
		cmdq->n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);

	/* FW monitor */
	if (trans_pcie->fw_mon_page) {
		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
		       trans_pcie->fw_mon_size;
		monitor_len = trans_pcie->fw_mon_size;
	} else if (trans->dbg_dest_tlv) {
		u32 base, end;

		base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
		end = le32_to_cpu(trans->dbg_dest_tlv->end_reg);

		base = iwl_read_prph(trans, base) <<
		       trans->dbg_dest_tlv->base_shift;
		end = iwl_read_prph(trans, end) <<
		      trans->dbg_dest_tlv->end_shift;

		/* Make "end" point to the actual end */
		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 ||
		    trans->dbg_dest_tlv->monitor_mode == MARBH_MODE)
			end += (1 << trans->dbg_dest_tlv->end_shift);
		monitor_len = end - base;
		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
		       monitor_len;
	} else {
		monitor_len = 0;
	}

	if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) {
		dump_data = vzalloc(len);
		if (!dump_data)
			return NULL;

		data = (void *)dump_data->data;
		len = iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
		dump_data->len = len;

		return dump_data;
	}

	/* CSR registers */
	len += sizeof(*data) + IWL_CSR_TO_DUMP;

	/* FH registers */
	len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);

	if (dump_rbs) {
		/* Dump RBs is supported only for pre-9000 devices (1 queue) */
		struct iwl_rxq *rxq = &trans_pcie->rxq[0];
		/* RBs */
		num_rbs = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num))
				      & 0x0FFF;
		num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
		len += num_rbs * (sizeof(*data) +
				  sizeof(struct iwl_fw_error_dump_rb) +
				  (PAGE_SIZE << trans_pcie->rx_page_order));
	}

	dump_data = vzalloc(len);
	if (!dump_data)
		return NULL;

	len = 0;
	data = (void *)dump_data->data;
	data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
	txcmd = (void *)data->data;
	spin_lock_bh(&cmdq->lock);
	ptr = cmdq->write_ptr;
	for (i = 0; i < cmdq->n_window; i++) {
		u8 idx = get_cmd_index(cmdq, ptr);
		u32 caplen, cmdlen;

		cmdlen = iwl_trans_pcie_get_cmdlen(trans, cmdq->tfds +
						   trans_pcie->tfd_size * ptr);
		caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);

		if (cmdlen) {
			len += sizeof(*txcmd) + caplen;
			txcmd->cmdlen = cpu_to_le32(cmdlen);
			txcmd->caplen = cpu_to_le32(caplen);
			memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
			txcmd = (void *)((u8 *)txcmd->data + caplen);
		}

		ptr = iwl_queue_dec_wrap(ptr);
	}
	spin_unlock_bh(&cmdq->lock);

	data->len = cpu_to_le32(len);
	len += sizeof(*data);
	data = iwl_fw_error_next_data(data);

	len += iwl_trans_pcie_dump_csr(trans, &data);
	len += iwl_trans_pcie_fh_regs_dump(trans, &data);
	if (dump_rbs)
		len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);

	len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);

	dump_data->len = len;

	return dump_data;
}

#ifdef CONFIG_PM_SLEEP
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
	if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3)
		return iwl_pci_fw_enter_d0i3(trans);

	return 0;
}

static void iwl_trans_pcie_resume(struct iwl_trans *trans)
{
	if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3)
		iwl_pci_fw_exit_d0i3(trans);
}
#endif /* CONFIG_PM_SLEEP */

static const struct iwl_trans_ops trans_ops_pcie = {
	.start_hw = iwl_trans_pcie_start_hw,
	.op_mode_leave = iwl_trans_pcie_op_mode_leave,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	.d3_suspend = iwl_trans_pcie_d3_suspend,
	.d3_resume = iwl_trans_pcie_d3_resume,

#ifdef CONFIG_PM_SLEEP
	.suspend = iwl_trans_pcie_suspend,
	.resume = iwl_trans_pcie_resume,
#endif /* CONFIG_PM_SLEEP */

	.send_cmd = iwl_trans_pcie_send_hcmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.txq_disable = iwl_trans_pcie_txq_disable,
	.txq_enable = iwl_trans_pcie_txq_enable,

	.get_txq_byte_table = iwl_trans_pcie_get_txq_byte_table,

	.txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode,

	.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
	.freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,
	.block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs,

	.write8 = iwl_trans_pcie_write8,
	.write32 = iwl_trans_pcie_write32,
	.read32 = iwl_trans_pcie_read32,
	.read_prph = iwl_trans_pcie_read_prph,
	.write_prph = iwl_trans_pcie_write_prph,
	.read_mem = iwl_trans_pcie_read_mem,
	.write_mem = iwl_trans_pcie_write_mem,
	.configure = iwl_trans_pcie_configure,
	.set_pmi = iwl_trans_pcie_set_pmi,
	.grab_nic_access = iwl_trans_pcie_grab_nic_access,
	.release_nic_access = iwl_trans_pcie_release_nic_access,
	.set_bits_mask = iwl_trans_pcie_set_bits_mask,

	.ref = iwl_trans_pcie_ref,
	.unref = iwl_trans_pcie_unref,

	.dump_data = iwl_trans_pcie_dump_data,
};

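/*
 * Allocate and initialize the PCIe transport: enable the PCI device,
 * map BAR0, set up the DMA masks and interrupts and read out the
 * hardware revision.
 */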
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg)
{
	struct iwl_trans_pcie *trans_pcie;
	struct iwl_trans *trans;
	int ret, addr_size;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ERR_PTR(ret);

	trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
				&pdev->dev, cfg, &trans_ops_pcie, 0);
	if (!trans)
		return ERR_PTR(-ENOMEM);

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->trans = trans;
	spin_lock_init(&trans_pcie->irq_lock);
	spin_lock_init(&trans_pcie->reg_lock);
	mutex_init(&trans_pcie->mutex);
	init_waitqueue_head(&trans_pcie->ucode_write_waitq);
	trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
	if (!trans_pcie->tso_hdr_page) {
		ret = -ENOMEM;
		goto out_no_pci;
	}

	if (!cfg->base_params->pcie_l1_allowed) {
		/*
		 * W/A - seems to solve weird behavior. We need to remove this
		 * if we don't want to stay in L1 all the time. This wastes a
		 * lot of power.
		 */
		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
				       PCIE_LINK_STATE_L1 |
				       PCIE_LINK_STATE_CLKPM);
	}

	if (cfg->use_tfh) {
		addr_size = 64;
		trans_pcie->max_tbs = IWL_TFH_NUM_TBS;
		trans_pcie->tfd_size = sizeof(struct iwl_tfh_tfd);
	} else {
		addr_size = 36;
		trans_pcie->max_tbs = IWL_NUM_OF_TBS;
		trans_pcie->tfd_size = sizeof(struct iwl_tfd);
	}
	trans->max_skb_frags = IWL_PCIE_MAX_FRAGS(trans_pcie);

	pci_set_master(pdev);

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size));
	if (!ret)
		ret = pci_set_consistent_dma_mask(pdev,
						  DMA_BIT_MASK(addr_size));
	if (ret) {
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!ret)
			ret = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (ret) {
			dev_err(&pdev->dev, "No suitable DMA available\n");
			goto out_no_pci;
		}
	}

	ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME);
	if (ret) {
		dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n");
		goto out_no_pci;
	}

	trans_pcie->hw_base = pcim_iomap_table(pdev)[0];
	if (!trans_pcie->hw_base) {
		dev_err(&pdev->dev, "pcim_iomap_table failed\n");
		ret = -ENODEV;
		goto out_no_pci;
	}

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	trans->dev = &pdev->dev;
	trans_pcie->pci_dev = pdev;
	iwl_disable_interrupts(trans);

	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
		unsigned long flags;

		trans->hw_rev = (trans->hw_rev & 0xfff0) |
				(CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);

		ret = iwl_pcie_prepare_card_hw(trans);
		if (ret) {
			IWL_WARN(trans, "Exit HW not ready\n");
			goto out_no_pci;
		}

		/*
		 * in-order to recognize C step driver should read chip version
		 * id located at the AUX bus MISC address space.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		udelay(2);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   25000);
		if (ret < 0) {
			IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n");
			goto out_no_pci;
		}

		if (iwl_trans_grab_nic_access(trans, &flags)) {
			u32 hw_step;

			hw_step = iwl_read_prph_no_grab(trans, WFPM_CTRL_REG);
			hw_step |= ENABLE_WFPM;
			iwl_write_prph_no_grab(trans, WFPM_CTRL_REG, hw_step);
			hw_step = iwl_read_prph_no_grab(trans, AUX_MISC_REG);
			hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF;
			if (hw_step == 0x3)
				trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) |
						(SILICON_C_STEP << 2);
			iwl_trans_release_nic_access(trans, &flags);
		}
	}

	trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID);

	iwl_pcie_set_interrupt_capa(pdev, trans);
	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans_pcie->wait_command_queue);

	init_waitqueue_head(&trans_pcie->d0i3_waitq);

	if (trans_pcie->msix_enabled) {
		/* capture the error code so we don't return ERR_PTR(0) */
		ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
		if (ret)
			goto out_no_pci;
	} else {
		ret = iwl_pcie_alloc_ict(trans);
		if (ret)
			goto out_no_pci;

		ret = devm_request_threaded_irq(&pdev->dev, pdev->irq,
						iwl_pcie_isr,
						iwl_pcie_irq_handler,
						IRQF_SHARED, DRV_NAME, trans);
		if (ret) {
			IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
			goto out_free_ict;
		}
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
	}

#ifdef CONFIG_IWLWIFI_PCIE_RTPM
	trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3;
#else
	trans->runtime_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
#endif /* CONFIG_IWLWIFI_PCIE_RTPM */

	return trans;

out_free_ict:
	iwl_pcie_free_ict(trans);
out_no_pci:
	free_percpu(trans_pcie->tso_hdr_page);
	iwl_trans_free(trans);
	return ERR_PTR(ret);
}