/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>
#include <linux/netdevice.h>
#include <linux/acpi.h>

#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "iwl-fw.h"
#include "iwl-debug.h"
#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-prph.h"
#include "iwl-eeprom-parse.h"

#include "mvm.h"
#include "fw-dbg.h"
#include "iwl-phy-db.h"

#define MVM_UCODE_ALIVE_TIMEOUT	HZ
#define MVM_UCODE_CALIB_TIMEOUT	(2*HZ)

#define UCODE_VALID_OK	cpu_to_le32(0x1)

struct iwl_mvm_alive_data {
	bool valid;
	u32 scd_base_addr;
};

static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
{
	struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
		.valid = cpu_to_le32(valid_tx_ant),
	};

	IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
	return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
				    sizeof(tx_ant_cmd), &tx_ant_cmd);
}

static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
{
	int i;
	struct iwl_rss_config_cmd cmd = {
		.flags = cpu_to_le32(IWL_RSS_ENABLE),
		.hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
			     IWL_RSS_HASH_TYPE_IPV4_UDP |
			     IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
			     IWL_RSS_HASH_TYPE_IPV6_TCP |
			     IWL_RSS_HASH_TYPE_IPV6_UDP |
			     IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
	};

	if (mvm->trans->num_rx_queues == 1)
		return 0;

	/* Do not direct RSS traffic to Q 0 which is our fallback queue */
	for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
		cmd.indirection_table[i] =
			1 + (i % (mvm->trans->num_rx_queues - 1));
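	/*
	 * For example, with 4 RX queues the indirection table cycles
	 * 1, 2, 3, 1, 2, 3, ... so RSS traffic is spread evenly over
	 * queues 1..3 and never lands on the fallback queue 0.
	 */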
	netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));

	return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
}

static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
{
	struct iwl_dqa_enable_cmd dqa_cmd = {
		.cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE),
	};
	u32 cmd_id = iwl_cmd_id(DQA_ENABLE_CMD, DATA_PATH_GROUP, 0);
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send DQA enabling command: %d\n", ret);
	else
		IWL_DEBUG_FW(mvm, "Working in DQA mode\n");

	return ret;
}

void iwl_free_fw_paging(struct iwl_mvm *mvm)
{
	int i;

	if (!mvm->fw_paging_db[0].fw_paging_block)
		return;

	for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
		struct iwl_fw_paging *paging = &mvm->fw_paging_db[i];

		if (!paging->fw_paging_block) {
			IWL_DEBUG_FW(mvm,
				     "Paging: block %d already freed, continue to next page\n",
				     i);

			continue;
		}
		dma_unmap_page(mvm->trans->dev, paging->fw_paging_phys,
			       paging->fw_paging_size, DMA_BIDIRECTIONAL);

		__free_pages(paging->fw_paging_block,
			     get_order(paging->fw_paging_size));
		paging->fw_paging_block = NULL;
	}
	kfree(mvm->trans->paging_download_buf);
	mvm->trans->paging_download_buf = NULL;
	mvm->trans->paging_db = NULL;

	memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
}

static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
{
	int sec_idx, idx;
	u32 offset = 0;

	/*
	 * Find where the paging image starts. If a CPU2 image exists and is
	 * in paging format, the image looks like this:
	 * CPU1 sections (2 or more)
	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
	 * CPU2 sections (not paged)
	 * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged CPU2
	 * sections from the CPU2 paging sections
	 * CPU2 paging CSS
	 * CPU2 paging image (including instructions and data)
	 */
	for (sec_idx = 0; sec_idx < image->num_sec; sec_idx++) {
		if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
			sec_idx++;
			break;
		}
	}

	/*
	 * If paging is enabled there should be at least 2 more sections left
	 * (one for CSS and one for Paging data)
	 */
	if (sec_idx >= image->num_sec - 1) {
		IWL_ERR(mvm, "Paging: Missing CSS and/or paging sections\n");
		iwl_free_fw_paging(mvm);
		return -EINVAL;
	}

	/* copy the CSS block to the dram */
	IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n",
		     sec_idx);

	memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
	       image->sec[sec_idx].data,
	       mvm->fw_paging_db[0].fw_paging_size);

	IWL_DEBUG_FW(mvm,
		     "Paging: copied %d CSS bytes to first block\n",
		     mvm->fw_paging_db[0].fw_paging_size);

	sec_idx++;

	/*
	 * Copy the paging blocks to the dram.
	 * The loop index starts from 1 because the CSS block (index 0) was
	 * already copied above.
	 * The loop stops before num_of_paging_blk because the last block is
	 * not full and is copied separately below.
	 */
	for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
		memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
		       image->sec[sec_idx].data + offset,
		       mvm->fw_paging_db[idx].fw_paging_size);

		IWL_DEBUG_FW(mvm,
			     "Paging: copied %d paging bytes to block %d\n",
			     mvm->fw_paging_db[idx].fw_paging_size,
			     idx);

		offset += mvm->fw_paging_db[idx].fw_paging_size;
	}

	/* copy the last paging block */
	if (mvm->num_of_pages_in_last_blk > 0) {
		memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
		       image->sec[sec_idx].data + offset,
		       FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);

		IWL_DEBUG_FW(mvm,
			     "Paging: copied %d pages in the last block %d\n",
			     mvm->num_of_pages_in_last_blk, idx);
	}

	return 0;
}

static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
				   const struct fw_img *image)
{
	struct page *block;
	dma_addr_t phys = 0;
	int blk_idx, order, num_of_pages, size, dma_enabled;

	if (mvm->fw_paging_db[0].fw_paging_block)
		return 0;

	dma_enabled = is_device_dma_capable(mvm->trans->dev);

	/* ensure that BLOCK_2_EXP_SIZE is the log2 of PAGING_BLOCK_SIZE */
	BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);

	num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
	mvm->num_of_paging_blk =
		DIV_ROUND_UP(num_of_pages, NUM_OF_PAGE_PER_GROUP);
	mvm->num_of_pages_in_last_blk =
		num_of_pages -
		NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);
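	/*
	 * Illustrative example: a 560KB paging image holds 140 pages of
	 * 4KB each, so num_of_paging_blk = DIV_ROUND_UP(140, 8) = 18 blocks
	 * of 8 pages, and the last block carries 140 - 8 * 17 = 4 pages.
	 */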

	IWL_DEBUG_FW(mvm,
		     "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
		     mvm->num_of_paging_blk,
		     mvm->num_of_pages_in_last_blk);

	/*
	 * Allocate CSS and paging blocks in dram.
	 */
	for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
		/* For CSS allocate 4KB, for others PAGING_BLOCK_SIZE (32K) */
		size = blk_idx ? PAGING_BLOCK_SIZE : FW_PAGING_SIZE;
		order = get_order(size);
		block = alloc_pages(GFP_KERNEL, order);
		if (!block) {
			/* free all the previous pages since we failed */
			iwl_free_fw_paging(mvm);
			return -ENOMEM;
		}

		mvm->fw_paging_db[blk_idx].fw_paging_block = block;
		mvm->fw_paging_db[blk_idx].fw_paging_size = size;

		if (dma_enabled) {
			phys = dma_map_page(mvm->trans->dev, block, 0,
					    PAGE_SIZE << order,
					    DMA_BIDIRECTIONAL);
			if (dma_mapping_error(mvm->trans->dev, phys)) {
				/*
				 * free the previous pages and the current one
				 * since we failed to map_page.
				 */
				iwl_free_fw_paging(mvm);
				return -ENOMEM;
			}
			mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
		} else {
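			/*
			 * Without DMA, store a pseudo-address built from the
			 * block index; PAGING_ADDR_SIG presumably marks it so
			 * the transport can tell it apart from a real DMA
			 * address.
			 */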
			mvm->fw_paging_db[blk_idx].fw_paging_phys =
				PAGING_ADDR_SIG |
				blk_idx << BLOCK_2_EXP_SIZE;
		}

		if (!blk_idx)
			IWL_DEBUG_FW(mvm,
				     "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
				     order);
		else
			IWL_DEBUG_FW(mvm,
				     "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
				     order);
	}

	return 0;
}

static int iwl_save_fw_paging(struct iwl_mvm *mvm,
			      const struct fw_img *fw)
{
	int ret;

	ret = iwl_alloc_fw_paging_mem(mvm, fw);
	if (ret)
		return ret;

	return iwl_fill_paging_mem(mvm, fw);
}

/* send paging cmd to FW in case CPU2 has paging image */
static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
{
	struct iwl_fw_paging_cmd paging_cmd = {
		.flags =
			cpu_to_le32(PAGING_CMD_IS_SECURED |
				    PAGING_CMD_IS_ENABLED |
				    (mvm->num_of_pages_in_last_blk <<
				    PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
		.block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
		.block_num = cpu_to_le32(mvm->num_of_paging_blk),
	};
	int blk_idx, size = sizeof(paging_cmd);

	/* A bit hard coded - but this is the old API and will be deprecated */
	if (!iwl_mvm_has_new_tx_api(mvm))
		size -= NUM_OF_FW_PAGING_BLOCKS * 4;
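	/*
	 * sizeof(paging_cmd) covers the 64-bit (addr64) address array; the
	 * old API carries 32-bit (addr32) addresses instead, i.e. 4 fewer
	 * bytes per block.
	 */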

	/* loop over all paging blocks + CSS block */
	for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
		dma_addr_t addr = mvm->fw_paging_db[blk_idx].fw_paging_phys;

		addr = addr >> PAGE_2_EXP_SIZE;
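		/* the FW takes block addresses in units of FW pages, hence
		 * the shift by PAGE_2_EXP_SIZE above
		 */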

		if (iwl_mvm_has_new_tx_api(mvm)) {
			__le64 phy_addr = cpu_to_le64(addr);

			paging_cmd.device_phy_addr.addr64[blk_idx] = phy_addr;
		} else {
			__le32 phy_addr = cpu_to_le32(addr);

			paging_cmd.device_phy_addr.addr32[blk_idx] = phy_addr;
		}
	}

	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
						    IWL_ALWAYS_LONG_GROUP, 0),
				    0, size, &paging_cmd);
}

/*
 * Send paging item cmd to FW in case CPU2 has paging image
 */
static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
{
	int ret;
	struct iwl_fw_get_item_cmd fw_get_item_cmd = {
		.item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
	};

	struct iwl_fw_get_item_resp *item_resp;
	struct iwl_host_cmd cmd = {
		.id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
		.data = { &fw_get_item_cmd, },
	};

	cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret) {
		IWL_ERR(mvm,
			"Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
			ret);
		return ret;
	}

	item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
	if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
		IWL_ERR(mvm,
			"Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
			le32_to_cpu(item_resp->item_id));
		ret = -EIO;
		goto exit;
	}

	/* Add an extra page for headers */
	mvm->trans->paging_download_buf = kzalloc(PAGING_BLOCK_SIZE +
						  FW_PAGING_SIZE,
						  GFP_KERNEL);
	if (!mvm->trans->paging_download_buf) {
		ret = -ENOMEM;
		goto exit;
	}
	mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
	mvm->trans->paging_db = mvm->fw_paging_db;
	IWL_DEBUG_FW(mvm,
		     "Paging: got paging request address (paging_req_addr 0x%08x)\n",
		     mvm->trans->paging_req_addr);

exit:
	iwl_free_resp(&cmd);

	return ret;
}

static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
			 struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_alive_data *alive_data = data;
	struct mvm_alive_resp_ver1 *palive1;
	struct mvm_alive_resp_ver2 *palive2;
	struct mvm_alive_resp *palive;
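
	/*
	 * Three layouts of the alive notification exist; they are told
	 * apart by the payload length and each branch below fills the
	 * mvm / alive_data fields from the matching version.
	 */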

	if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
		palive1 = (void *)pkt->data;

		mvm->support_umac_log = false;
		mvm->error_event_table =
			le32_to_cpu(palive1->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive1->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive1->scd_base_ptr);

		alive_data->valid = le16_to_cpu(palive1->status) ==
				    IWL_ALIVE_STATUS_OK;
		IWL_DEBUG_FW(mvm,
			     "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive1->status), palive1->ver_type,
			     palive1->ver_subtype, palive1->flags);
	} else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
		palive2 = (void *)pkt->data;

		mvm->error_event_table =
			le32_to_cpu(palive2->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive2->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
		mvm->umac_error_event_table =
			le32_to_cpu(palive2->error_info_addr);
		mvm->sf_space.addr = le32_to_cpu(palive2->st_fwrd_addr);
		mvm->sf_space.size = le32_to_cpu(palive2->st_fwrd_size);

		alive_data->valid = le16_to_cpu(palive2->status) ==
				    IWL_ALIVE_STATUS_OK;
		if (mvm->umac_error_event_table)
			mvm->support_umac_log = true;

		IWL_DEBUG_FW(mvm,
			     "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive2->status), palive2->ver_type,
			     palive2->ver_subtype, palive2->flags);

		IWL_DEBUG_FW(mvm,
			     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			     palive2->umac_major, palive2->umac_minor);
	} else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		palive = (void *)pkt->data;

		mvm->error_event_table =
			le32_to_cpu(palive->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
		mvm->umac_error_event_table =
			le32_to_cpu(palive->error_info_addr);
		mvm->sf_space.addr = le32_to_cpu(palive->st_fwrd_addr);
		mvm->sf_space.size = le32_to_cpu(palive->st_fwrd_size);

		alive_data->valid = le16_to_cpu(palive->status) ==
				    IWL_ALIVE_STATUS_OK;
		if (mvm->umac_error_event_table)
			mvm->support_umac_log = true;

		IWL_DEBUG_FW(mvm,
			     "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive->status), palive->ver_type,
			     palive->ver_subtype, palive->flags);

		IWL_DEBUG_FW(mvm,
			     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			     le32_to_cpu(palive->umac_major),
			     le32_to_cpu(palive->umac_minor));
	}

	return true;
}

static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
				  struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_phy_db *phy_db = data;

	if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
		WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
		return true;
	}

	WARN_ON(iwl_phy_db_set_section(phy_db, pkt));

	return false;
}

static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
					 enum iwl_ucode_type ucode_type)
{
	struct iwl_notification_wait alive_wait;
	struct iwl_mvm_alive_data alive_data;
	const struct fw_img *fw;
	int ret, i;
	enum iwl_ucode_type old_type = mvm->cur_ucode;
	static const u16 alive_cmd[] = { MVM_ALIVE };
	struct iwl_sf_region st_fwrd_space;

	if (ucode_type == IWL_UCODE_REGULAR &&
	    iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
	    !(fw_has_capa(&mvm->fw->ucode_capa,
			  IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
		fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER);
	else
		fw = iwl_get_ucode_image(mvm->fw, ucode_type);
	if (WARN_ON(!fw))
		return -EINVAL;
	mvm->cur_ucode = ucode_type;
	mvm->ucode_loaded = false;

	iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
				   alive_cmd, ARRAY_SIZE(alive_cmd),
				   iwl_alive_fn, &alive_data);

	ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
	if (ret) {
		mvm->cur_ucode = old_type;
		iwl_remove_notification(&mvm->notif_wait, &alive_wait);
		return ret;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
				    MVM_UCODE_ALIVE_TIMEOUT);
	if (ret) {
		if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
			IWL_ERR(mvm,
				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
				iwl_read_prph(mvm->trans, SB_CPU_1_STATUS),
				iwl_read_prph(mvm->trans, SB_CPU_2_STATUS));
		mvm->cur_ucode = old_type;
		return ret;
	}

	if (!alive_data.valid) {
		IWL_ERR(mvm, "Loaded ucode is not valid!\n");
		mvm->cur_ucode = old_type;
		return -EIO;
	}

	/*
	 * update the sdio allocation according to the pointer we get in the
	 * alive notification.
	 */
	st_fwrd_space.addr = mvm->sf_space.addr;
	st_fwrd_space.size = mvm->sf_space.size;
	ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space);
	if (ret) {
		IWL_ERR(mvm, "Failed to update SF size. ret %d\n", ret);
		return ret;
	}

	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);

	/*
	 * Configure and operate the fw paging mechanism.
	 * The driver configures the paging flow only once; the CPU2 paging
	 * image is included in the IWL_UCODE_INIT image.
	 */
	if (fw->paging_mem_size) {
		/*
		 * When DMA is not enabled, the driver needs to copy / write
		 * the downloaded / uploaded page to / from the SMEM.
		 * This gets the location where the pages are stored.
		 */
		if (!is_device_dma_capable(mvm->trans->dev)) {
			ret = iwl_trans_get_paging_item(mvm);
			if (ret) {
				IWL_ERR(mvm, "failed to get FW paging item\n");
				return ret;
			}
		}

		ret = iwl_save_fw_paging(mvm, fw);
		if (ret) {
			IWL_ERR(mvm, "failed to save the FW paging image\n");
			return ret;
		}

		ret = iwl_send_paging_cmd(mvm, fw);
		if (ret) {
			IWL_ERR(mvm, "failed to send the paging cmd\n");
			iwl_free_fw_paging(mvm);
			return ret;
		}
	}

	/*
	 * Note: all the queues are enabled as part of the interface
	 * initialization, but in firmware restart scenarios they
	 * could be stopped, so wake them up. In firmware restart,
	 * mac80211 will have the queues stopped as well until the
	 * reconfiguration completes. During normal startup, they
	 * will be empty.
	 */

	memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
	if (iwl_mvm_is_dqa_supported(mvm))
		mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
	else
		mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;

	for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
		atomic_set(&mvm->mac80211_queue_stop_count[i], 0);

	mvm->ucode_loaded = true;

	return 0;
}

static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
	struct iwl_phy_cfg_cmd phy_cfg_cmd;
	enum iwl_ucode_type ucode_type = mvm->cur_ucode;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
	phy_cfg_cmd.calib_control.event_trigger =
		mvm->fw->default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
		mvm->fw->default_calib[ucode_type].flow_trigger;

	IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
		       phy_cfg_cmd.phy_cfg);

	return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
				    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}

int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
	struct iwl_notification_wait calib_wait;
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
		CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(mvm->calibrating))
		return 0;

	iwl_init_notification_wait(&mvm->notif_wait,
				   &calib_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   iwl_wait_phy_db_entry,
				   mvm->phy_db);

	/* Will also start the device */
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
	if (ret) {
		IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
		goto error;
	}

	ret = iwl_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	/* Read the NVM only at driver load time, no need to do this twice */
	if (read_nvm) {
		/* Read nvm */
		ret = iwl_nvm_init(mvm, true);
		if (ret) {
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			goto error;
		}
	}

	/* In case we read the NVM from external file, load it to the NIC */
	if (mvm->nvm_file_name)
		iwl_mvm_load_nvm_to_nic(mvm);

	ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
	WARN_ON(ret);

	/*
	 * abort after reading the nvm in case RF Kill is on, we will complete
	 * the init seq later when RF kill will switch to off
	 */
	if (iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm,
				  "jump over all phy activities due to RF kill\n");
		iwl_remove_notification(&mvm->notif_wait, &calib_wait);
		ret = 1;
		goto out;
	}

	mvm->calibrating = true;

	/* Send TX valid antennas before triggering calibrations */
	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
			ret);
		goto error;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the calibration complete notification.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
			MVM_UCODE_CALIB_TIMEOUT);

	if (ret && iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
		ret = 1;
	}
	goto out;

error:
	iwl_remove_notification(&mvm->notif_wait, &calib_wait);
out:
	mvm->calibrating = false;
	if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
		/* we want to debug INIT and we have no NVM - fake */
		mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
					sizeof(struct ieee80211_channel) +
					sizeof(struct ieee80211_rate),
					GFP_KERNEL);
		if (!mvm->nvm_data)
			return -ENOMEM;
		mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
		mvm->nvm_data->bands[0].n_channels = 1;
		mvm->nvm_data->bands[0].n_bitrates = 1;
		mvm->nvm_data->bands[0].bitrates =
			(void *)mvm->nvm_data->channels + 1;
		mvm->nvm_data->bands[0].bitrates->hw_value = 10;
	}

	return ret;
}

static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm,
					  struct iwl_rx_packet *pkt)
{
	struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data;
	int i;

	mvm->shared_mem_cfg.num_txfifo_entries =
		ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size);
	for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
		mvm->shared_mem_cfg.txfifo_size[i] =
			le32_to_cpu(mem_cfg->txfifo_size[i]);
	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
		mvm->shared_mem_cfg.rxfifo_size[i] =
			le32_to_cpu(mem_cfg->rxfifo_size[i]);

	BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
		     sizeof(mem_cfg->internal_txfifo_size));

	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
	     i++)
		mvm->shared_mem_cfg.internal_txfifo_size[i] =
			le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
}

static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm,
				     struct iwl_rx_packet *pkt)
{
	struct iwl_shared_mem_cfg_v1 *mem_cfg = (void *)pkt->data;
	int i;

	mvm->shared_mem_cfg.num_txfifo_entries =
		ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size);
	for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
		mvm->shared_mem_cfg.txfifo_size[i] =
			le32_to_cpu(mem_cfg->txfifo_size[i]);
	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
		mvm->shared_mem_cfg.rxfifo_size[i] =
			le32_to_cpu(mem_cfg->rxfifo_size[i]);

	/* new API has more data, from rxfifo_addr field and on */
	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
		BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
			     sizeof(mem_cfg->internal_txfifo_size));

		for (i = 0;
		     i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
		     i++)
			mvm->shared_mem_cfg.internal_txfifo_size[i] =
				le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
	}
}

static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
{
	struct iwl_host_cmd cmd = {
		.flags = CMD_WANT_SKB,
		.data = { NULL, },
		.len = { 0, },
	};
	struct iwl_rx_packet *pkt;

	lockdep_assert_held(&mvm->mutex);

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
		cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
	else
		cmd.id = SHARED_MEM_CFG;

	if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
		return;

	pkt = cmd.resp_pkt;
	if (iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_parse_shared_mem_a000(mvm, pkt);
	else
		iwl_mvm_parse_shared_mem(mvm, pkt);

	IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");

	iwl_free_resp(&cmd);
}

static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
{
	struct iwl_ltr_config_cmd cmd = {
		.flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
	};

	if (!mvm->trans->ltr_enabled)
		return 0;

	return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
				    sizeof(cmd), &cmd);
}

#define ACPI_WRDS_METHOD	"WRDS"
#define ACPI_WRDS_WIFI		(0x07)
#define ACPI_WRDS_TABLE_SIZE	10
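/* one entry per (chain, sub-band) pair; see the BUILD_BUG_ON against
 * IWL_NUM_CHAIN_LIMITS * IWL_NUM_SUB_BANDS in iwl_mvm_sar_init()
 */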

struct iwl_mvm_sar_table {
	bool enabled;
	u8 values[ACPI_WRDS_TABLE_SIZE];
};

#ifdef CONFIG_ACPI
static int iwl_mvm_sar_get_wrds(struct iwl_mvm *mvm, union acpi_object *wrds,
				struct iwl_mvm_sar_table *sar_table)
{
	union acpi_object *data_pkg;
	u32 i;

	/* We need at least two packages, one for the revision and one
	 * for the data itself.  Also check that the revision is valid
	 * (i.e. it is an integer set to 0).
	 */
	if (wrds->type != ACPI_TYPE_PACKAGE ||
	    wrds->package.count < 2 ||
	    wrds->package.elements[0].type != ACPI_TYPE_INTEGER ||
	    wrds->package.elements[0].integer.value != 0) {
		IWL_DEBUG_RADIO(mvm, "Unsupported wrds structure\n");
		return -EINVAL;
	}

	/* loop through all the packages to find the one for WiFi */
	for (i = 1; i < wrds->package.count; i++) {
		union acpi_object *domain;

		data_pkg = &wrds->package.elements[i];

		/* Skip anything that is not a package with the right
		 * number of elements (i.e. domain_type,
		 * enabled/disabled plus the sar table size).
		 */
		if (data_pkg->type != ACPI_TYPE_PACKAGE ||
		    data_pkg->package.count != ACPI_WRDS_TABLE_SIZE + 2)
			continue;

		domain = &data_pkg->package.elements[0];
		if (domain->type == ACPI_TYPE_INTEGER &&
		    domain->integer.value == ACPI_WRDS_WIFI)
			break;

		data_pkg = NULL;
	}

	if (!data_pkg)
		return -ENOENT;

	if (data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
		return -EINVAL;

	sar_table->enabled = !!(data_pkg->package.elements[1].integer.value);

	for (i = 0; i < ACPI_WRDS_TABLE_SIZE; i++) {
		union acpi_object *entry;

		entry = &data_pkg->package.elements[i + 2];
		if ((entry->type != ACPI_TYPE_INTEGER) ||
		    (entry->integer.value > U8_MAX))
			return -EINVAL;

		sar_table->values[i] = entry->integer.value;
	}

	return 0;
}

static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm,
				 struct iwl_mvm_sar_table *sar_table)
{
	acpi_handle root_handle;
	acpi_handle handle;
	struct acpi_buffer wrds = {ACPI_ALLOCATE_BUFFER, NULL};
	acpi_status status;
	int ret;

	root_handle = ACPI_HANDLE(mvm->dev);
	if (!root_handle) {
		IWL_DEBUG_RADIO(mvm,
				"Could not retrieve root port ACPI handle\n");
		return -ENOENT;
	}

	/* Get the method's handle */
	status = acpi_get_handle(root_handle, (acpi_string)ACPI_WRDS_METHOD,
				 &handle);
	if (ACPI_FAILURE(status)) {
		IWL_DEBUG_RADIO(mvm, "WRDS method not found\n");
		return -ENOENT;
	}

	/* Call WRDS with no arguments */
	status = acpi_evaluate_object(handle, NULL, NULL, &wrds);
	if (ACPI_FAILURE(status)) {
		IWL_DEBUG_RADIO(mvm, "WRDS invocation failed (0x%x)\n", status);
		return -ENOENT;
	}

	ret = iwl_mvm_sar_get_wrds(mvm, wrds.pointer, sar_table);
	kfree(wrds.pointer);

	return ret;
}
#else /* CONFIG_ACPI */
static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm,
				 struct iwl_mvm_sar_table *sar_table)
{
	return -ENOENT;
}
#endif /* CONFIG_ACPI */

static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
{
	struct iwl_mvm_sar_table sar_table;
	struct iwl_dev_tx_power_cmd cmd = {
		.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
	};
	int ret, i, j, idx;
	int len = sizeof(cmd);

	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
		len = sizeof(cmd.v3);
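	/* the check above trims the command for older FW without the
	 * TX_POWER_ACK capability, which only takes the v3 part
	 */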

	ret = iwl_mvm_sar_get_table(mvm, &sar_table);
	if (ret < 0) {
		IWL_DEBUG_RADIO(mvm,
				"SAR BIOS table invalid or unavailable. (%d)\n",
				ret);
		/* we don't fail if the table is not available */
		return 0;
	}

	if (!sar_table.enabled)
		return 0;

	IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");

	BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS * IWL_NUM_SUB_BANDS !=
		     ACPI_WRDS_TABLE_SIZE);

	for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
		IWL_DEBUG_RADIO(mvm, "  Chain[%d]:\n", i);
		for (j = 0; j < IWL_NUM_SUB_BANDS; j++) {
			idx = (i * IWL_NUM_SUB_BANDS) + j;
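			/* the ACPI table is flattened row-major: the chain
			 * selects the row and the sub-band the column
			 */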
			cmd.v3.per_chain_restriction[i][j] =
				cpu_to_le16(sar_table.values[idx]);
			IWL_DEBUG_RADIO(mvm, "    Band[%d] = %d * .125dBm\n",
					j, sar_table.values[idx]);
		}
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
	if (ret)
		IWL_ERR(mvm, "failed to set per-chain TX power: %d\n", ret);

	return ret;
}

int iwl_mvm_up(struct iwl_mvm *mvm)
{
	int ret, i;
	struct ieee80211_channel *chan;
	struct cfg80211_chan_def chandef;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	/*
	 * If we haven't completed the run of the init ucode during
	 * module loading, load init ucode now
	 * (for example, if we were in RFKILL)
	 */
	ret = iwl_run_init_mvm_ucode(mvm, false);

	if (iwlmvm_mod_params.init_dbg)
		return 0;

	if (ret) {
		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
		/* this can't happen */
		if (WARN_ON(ret > 0))
			ret = -ERFKILL;
		goto error;
	}

	/*
	 * Stop and start the transport without entering low power
	 * mode. This will save the state of other components on the
	 * device that are triggered by the INIT firmware (MFUART).
	 */
	_iwl_trans_stop_device(mvm->trans, false);
	ret = _iwl_trans_start_hw(mvm->trans, false);
	if (ret)
		goto error;

	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
	if (ret) {
		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
		goto error;
	}

	iwl_mvm_get_shared_mem_conf(mvm);

	ret = iwl_mvm_sf_update(mvm, NULL, false);
	if (ret)
		IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");

	mvm->fw_dbg_conf = FW_DBG_INVALID;
	/* if we have a destination, assume EARLY START */
	if (mvm->fw->dbg_dest_tlv)
		mvm->fw_dbg_conf = FW_DBG_START_FROM_ALIVE;
	iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_START_FROM_ALIVE);

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	ret = iwl_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	/* Send phy db control command and then phy db calibration */
	ret = iwl_send_phy_db_data(mvm->phy_db);
	if (ret)
		goto error;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret)
		goto error;

	/* Init RSS configuration */
	if (iwl_mvm_has_new_rx_api(mvm)) {
		ret = iwl_send_rss_cfg_cmd(mvm);
		if (ret) {
			IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
				ret);
			goto error;
		}
	}

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;

	/* reset quota debouncing buffer - 0xff will yield invalid data */
	memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));

	/* Enable DQA-mode if required */
	if (iwl_mvm_is_dqa_supported(mvm)) {
		ret = iwl_mvm_send_dqa_cmd(mvm);
		if (ret)
			goto error;
	} else {
		IWL_DEBUG_FW(mvm, "Working in non-DQA mode\n");
	}

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	/* Add all the PHY contexts */
	chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0];
	cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
	for (i = 0; i < NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i],
					   &chandef, 1, 1);
		if (ret)
			goto error;
	}

#ifdef CONFIG_THERMAL
	if (iwl_mvm_is_tt_in_fw(mvm)) {
		/* In order to hand the responsibility for ct-kill and
		 * TX backoff over to the FW, an empty temperature reporting
		 * cmd must be sent during init time.
		 */
		iwl_mvm_send_temp_report_ths_cmd(mvm);
	} else {
		/* Initialize tx backoffs to the minimal possible */
		iwl_mvm_tt_tx_backoff(mvm, 0);
	}

	/* TODO: read the budget from BIOS / Platform NVM */
	if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0) {
		ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
					   mvm->cooling_dev.cur_state);
		if (ret)
			goto error;
	}
#else
	/* Initialize tx backoffs to the minimal possible */
	iwl_mvm_tt_tx_backoff(mvm, 0);
#endif

	WARN_ON(iwl_mvm_config_ltr(mvm));

	ret = iwl_mvm_power_update_device(mvm);
	if (ret)
		goto error;

	/*
	 * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
	 * anyway, so don't init MCC.
	 */
	if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
		ret = iwl_mvm_init_mcc(mvm);
		if (ret)
			goto error;
	}

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
		mvm->scan_type = IWL_SCAN_TYPE_NOT_SET;
		ret = iwl_mvm_config_scan(mvm);
		if (ret)
			goto error;
	}

	if (iwl_mvm_is_csum_supported(mvm) &&
	    mvm->cfg->features & NETIF_F_RXCSUM)
		iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3);

	/* allow FW/transport low power modes if not during restart */
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);

	ret = iwl_mvm_sar_init(mvm);
	if (ret)
		goto error;

	IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
	return 0;
 error:
	iwl_mvm_stop_device(mvm);
	return ret;
}

int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
{
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
	if (ret) {
		IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
		goto error;
	}

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/* Send phy db control command and then phy db calibration */
	ret = iwl_send_phy_db_data(mvm->phy_db);
	if (ret)
		goto error;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret)
		goto error;

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	return 0;
 error:
	iwl_mvm_stop_device(mvm);
	return ret;
}

void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
				 struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
	u32 flags = le32_to_cpu(card_state_notif->flags);

	IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n",
			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & CT_KILL_CARD_DISABLED) ?
			  "Reached" : "Not reached");
}

void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
			     struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;

	IWL_DEBUG_INFO(mvm,
		       "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
		       le32_to_cpu(mfuart_notif->installed_ver),
		       le32_to_cpu(mfuart_notif->external_ver),
		       le32_to_cpu(mfuart_notif->status),
		       le32_to_cpu(mfuart_notif->duration));
}