/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "iwl-fw.h"
#include "iwl-debug.h"
#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-prph.h"
#include "iwl-eeprom-parse.h"

#include "mvm.h"
#include "fw-dbg.h"
#include "iwl-phy-db.h"

#define MVM_UCODE_ALIVE_TIMEOUT	HZ
#define MVM_UCODE_CALIB_TIMEOUT	(2*HZ)

#define UCODE_VALID_OK	cpu_to_le32(0x1)

struct iwl_mvm_alive_data {
	bool valid;
	u32 scd_base_addr;
};

static inline const struct fw_img *
iwl_get_ucode_image(struct iwl_mvm *mvm, enum iwl_ucode_type ucode_type)
{
	if (ucode_type >= IWL_UCODE_TYPE_MAX)
		return NULL;

	return &mvm->fw->img[ucode_type];
}

static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
{
	struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
		.valid = cpu_to_le32(valid_tx_ant),
	};

	IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
	return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
				    sizeof(tx_ant_cmd), &tx_ant_cmd);
}

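/*
 * Free the DRAM blocks allocated for FW paging (and the paging download
 * buffer, if one was allocated).
 */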
static void iwl_free_fw_paging(struct iwl_mvm *mvm)
{
	int i;

	if (!mvm->fw_paging_db[0].fw_paging_block)
		return;

	for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
		if (!mvm->fw_paging_db[i].fw_paging_block) {
			IWL_DEBUG_FW(mvm,
				     "Paging: block %d already freed, continue to next page\n",
				     i);

			continue;
		}

		__free_pages(mvm->fw_paging_db[i].fw_paging_block,
			     get_order(mvm->fw_paging_db[i].fw_paging_size));
	}
	kfree(mvm->trans->paging_download_buf);
	memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
}

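/*
 * Copy the CPU2 paging image (CSS block + paging blocks) from the FW file
 * into the DRAM blocks allocated by iwl_alloc_fw_paging_mem().
 */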
static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
{
	int sec_idx, idx;
	u32 offset = 0;

	/*
	 * Find where the paging image starts:
	 * if CPU2 exists and is in paging format, the image looks like this:
	 * CPU1 sections (2 or more)
	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
	 * CPU2 sections (not paged)
	 * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged CPU2
	 * sections from the CPU2 paging section
	 * CPU2 paging CSS
	 * CPU2 paging image (including instruction and data)
	 */
	for (sec_idx = 0; sec_idx < IWL_UCODE_SECTION_MAX; sec_idx++) {
		if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
			sec_idx++;
			break;
		}
	}

	if (sec_idx >= IWL_UCODE_SECTION_MAX) {
		IWL_ERR(mvm, "driver didn't find paging image\n");
		iwl_free_fw_paging(mvm);
		return -EINVAL;
	}

	/* copy the CSS block to the dram */
	IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n",
		     sec_idx);

	memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
	       image->sec[sec_idx].data,
	       mvm->fw_paging_db[0].fw_paging_size);

	IWL_DEBUG_FW(mvm,
		     "Paging: copied %d CSS bytes to first block\n",
		     mvm->fw_paging_db[0].fw_paging_size);

	sec_idx++;

	/*
	 * Copy the paging blocks to DRAM.
	 * The loop index starts from 1 because the CSS block (index 0) was
	 * already copied above.
	 * The loop stops before num_of_paging_blk because the last block may
	 * not be full and is copied separately below.
	 */
	for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
		memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
		       image->sec[sec_idx].data + offset,
		       mvm->fw_paging_db[idx].fw_paging_size);

		IWL_DEBUG_FW(mvm,
			     "Paging: copied %d paging bytes to block %d\n",
			     mvm->fw_paging_db[idx].fw_paging_size,
			     idx);

		offset += mvm->fw_paging_db[idx].fw_paging_size;
	}

	/* copy the last paging block */
	if (mvm->num_of_pages_in_last_blk > 0) {
		memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
		       image->sec[sec_idx].data + offset,
		       FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);

		IWL_DEBUG_FW(mvm,
			     "Paging: copied %d pages in the last block %d\n",
			     mvm->num_of_pages_in_last_blk, idx);
	}

	return 0;
}

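/*
 * Allocate one 4KB block for the paging CSS plus num_of_paging_blk blocks
 * of PAGING_BLOCK_SIZE each, and DMA-map them when the device is DMA
 * capable.
 */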
static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
				   const struct fw_img *image)
{
	struct page *block;
	dma_addr_t phys = 0;
	int blk_idx = 0;
	int order, num_of_pages;
	int dma_enabled;

	if (mvm->fw_paging_db[0].fw_paging_block)
		return 0;

	dma_enabled = is_device_dma_capable(mvm->trans->dev);

	/* ensure BLOCK_2_EXP_SIZE is the log2 of PAGING_BLOCK_SIZE */
	BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);

	num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
	mvm->num_of_paging_blk = ((num_of_pages - 1) /
				    NUM_OF_PAGE_PER_GROUP) + 1;

	mvm->num_of_pages_in_last_blk =
		num_of_pages -
		NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);

	IWL_DEBUG_FW(mvm,
		     "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
		     mvm->num_of_paging_blk,
		     mvm->num_of_pages_in_last_blk);

	/* allocate block of 4Kbytes for paging CSS */
	order = get_order(FW_PAGING_SIZE);
	block = alloc_pages(GFP_KERNEL, order);
	if (!block) {
		/* free all the previous pages since we failed */
		iwl_free_fw_paging(mvm);
		return -ENOMEM;
	}

	mvm->fw_paging_db[blk_idx].fw_paging_block = block;
	mvm->fw_paging_db[blk_idx].fw_paging_size = FW_PAGING_SIZE;

	if (dma_enabled) {
		phys = dma_map_page(mvm->trans->dev, block, 0,
				    PAGE_SIZE << order, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(mvm->trans->dev, phys)) {
			/*
			 * free the previous pages and the current one since
			 * we failed to map_page.
			 */
			iwl_free_fw_paging(mvm);
			return -ENOMEM;
		}
		mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
	} else {
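		/*
		 * No DMA: store a pseudo address (the block index tagged
		 * with PAGING_ADDR_SIG) instead of a DMA address.
		 */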
		mvm->fw_paging_db[blk_idx].fw_paging_phys = PAGING_ADDR_SIG |
			blk_idx << BLOCK_2_EXP_SIZE;
	}

	IWL_DEBUG_FW(mvm,
		     "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
		     order);

	/*
	 * Allocate the paging blocks in DRAM.
	 * The CSS block occupies fw_paging_db[0], so the loop starts from
	 * index 1.
	 */
	for (blk_idx = 1; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
		/* allocate block of PAGING_BLOCK_SIZE (32K) */
		order = get_order(PAGING_BLOCK_SIZE);
		block = alloc_pages(GFP_KERNEL, order);
		if (!block) {
			/* free all the previous pages since we failed */
			iwl_free_fw_paging(mvm);
			return -ENOMEM;
		}

		mvm->fw_paging_db[blk_idx].fw_paging_block = block;
		mvm->fw_paging_db[blk_idx].fw_paging_size = PAGING_BLOCK_SIZE;

		if (dma_enabled) {
			phys = dma_map_page(mvm->trans->dev, block, 0,
					    PAGE_SIZE << order,
					    DMA_BIDIRECTIONAL);
			if (dma_mapping_error(mvm->trans->dev, phys)) {
				/*
				 * free the previous pages and the current one
				 * since we failed to map_page.
				 */
				iwl_free_fw_paging(mvm);
				return -ENOMEM;
			}
			mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
		} else {
			mvm->fw_paging_db[blk_idx].fw_paging_phys =
				PAGING_ADDR_SIG |
				blk_idx << BLOCK_2_EXP_SIZE;
		}

		IWL_DEBUG_FW(mvm,
			     "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
			     order);
	}

	return 0;
}

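/* Allocate (if needed) the paging blocks and fill them from the FW image */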
static int iwl_save_fw_paging(struct iwl_mvm *mvm,
			      const struct fw_img *fw)
{
	int ret;

	ret = iwl_alloc_fw_paging_mem(mvm, fw);
	if (ret)
		return ret;

	return iwl_fill_paging_mem(mvm, fw);
}

/* send paging cmd to FW in case CPU2 has a paging image */
static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
{
	int blk_idx;
	__le32 dev_phy_addr;
	struct iwl_fw_paging_cmd fw_paging_cmd = {
		.flags =
			cpu_to_le32(PAGING_CMD_IS_SECURED |
				    PAGING_CMD_IS_ENABLED |
				    (mvm->num_of_pages_in_last_blk <<
				    PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
		.block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
		.block_num = cpu_to_le32(mvm->num_of_paging_blk),
	};

	/* loop over all paging blocks + CSS block */
	for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
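		/*
		 * The FW expects each block address in units of FW pages,
		 * i.e. shifted down by PAGE_2_EXP_SIZE.
		 */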
		dev_phy_addr =
			cpu_to_le32(mvm->fw_paging_db[blk_idx].fw_paging_phys >>
				    PAGE_2_EXP_SIZE);
		fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
	}

	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
						    IWL_ALWAYS_LONG_GROUP, 0),
				    0, sizeof(fw_paging_cmd), &fw_paging_cmd);
}

/*
 * Get the paging item from the FW, in case CPU2 has a paging image and the
 * driver has to copy the pages to/from SMEM itself (no DMA)
 */
static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
{
	int ret;
	struct iwl_fw_get_item_cmd fw_get_item_cmd = {
		.item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
	};

	struct iwl_fw_get_item_resp *item_resp;
	struct iwl_host_cmd cmd = {
		.id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
		.data = { &fw_get_item_cmd, },
	};

	cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret) {
		IWL_ERR(mvm,
			"Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
			ret);
		return ret;
	}

	item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
	if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
		IWL_ERR(mvm,
			"Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
			le32_to_cpu(item_resp->item_id));
		ret = -EIO;
		goto exit;
	}

	mvm->trans->paging_download_buf = kzalloc(MAX_PAGING_IMAGE_SIZE,
						  GFP_KERNEL);
	if (!mvm->trans->paging_download_buf) {
		ret = -ENOMEM;
		goto exit;
	}
	mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
	mvm->trans->paging_db = mvm->fw_paging_db;
	IWL_DEBUG_FW(mvm,
		     "Paging: got paging request address (paging_req_addr 0x%08x)\n",
		     mvm->trans->paging_req_addr);

exit:
	iwl_free_resp(&cmd);

	return ret;
}

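/*
 * Handler for the ALIVE notification: parse whichever alive response
 * layout the FW sent (selected by payload size) and record the error/log
 * event table pointers and the SCD base address.
 */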
static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
			 struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_alive_data *alive_data = data;
	struct mvm_alive_resp_ver1 *palive1;
	struct mvm_alive_resp_ver2 *palive2;
	struct mvm_alive_resp *palive;

	if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
		palive1 = (void *)pkt->data;

		mvm->support_umac_log = false;
		mvm->error_event_table =
			le32_to_cpu(palive1->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive1->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive1->scd_base_ptr);

		alive_data->valid = le16_to_cpu(palive1->status) ==
				    IWL_ALIVE_STATUS_OK;
		IWL_DEBUG_FW(mvm,
			     "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive1->status), palive1->ver_type,
			     palive1->ver_subtype, palive1->flags);
	} else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
		palive2 = (void *)pkt->data;

		mvm->error_event_table =
			le32_to_cpu(palive2->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive2->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
		mvm->umac_error_event_table =
			le32_to_cpu(palive2->error_info_addr);
		mvm->sf_space.addr = le32_to_cpu(palive2->st_fwrd_addr);
		mvm->sf_space.size = le32_to_cpu(palive2->st_fwrd_size);

		alive_data->valid = le16_to_cpu(palive2->status) ==
				    IWL_ALIVE_STATUS_OK;
		if (mvm->umac_error_event_table)
			mvm->support_umac_log = true;

		IWL_DEBUG_FW(mvm,
			     "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive2->status), palive2->ver_type,
			     palive2->ver_subtype, palive2->flags);

		IWL_DEBUG_FW(mvm,
			     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			     palive2->umac_major, palive2->umac_minor);
	} else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		palive = (void *)pkt->data;

		mvm->error_event_table =
			le32_to_cpu(palive->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
		mvm->umac_error_event_table =
			le32_to_cpu(palive->error_info_addr);
		mvm->sf_space.addr = le32_to_cpu(palive->st_fwrd_addr);
		mvm->sf_space.size = le32_to_cpu(palive->st_fwrd_size);

		alive_data->valid = le16_to_cpu(palive->status) ==
				    IWL_ALIVE_STATUS_OK;
		if (mvm->umac_error_event_table)
			mvm->support_umac_log = true;

		IWL_DEBUG_FW(mvm,
			     "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive->status), palive->ver_type,
			     palive->ver_subtype, palive->flags);

		IWL_DEBUG_FW(mvm,
			     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			     le32_to_cpu(palive->umac_major),
			     le32_to_cpu(palive->umac_minor));
	}

	return true;
}

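/*
 * Store each CALIB_RES_NOTIF_PHY_DB result in the phy DB; returning false
 * keeps the wait going until INIT_COMPLETE_NOTIF arrives and ends it.
 */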
static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
				  struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_phy_db *phy_db = data;

	if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
		WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
		return true;
	}

	WARN_ON(iwl_phy_db_set_section(phy_db, pkt, GFP_ATOMIC));

	return false;
}

static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
					 enum iwl_ucode_type ucode_type)
{
	struct iwl_notification_wait alive_wait;
	struct iwl_mvm_alive_data alive_data;
	const struct fw_img *fw;
	int ret, i;
	enum iwl_ucode_type old_type = mvm->cur_ucode;
	static const u16 alive_cmd[] = { MVM_ALIVE };
	struct iwl_sf_region st_fwrd_space;

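	/*
	 * If the debug configuration starts the usniffer from ALIVE, load
	 * the usniffer variant of the regular image instead.
	 */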
	if (ucode_type == IWL_UCODE_REGULAR &&
	    iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE))
		fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER);
	else
		fw = iwl_get_ucode_image(mvm, ucode_type);
	if (WARN_ON(!fw))
		return -EINVAL;
	mvm->cur_ucode = ucode_type;
	mvm->ucode_loaded = false;

	iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
				   alive_cmd, ARRAY_SIZE(alive_cmd),
				   iwl_alive_fn, &alive_data);

	ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
	if (ret) {
		mvm->cur_ucode = old_type;
		iwl_remove_notification(&mvm->notif_wait, &alive_wait);
		return ret;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
				    MVM_UCODE_ALIVE_TIMEOUT);
	if (ret) {
		if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
			IWL_ERR(mvm,
				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
				iwl_read_prph(mvm->trans, SB_CPU_1_STATUS),
				iwl_read_prph(mvm->trans, SB_CPU_2_STATUS));
		mvm->cur_ucode = old_type;
		return ret;
	}

	if (!alive_data.valid) {
		IWL_ERR(mvm, "Loaded ucode is not valid!\n");
		mvm->cur_ucode = old_type;
		return -EIO;
	}

	/*
	 * update the sdio allocation according to the pointer we get in the
	 * alive notification.
	 */
	st_fwrd_space.addr = mvm->sf_space.addr;
	st_fwrd_space.size = mvm->sf_space.size;
	ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space);
	if (ret) {
		IWL_ERR(mvm, "Failed to update SF size. ret %d\n", ret);
		return ret;
	}

	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);

	/*
	 * Configure and operate the FW paging mechanism.
	 * The driver configures the paging flow only once; the CPU2 paging
	 * image is included in the IWL_UCODE_INIT image.
	 */
	if (fw->paging_mem_size) {
		/*
		 * When DMA is not enabled, the driver needs to copy / write
		 * the downloaded / uploaded pages to / from SMEM.
		 * This gets the location in SMEM where the pages are stored.
		 */
		if (!is_device_dma_capable(mvm->trans->dev)) {
			ret = iwl_trans_get_paging_item(mvm);
			if (ret) {
				IWL_ERR(mvm, "failed to get FW paging item\n");
				return ret;
			}
		}

		ret = iwl_save_fw_paging(mvm, fw);
		if (ret) {
			IWL_ERR(mvm, "failed to save the FW paging image\n");
			return ret;
		}

		ret = iwl_send_paging_cmd(mvm, fw);
		if (ret) {
			IWL_ERR(mvm, "failed to send the paging cmd\n");
			iwl_free_fw_paging(mvm);
			return ret;
		}
	}

	/*
	 * Note: all the queues are enabled as part of the interface
	 * initialization, but in firmware restart scenarios they
	 * could be stopped, so wake them up. In firmware restart,
	 * mac80211 will have the queues stopped as well until the
	 * reconfiguration completes. During normal startup, they
	 * will be empty.
	 */

	memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
	mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;

	for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
		atomic_set(&mvm->mac80211_queue_stop_count[i], 0);

	mvm->ucode_loaded = true;

	return 0;
}

static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
	struct iwl_phy_cfg_cmd phy_cfg_cmd;
	enum iwl_ucode_type ucode_type = mvm->cur_ucode;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
	phy_cfg_cmd.calib_control.event_trigger =
		mvm->fw->default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
		mvm->fw->default_calib[ucode_type].flow_trigger;

	IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
		       phy_cfg_cmd.phy_cfg);

	return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
				    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}

int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
	struct iwl_notification_wait calib_wait;
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
		CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(mvm->calibrating))
		return 0;

	iwl_init_notification_wait(&mvm->notif_wait,
				   &calib_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   iwl_wait_phy_db_entry,
				   mvm->phy_db);

	/* Will also start the device */
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
	if (ret) {
		IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
		goto error;
	}

	ret = iwl_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	/* Read the NVM only at driver load time, no need to do this twice */
	if (read_nvm) {
		/* Read nvm */
		ret = iwl_nvm_init(mvm, true);
		if (ret) {
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			goto error;
		}
	}

	/* In case we read the NVM from an external file, load it to the NIC */
	if (mvm->nvm_file_name)
		iwl_mvm_load_nvm_to_nic(mvm);

	ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
	WARN_ON(ret);

	/*
	 * Abort after reading the NVM in case RF-kill is on; we will complete
	 * the init sequence later, when RF-kill switches off.
	 */
	if (iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm,
				  "jump over all phy activities due to RF kill\n");
		iwl_remove_notification(&mvm->notif_wait, &calib_wait);
		ret = 1;
		goto out;
	}

	mvm->calibrating = true;

	/* Send TX valid antennas before triggering calibrations */
	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/*
	 * Send the phy configuration command to the init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
			ret);
		goto error;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the calibration complete notification.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
			MVM_UCODE_CALIB_TIMEOUT);

	if (ret && iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
		ret = 1;
	}
	goto out;

error:
	iwl_remove_notification(&mvm->notif_wait, &calib_wait);
out:
	mvm->calibrating = false;
	if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
		/* we want to debug INIT and we have no NVM - fake */
		mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
					sizeof(struct ieee80211_channel) +
					sizeof(struct ieee80211_rate),
					GFP_KERNEL);
		if (!mvm->nvm_data)
			return -ENOMEM;
		mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
		mvm->nvm_data->bands[0].n_channels = 1;
		mvm->nvm_data->bands[0].n_bitrates = 1;
		mvm->nvm_data->bands[0].bitrates =
			(void *)mvm->nvm_data->channels + 1;
		mvm->nvm_data->bands[0].bitrates->hw_value = 10;
	}

	return ret;
}

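/*
 * Query SHARED_MEM_CFG from the FW and cache the SMEM layout (TX/RX FIFO
 * sizes, sample buffer, paging buffer) in mvm->shared_mem_cfg.
 */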
static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
{
	struct iwl_host_cmd cmd = {
		.id = SHARED_MEM_CFG,
		.flags = CMD_WANT_SKB,
		.data = { NULL, },
		.len = { 0, },
	};
	struct iwl_rx_packet *pkt;
	struct iwl_shared_mem_cfg *mem_cfg;
	u32 i;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
		return;

	pkt = cmd.resp_pkt;
	mem_cfg = (void *)pkt->data;

	mvm->shared_mem_cfg.shared_mem_addr =
		le32_to_cpu(mem_cfg->shared_mem_addr);
	mvm->shared_mem_cfg.shared_mem_size =
		le32_to_cpu(mem_cfg->shared_mem_size);
	mvm->shared_mem_cfg.sample_buff_addr =
		le32_to_cpu(mem_cfg->sample_buff_addr);
	mvm->shared_mem_cfg.sample_buff_size =
		le32_to_cpu(mem_cfg->sample_buff_size);
	mvm->shared_mem_cfg.txfifo_addr = le32_to_cpu(mem_cfg->txfifo_addr);
	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++)
		mvm->shared_mem_cfg.txfifo_size[i] =
			le32_to_cpu(mem_cfg->txfifo_size[i]);
	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
		mvm->shared_mem_cfg.rxfifo_size[i] =
			le32_to_cpu(mem_cfg->rxfifo_size[i]);
	mvm->shared_mem_cfg.page_buff_addr =
		le32_to_cpu(mem_cfg->page_buff_addr);
	mvm->shared_mem_cfg.page_buff_size =
		le32_to_cpu(mem_cfg->page_buff_size);
	IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");

	iwl_free_resp(&cmd);
}

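/*
 * Enable LTR (latency tolerance reporting) in the FW, if the transport
 * has LTR enabled.
 */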
static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
{
	struct iwl_ltr_config_cmd cmd = {
		.flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
	};

	if (!mvm->trans->ltr_enabled)
		return 0;

	return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
				    sizeof(cmd), &cmd);
}

int iwl_mvm_up(struct iwl_mvm *mvm)
{
	int ret, i;
	struct ieee80211_channel *chan;
	struct cfg80211_chan_def chandef;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	/*
	 * If we haven't completed the run of the init ucode during
	 * module loading, load init ucode now
	 * (for example, if we were in RFKILL)
	 */
	ret = iwl_run_init_mvm_ucode(mvm, false);
	if (ret && !iwlmvm_mod_params.init_dbg) {
		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
		/* this can't happen */
		if (WARN_ON(ret > 0))
			ret = -ERFKILL;
		goto error;
	}
	if (!iwlmvm_mod_params.init_dbg) {
		/*
		 * Stop and start the transport without entering low power
		 * mode. This will save the state of other components on the
		 * device that are triggered by the INIT firmware (MFUART).
		 */
		_iwl_trans_stop_device(mvm->trans, false);
		ret = _iwl_trans_start_hw(mvm->trans, false);
		if (ret)
			goto error;
	}

	if (iwlmvm_mod_params.init_dbg)
		return 0;

	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
	if (ret) {
		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
		goto error;
	}

	iwl_mvm_get_shared_mem_conf(mvm);

	ret = iwl_mvm_sf_update(mvm, NULL, false);
	if (ret)
		IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");

	mvm->fw_dbg_conf = FW_DBG_INVALID;
	/* if we have a destination, assume EARLY START */
	if (mvm->fw->dbg_dest_tlv)
		mvm->fw_dbg_conf = FW_DBG_START_FROM_ALIVE;
	iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_START_FROM_ALIVE);

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	ret = iwl_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	/* Send phy db control command and then phy db calibration */
	ret = iwl_send_phy_db_data(mvm->phy_db);
	if (ret)
		goto error;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret)
		goto error;

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;

	/* reset quota debouncing buffer - 0xff will yield invalid data */
	memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	/* Add all the PHY contexts */
	chan = &mvm->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0];
	cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
	for (i = 0; i < NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i],
					   &chandef, 1, 1);
		if (ret)
			goto error;
	}

	/* Initialize tx backoffs to the minimal possible */
	iwl_mvm_tt_tx_backoff(mvm, 0);

	WARN_ON(iwl_mvm_config_ltr(mvm));

	ret = iwl_mvm_power_update_device(mvm);
	if (ret)
		goto error;

	/*
	 * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
	 * anyway, so don't init MCC.
	 */
	if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
		ret = iwl_mvm_init_mcc(mvm);
		if (ret)
			goto error;
	}

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
		ret = iwl_mvm_config_scan(mvm);
		if (ret)
			goto error;
	}

	if (iwl_mvm_is_csum_supported(mvm) &&
	    mvm->cfg->features & NETIF_F_RXCSUM)
		iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3);

	/* allow FW/transport low power modes if not during restart */
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);

	IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
	return 0;
 error:
	iwl_trans_stop_device(mvm->trans);
	return ret;
}

int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
{
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
	if (ret) {
		IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
		goto error;
	}

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/* Send phy db control command and then phy db calibration */
	ret = iwl_send_phy_db_data(mvm->phy_db);
	if (ret)
		goto error;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret)
		goto error;

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	return 0;
 error:
	iwl_trans_stop_device(mvm->trans);
	return ret;
}

void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
				 struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
	u32 flags = le32_to_cpu(card_state_notif->flags);

	IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n",
			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & CT_KILL_CARD_DISABLED) ?
			  "Reached" : "Not reached");
}

void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
			     struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;

	IWL_DEBUG_INFO(mvm,
		       "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
		       le32_to_cpu(mfuart_notif->installed_ver),
		       le32_to_cpu(mfuart_notif->external_ver),
		       le32_to_cpu(mfuart_notif->status),
		       le32_to_cpu(mfuart_notif->duration));
}