/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019        Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019       Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>
#include <linux/netdevice.h>

#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "fw/img.h"
#include "iwl-debug.h"
#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-prph.h"
#include "fw/acpi.h"

#include "mvm.h"
#include "fw/dbg.h"
#include "iwl-phy-db.h"
#include "iwl-modparams.h"
#include "iwl-nvm-parse.h"

#define MVM_UCODE_ALIVE_TIMEOUT	HZ
#define MVM_UCODE_CALIB_TIMEOUT	(2*HZ)

#define UCODE_VALID_OK	cpu_to_le32(0x1)

struct iwl_mvm_alive_data {
	bool valid;
	u32 scd_base_addr;
};

static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
{
	struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
		.valid = cpu_to_le32(valid_tx_ant),
	};

	IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
	return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
				    sizeof(tx_ant_cmd), &tx_ant_cmd);
}

static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
{
	int i;
	struct iwl_rss_config_cmd cmd = {
		.flags = cpu_to_le32(IWL_RSS_ENABLE),
		.hash_mask = BIT(IWL_RSS_HASH_TYPE_IPV4_TCP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV4_UDP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV4_PAYLOAD) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_TCP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_UDP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD),
	};

	if (mvm->trans->num_rx_queues == 1)
		return 0;

	/* Do not direct RSS traffic to Q 0 which is our fallback queue */
	for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
		cmd.indirection_table[i] =
			1 + (i % (mvm->trans->num_rx_queues - 1));
	netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));

	return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
}

static int iwl_configure_rxq(struct iwl_mvm *mvm)
{
	int i, num_queues, size, ret;
	struct iwl_rfh_queue_config *cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(DATA_PATH_GROUP, RFH_QUEUE_CONFIG_CMD),
		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
	};

	/* Do not configure default queue, it is configured via context info */
	num_queues = mvm->trans->num_rx_queues - 1;

	size = struct_size(cmd, data, num_queues);

	cmd = kzalloc(size, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	cmd->num_queues = num_queues;

	for (i = 0; i < num_queues; i++) {
		struct iwl_trans_rxq_dma_data data;

		cmd->data[i].q_num = i + 1;
		iwl_trans_get_rxq_dma_data(mvm->trans, i + 1, &data);

		cmd->data[i].fr_bd_cb = cpu_to_le64(data.fr_bd_cb);
		cmd->data[i].urbd_stts_wrptr =
			cpu_to_le64(data.urbd_stts_wrptr);
		cmd->data[i].ur_bd_cb = cpu_to_le64(data.ur_bd_cb);
		cmd->data[i].fr_bd_wid = cpu_to_le32(data.fr_bd_wid);
	}

	hcmd.data[0] = cmd;
	hcmd.len[0] = size;

	ret = iwl_mvm_send_cmd(mvm, &hcmd);

	kfree(cmd);

	return ret;
}

static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
{
	struct iwl_dqa_enable_cmd dqa_cmd = {
		.cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE),
	};
	u32 cmd_id = iwl_cmd_id(DQA_ENABLE_CMD, DATA_PATH_GROUP, 0);
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send DQA enabling command: %d\n", ret);
	else
		IWL_DEBUG_FW(mvm, "Working in DQA mode\n");

	return ret;
}

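/*
 * The MFUART assert dump is delivered in chunks; index_num identifies
 * the chunk, the assert id is reported with the first chunk, and every
 * dword of the dump data is printed to the debug log below.
 */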
void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
				   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data;
	__le32 *dump_data = mfu_dump_notif->data;
	int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32);
	int i;

	if (mfu_dump_notif->index_num == 0)
		IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n",
			 le32_to_cpu(mfu_dump_notif->assert_id));

	for (i = 0; i < n_words; i++)
		IWL_DEBUG_INFO(mvm,
			       "MFUART assert dump, dword %u: 0x%08x\n",
			       le16_to_cpu(mfu_dump_notif->index_num) *
			       n_words + i,
			       le32_to_cpu(dump_data[i]));
}

static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
			 struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_alive_data *alive_data = data;
	struct mvm_alive_resp_v3 *palive3;
	struct mvm_alive_resp *palive;
	struct iwl_umac_alive *umac;
	struct iwl_lmac_alive *lmac1;
	struct iwl_lmac_alive *lmac2 = NULL;
	u16 status;
	u32 lmac_error_event_table, umac_error_event_table;

	if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		palive = (void *)pkt->data;
		umac = &palive->umac_data;
		lmac1 = &palive->lmac_data[0];
		lmac2 = &palive->lmac_data[1];
		status = le16_to_cpu(palive->status);
	} else {
		palive3 = (void *)pkt->data;
		umac = &palive3->umac_data;
		lmac1 = &palive3->lmac_data;
		status = le16_to_cpu(palive3->status);
	}

	lmac_error_event_table =
		le32_to_cpu(lmac1->dbg_ptrs.error_event_table_ptr);
	iwl_fw_lmac1_set_alive_err_table(mvm->trans, lmac_error_event_table);

	if (lmac2)
		mvm->trans->lmac_error_event_table[1] =
			le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr);

	umac_error_event_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr);

	if (!umac_error_event_table) {
		mvm->support_umac_log = false;
	} else if (umac_error_event_table >=
		   mvm->trans->cfg->min_umac_error_event_table) {
		mvm->support_umac_log = true;
	} else {
		IWL_ERR(mvm,
			"Not valid error log pointer 0x%08X for %s uCode\n",
			umac_error_event_table,
			(mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) ?
			"Init" : "RT");
		mvm->support_umac_log = false;
	}

	if (mvm->support_umac_log)
		iwl_fw_umac_set_alive_err_table(mvm->trans,
						umac_error_event_table);

	alive_data->scd_base_addr = le32_to_cpu(lmac1->dbg_ptrs.scd_base_ptr);
	alive_data->valid = status == IWL_ALIVE_STATUS_OK;

	IWL_DEBUG_FW(mvm,
		     "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
		     status, lmac1->ver_type, lmac1->ver_subtype);

	if (lmac2)
		IWL_DEBUG_FW(mvm, "Alive ucode CDB\n");

	IWL_DEBUG_FW(mvm,
		     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
		     le32_to_cpu(umac->umac_major),
		     le32_to_cpu(umac->umac_minor));

	iwl_fwrt_update_fw_versions(&mvm->fwrt, lmac1, umac);

	return true;
}

static bool iwl_wait_init_complete(struct iwl_notif_wait_data *notif_wait,
				   struct iwl_rx_packet *pkt, void *data)
{
	WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);

	return true;
}

static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
				  struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_phy_db *phy_db = data;

	if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
		WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
		return true;
	}

	WARN_ON(iwl_phy_db_set_section(phy_db, pkt));

	return false;
}

static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
					 enum iwl_ucode_type ucode_type)
{
	struct iwl_notification_wait alive_wait;
	struct iwl_mvm_alive_data alive_data = {};
	const struct fw_img *fw;
	int ret;
	enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
	static const u16 alive_cmd[] = { MVM_ALIVE };
	bool run_in_rfkill =
		ucode_type == IWL_UCODE_INIT || iwl_mvm_has_unified_ucode(mvm);

	if (ucode_type == IWL_UCODE_REGULAR &&
	    iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
	    !(fw_has_capa(&mvm->fw->ucode_capa,
			  IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
		fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER);
	else
		fw = iwl_get_ucode_image(mvm->fw, ucode_type);
	if (WARN_ON(!fw))
		return -EINVAL;
	iwl_fw_set_current_image(&mvm->fwrt, ucode_type);
	clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);

	iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
				   alive_cmd, ARRAY_SIZE(alive_cmd),
				   iwl_alive_fn, &alive_data);

	/*
	 * We want to load the INIT firmware even in RFKILL
	 * For the unified firmware case, the ucode_type is not
	 * INIT, but we still need to run it.
	 */
	ret = iwl_trans_start_fw(mvm->trans, fw, run_in_rfkill);
	if (ret) {
		iwl_fw_set_current_image(&mvm->fwrt, old_type);
		iwl_remove_notification(&mvm->notif_wait, &alive_wait);
		return ret;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
				    MVM_UCODE_ALIVE_TIMEOUT);
	if (ret) {
		struct iwl_trans *trans = mvm->trans;

		if (ret == -ETIMEDOUT)
			iwl_fw_dbg_error_collect(&mvm->fwrt,
						 FW_DBG_TRIGGER_ALIVE_TIMEOUT);

		if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000)
			IWL_ERR(mvm,
				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
				iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS),
				iwl_read_umac_prph(trans,
						   UMAG_SB_CPU_2_STATUS));
		else if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
			IWL_ERR(mvm,
				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
				iwl_read_prph(trans, SB_CPU_1_STATUS),
				iwl_read_prph(trans, SB_CPU_2_STATUS));
		iwl_fw_set_current_image(&mvm->fwrt, old_type);
		return ret;
	}

	if (!alive_data.valid) {
		IWL_ERR(mvm, "Loaded ucode is not valid!\n");
		iwl_fw_set_current_image(&mvm->fwrt, old_type);
		return -EIO;
	}

	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);

	/*
	 * Note: all the queues are enabled as part of the interface
	 * initialization, but in firmware restart scenarios they
	 * could be stopped, so wake them up. In firmware restart,
	 * mac80211 will have the queues stopped as well until the
	 * reconfiguration completes. During normal startup, they
	 * will be empty.
	 */

	memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
	/*
	 * Set a 'fake' TID for the command queue, since we use the
	 * hweight() of the tid_bitmap as a refcount now. Not that
	 * we ever even consider the command queue as one we might
	 * want to reuse, but be safe nevertheless.
	 */
	mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap =
		BIT(IWL_MAX_TID_COUNT + 2);

	set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
#ifdef CONFIG_IWLWIFI_DEBUGFS
	iwl_fw_set_dbg_rec_on(&mvm->fwrt);
#endif

	return 0;
}

static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
	struct iwl_notification_wait init_wait;
	struct iwl_nvm_access_complete_cmd nvm_complete = {};
	struct iwl_init_extended_cfg_cmd init_cfg = {
		.init_flags = cpu_to_le32(BIT(IWL_INIT_NVM)),
	};
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
	};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_init_notification_wait(&mvm->notif_wait,
				   &init_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   iwl_wait_init_complete,
				   NULL);

	iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_EARLY);

	/* Will also start the device */
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
	if (ret) {
		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
		goto error;
	}
	iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_AFTER_ALIVE);

	/* Send init config command to mark that we are sending NVM access
	 * commands
	 */
	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(SYSTEM_GROUP,
						INIT_EXTENDED_CFG_CMD),
				   CMD_SEND_IN_RFKILL,
				   sizeof(init_cfg), &init_cfg);
	if (ret) {
		IWL_ERR(mvm, "Failed to run init config command: %d\n",
			ret);
		goto error;
	}

	/* Load NVM to NIC if needed */
	if (mvm->nvm_file_name) {
		iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name,
				      mvm->nvm_sections);
		iwl_mvm_load_nvm_to_nic(mvm);
	}

	if (IWL_MVM_PARSE_NVM && read_nvm) {
		ret = iwl_nvm_init(mvm);
		if (ret) {
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			goto error;
		}
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
						NVM_ACCESS_COMPLETE),
				   CMD_SEND_IN_RFKILL,
				   sizeof(nvm_complete), &nvm_complete);
	if (ret) {
		IWL_ERR(mvm, "Failed to run complete NVM access: %d\n",
			ret);
		goto error;
	}

	/* We wait for the INIT complete notification */
	ret = iwl_wait_notification(&mvm->notif_wait, &init_wait,
				    MVM_UCODE_ALIVE_TIMEOUT);
	if (ret)
		return ret;

	/* Read the NVM only at driver load time, no need to do this twice */
	if (!IWL_MVM_PARSE_NVM && read_nvm) {
		mvm->nvm_data = iwl_get_nvm(mvm->trans, mvm->fw);
		if (IS_ERR(mvm->nvm_data)) {
			ret = PTR_ERR(mvm->nvm_data);
			mvm->nvm_data = NULL;
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			return ret;
		}
	}

	mvm->rfkill_safe_init_done = true;

	return 0;

error:
	iwl_remove_notification(&mvm->notif_wait, &init_wait);
	return ret;
}

static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
	struct iwl_phy_cfg_cmd phy_cfg_cmd;
	enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));

	/* set flags extra PHY configuration flags from the device's cfg */
	phy_cfg_cmd.phy_cfg |= cpu_to_le32(mvm->cfg->extra_phy_cfg_flags);

	phy_cfg_cmd.calib_control.event_trigger =
		mvm->fw->default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
		mvm->fw->default_calib[ucode_type].flow_trigger;

	IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
		       phy_cfg_cmd.phy_cfg);

	return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
				    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}

int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
	struct iwl_notification_wait calib_wait;
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
		CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	if (iwl_mvm_has_unified_ucode(mvm))
		return iwl_run_unified_mvm_ucode(mvm, true);

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(mvm->rfkill_safe_init_done))
		return 0;

	iwl_init_notification_wait(&mvm->notif_wait,
				   &calib_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   iwl_wait_phy_db_entry,
				   mvm->phy_db);

	/* Will also start the device */
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
	if (ret) {
		IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
		goto remove_notif;
	}

	if (mvm->cfg->device_family < IWL_DEVICE_FAMILY_8000) {
		ret = iwl_mvm_send_bt_init_conf(mvm);
		if (ret)
			goto remove_notif;
	}

	/* Read the NVM only at driver load time, no need to do this twice */
	if (read_nvm) {
		ret = iwl_nvm_init(mvm);
		if (ret) {
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			goto remove_notif;
		}
	}

	/* In case we read the NVM from external file, load it to the NIC */
	if (mvm->nvm_file_name)
		iwl_mvm_load_nvm_to_nic(mvm);

	WARN_ONCE(mvm->nvm_data->nvm_version < mvm->trans->cfg->nvm_ver,
		  "Too old NVM version (0x%0x, required = 0x%0x)",
		  mvm->nvm_data->nvm_version, mvm->trans->cfg->nvm_ver);

	/*
	 * abort after reading the nvm in case RF Kill is on, we will complete
	 * the init seq later when RF kill will switch to off
	 */
	if (iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm,
				  "jump over all phy activities due to RF kill\n");
		goto remove_notif;
	}

	mvm->rfkill_safe_init_done = true;

	/* Send TX valid antennas before triggering calibrations */
	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto remove_notif;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
			ret);
		goto remove_notif;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the calibration complete notification.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
				    MVM_UCODE_CALIB_TIMEOUT);
	if (!ret)
		goto out;

	if (iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
		ret = 0;
	} else {
		IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
			ret);
	}

	goto out;

remove_notif:
	iwl_remove_notification(&mvm->notif_wait, &calib_wait);
out:
	mvm->rfkill_safe_init_done = false;
	if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
		/* we want to debug INIT and we have no NVM - fake */
		mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
					sizeof(struct ieee80211_channel) +
					sizeof(struct ieee80211_rate),
					GFP_KERNEL);
		if (!mvm->nvm_data)
			return -ENOMEM;
		mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
		mvm->nvm_data->bands[0].n_channels = 1;
		mvm->nvm_data->bands[0].n_bitrates = 1;
		mvm->nvm_data->bands[0].bitrates =
			(void *)mvm->nvm_data->channels + 1;
		mvm->nvm_data->bands[0].bitrates->hw_value = 10;
	}

	return ret;
}

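/*
 * Enable LTR (Latency Tolerance Reporting) in the firmware, but only
 * when the transport reports that the platform has LTR enabled.
 */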
static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
{
	struct iwl_ltr_config_cmd cmd = {
		.flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
	};

	if (!mvm->trans->ltr_enabled)
		return 0;

	return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
				    sizeof(cmd), &cmd);
}

#ifdef CONFIG_ACPI
static inline int iwl_mvm_sar_set_profile(struct iwl_mvm *mvm,
					  union acpi_object *table,
					  struct iwl_mvm_sar_profile *profile,
					  bool enabled)
{
	int i;

	profile->enabled = enabled;

	for (i = 0; i < ACPI_SAR_TABLE_SIZE; i++) {
		if ((table[i].type != ACPI_TYPE_INTEGER) ||
		    (table[i].integer.value > U8_MAX))
			return -EINVAL;

		profile->table[i] = table[i].integer.value;
	}

	return 0;
}

static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
{
	union acpi_object *wifi_pkg, *table, *data;
	bool enabled;
	int ret, tbl_rev;

	data = iwl_acpi_get_object(mvm->dev, ACPI_WRDS_METHOD);
	if (IS_ERR(data))
		return PTR_ERR(data);

	wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
					 ACPI_WRDS_WIFI_DATA_SIZE, &tbl_rev);
	if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
		ret = PTR_ERR(wifi_pkg);
		goto out_free;
	}

	if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
		ret = -EINVAL;
		goto out_free;
	}

	enabled = !!(wifi_pkg->package.elements[1].integer.value);

	/* position of the actual table */
	table = &wifi_pkg->package.elements[2];

	/* The profile from WRDS is officially profile 1, but goes
	 * into sar_profiles[0] (because we don't have a profile 0).
	 */
	ret = iwl_mvm_sar_set_profile(mvm, table, &mvm->sar_profiles[0],
				      enabled);
out_free:
	kfree(data);
	return ret;
}

static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
{
	union acpi_object *wifi_pkg, *data;
	bool enabled;
	int i, n_profiles, pos, ret, tbl_rev;

	data = iwl_acpi_get_object(mvm->dev, ACPI_EWRD_METHOD);
	if (IS_ERR(data))
		return PTR_ERR(data);

	wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
					 ACPI_EWRD_WIFI_DATA_SIZE, &tbl_rev);
	if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
		ret = PTR_ERR(wifi_pkg);
		goto out_free;
	}

	if ((wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) ||
	    (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER)) {
		ret = -EINVAL;
		goto out_free;
	}

	enabled = !!(wifi_pkg->package.elements[1].integer.value);
	n_profiles = wifi_pkg->package.elements[2].integer.value;

	/*
	 * Check the validity of n_profiles.  The EWRD profiles start
	 * from index 1, so the maximum value allowed here is
	 * ACPI_SAR_PROFILES_NUM - 1.
	 */
	if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) {
		ret = -EINVAL;
		goto out_free;
	}

	/* the tables start at element 3 */
	pos = 3;

	for (i = 0; i < n_profiles; i++) {
		/* The EWRD profiles officially go from 2 to 4, but we
		 * save them in sar_profiles[1-3] (because we don't
		 * have profile 0).  So in the array we start from 1.
		 */
		ret = iwl_mvm_sar_set_profile(mvm,
					      &wifi_pkg->package.elements[pos],
					      &mvm->sar_profiles[i + 1],
					      enabled);
		if (ret < 0)
			break;

		/* go to the next table */
		pos += ACPI_SAR_TABLE_SIZE;
	}

out_free:
	kfree(data);
	return ret;
}

static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
{
	union acpi_object *wifi_pkg, *data;
	int i, j, ret, tbl_rev;
	int idx = 1;

	data = iwl_acpi_get_object(mvm->dev, ACPI_WGDS_METHOD);
	if (IS_ERR(data))
		return PTR_ERR(data);

	wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
					 ACPI_WGDS_WIFI_DATA_SIZE, &tbl_rev);
	if (IS_ERR(wifi_pkg) || tbl_rev > 1) {
		ret = PTR_ERR(wifi_pkg);
		goto out_free;
	}

	mvm->geo_rev = tbl_rev;
	for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
		for (j = 0; j < ACPI_GEO_TABLE_SIZE; j++) {
			union acpi_object *entry;

			entry = &wifi_pkg->package.elements[idx++];
			if ((entry->type != ACPI_TYPE_INTEGER) ||
			    (entry->integer.value > U8_MAX)) {
				ret = -EINVAL;
				goto out_free;
			}

			mvm->geo_profiles[i].values[j] = entry->integer.value;
		}
	}
	ret = 0;
out_free:
	kfree(data);
	return ret;
}

int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
{
	union {
		struct iwl_dev_tx_power_cmd v5;
		struct iwl_dev_tx_power_cmd_v4 v4;
	} cmd;
	int i, j, idx;
	int profs[ACPI_SAR_NUM_CHAIN_LIMITS] = { prof_a, prof_b };
	int len;

	BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS < 2);
	BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS * ACPI_SAR_NUM_SUB_BANDS !=
		     ACPI_SAR_TABLE_SIZE);

	cmd.v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS);

	if (fw_has_api(&mvm->fw->ucode_capa,
		       IWL_UCODE_TLV_API_REDUCE_TX_POWER))
		len = sizeof(cmd.v5);
	else if (fw_has_capa(&mvm->fw->ucode_capa,
			     IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
		len = sizeof(cmd.v4);
	else
		len = sizeof(cmd.v4.v3);

	for (i = 0; i < ACPI_SAR_NUM_CHAIN_LIMITS; i++) {
		struct iwl_mvm_sar_profile *prof;

		/* don't allow SAR to be disabled (profile 0 means disable) */
		if (profs[i] == 0)
			return -EPERM;

		/* we are off by one, so allow up to ACPI_SAR_PROFILE_NUM */
		if (profs[i] > ACPI_SAR_PROFILE_NUM)
			return -EINVAL;

		/* profiles go from 1 to 4, so decrement to access the array */
		prof = &mvm->sar_profiles[profs[i] - 1];

		/* if the profile is disabled, do nothing */
		if (!prof->enabled) {
			IWL_DEBUG_RADIO(mvm, "SAR profile %d is disabled.\n",
					profs[i]);
			/* if one of the profiles is disabled, we fail all */
			return -ENOENT;
		}

		IWL_DEBUG_INFO(mvm,
			       "SAR EWRD: chain %d profile index %d\n",
			       i, profs[i]);
		IWL_DEBUG_RADIO(mvm, "  Chain[%d]:\n", i);
		for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS; j++) {
			idx = (i * ACPI_SAR_NUM_SUB_BANDS) + j;
			cmd.v5.v3.per_chain_restriction[i][j] =
				cpu_to_le16(prof->table[idx]);
			IWL_DEBUG_RADIO(mvm, "    Band[%d] = %d * .125dBm\n",
					j, prof->table[idx]);
		}
	}

	IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");

	return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
}

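/*
 * Ask the firmware which geographic (WGDS) tx power profile is
 * currently in use and return its index.
 */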
int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
{
	struct iwl_geo_tx_power_profiles_resp *resp;
	int ret;
	u16 len;
	void *data;
	struct iwl_geo_tx_power_profiles_cmd geo_cmd;
	struct iwl_geo_tx_power_profiles_cmd_v1 geo_cmd_v1;
	struct iwl_host_cmd cmd;

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
		geo_cmd.ops =
			cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE);
		len = sizeof(geo_cmd);
		data = &geo_cmd;
	} else {
		geo_cmd_v1.ops =
			cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE);
		len = sizeof(geo_cmd_v1);
		data = &geo_cmd_v1;
	}

	cmd = (struct iwl_host_cmd){
		.id =  WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT),
		.len = { len, },
		.flags = CMD_WANT_SKB,
		.data = { data },
	};

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
		return ret;
	}

	resp = (void *)cmd.resp_pkt->data;
	ret = le32_to_cpu(resp->profile_idx);
919
	if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES)) {
H
Haim Dreyfuss 已提交
920 921 922 923 924 925 926 927
		ret = -EIO;
		IWL_WARN(mvm, "Invalid geographic profile idx (%d)\n", ret);
	}

	iwl_free_resp(&cmd);
	return ret;
}

static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
{
	struct iwl_geo_tx_power_profiles_cmd cmd = {
		.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES),
	};
	int ret, i, j;
	u16 cmd_wide_id =  WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);

	/*
	 * This command is not supported on earlier firmware versions.
	 * Unfortunately, we don't have a TLV API flag to rely on, so
	 * rely on the major version which is in the first byte of
	 * ucode_ver.
	 */
	if (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) < 41)
		return 0;

	ret = iwl_mvm_sar_get_wgds_table(mvm);
	if (ret < 0) {
		IWL_DEBUG_RADIO(mvm,
				"Geo SAR BIOS table invalid or unavailable. (%d)\n",
				ret);
		/* we don't fail if the table is not available */
		return 0;
	}

	IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n");

	BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS *
		     ACPI_WGDS_TABLE_SIZE + 1 !=  ACPI_WGDS_WIFI_DATA_SIZE);

	BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES > IWL_NUM_GEO_PROFILES);

	for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
		struct iwl_per_chain_offset *chain =
			(struct iwl_per_chain_offset *)&cmd.table[i];

		for (j = 0; j < ACPI_WGDS_NUM_BANDS; j++) {
			u8 *value;

			value = &mvm->geo_profiles[i].values[j *
				ACPI_GEO_PER_CHAIN_SIZE];
			chain[j].max_tx_power = cpu_to_le16(value[0]);
			chain[j].chain_a = value[1];
			chain[j].chain_b = value[2];
			IWL_DEBUG_RADIO(mvm,
					"SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
					i, j, value[1], value[2], value[0]);
		}
	}

	cmd.table_revision = cpu_to_le32(mvm->geo_rev);

	if (!fw_has_api(&mvm->fw->ucode_capa,
		       IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
		return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0,
				sizeof(struct iwl_geo_tx_power_profiles_cmd_v1),
				&cmd);
	}

	return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, sizeof(cmd), &cmd);
}

#else /* CONFIG_ACPI */
static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
{
	return -ENOENT;
}

static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
{
	return -ENOENT;
}

static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
{
	return -ENOENT;
}

static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
{
	return 0;
}

int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a,
			       int prof_b)
{
	return -ENOENT;
}

int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
{
	return -ENOENT;
}
#endif /* CONFIG_ACPI */

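/*
 * Send FW_ERROR_RECOVERY_CMD to the firmware.  When called with
 * ERROR_RECOVERY_UPDATE_DB, the error log buffer that was collected
 * while the HW was reset is handed back to the firmware as well.
 */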
void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
{
	u32 error_log_size = mvm->fw->ucode_capa.error_log_size;
	int ret;
	u32 resp;

	struct iwl_fw_error_recovery_cmd recovery_cmd = {
		.flags = cpu_to_le32(flags),
		.buf_size = 0,
	};
	struct iwl_host_cmd host_cmd = {
		.id = WIDE_ID(SYSTEM_GROUP, FW_ERROR_RECOVERY_CMD),
		.flags = CMD_WANT_SKB,
		.data = {&recovery_cmd, },
		.len = {sizeof(recovery_cmd), },
	};

	/* no error log was defined in TLV */
	if (!error_log_size)
		return;

	if (flags & ERROR_RECOVERY_UPDATE_DB) {
		/* no buf was allocated while HW reset */
		if (!mvm->error_recovery_buf)
			return;

		host_cmd.data[1] = mvm->error_recovery_buf;
		host_cmd.len[1] =  error_log_size;
		host_cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
		recovery_cmd.buf_size = cpu_to_le32(error_log_size);
	}

	ret = iwl_mvm_send_cmd(mvm, &host_cmd);
	kfree(mvm->error_recovery_buf);
	mvm->error_recovery_buf = NULL;

	if (ret) {
		IWL_ERR(mvm, "Failed to send recovery cmd %d\n", ret);
		return;
	}

	/* skb respond is only relevant in ERROR_RECOVERY_UPDATE_DB */
	if (flags & ERROR_RECOVERY_UPDATE_DB) {
		resp = le32_to_cpu(*(__le32 *)host_cmd.resp_pkt->data);
		if (resp)
			IWL_ERR(mvm,
				"Failed to send recovery cmd blob was invalid %d\n",
				resp);
	}
}

static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
{
	int ret;

	ret = iwl_mvm_sar_get_wrds_table(mvm);
	if (ret < 0) {
		IWL_DEBUG_RADIO(mvm,
				"WRDS SAR BIOS table invalid or unavailable. (%d)\n",
				ret);
		/*
		 * If not available, don't fail and don't bother with EWRD.
		 * Return 1 to tell that we can't use WGDS either.
		 */
		return 1;
	}

	ret = iwl_mvm_sar_get_ewrd_table(mvm);
	/* if EWRD is not available, we can still use WRDS, so don't fail */
	if (ret < 0)
		IWL_DEBUG_RADIO(mvm,
				"EWRD SAR BIOS table invalid or unavailable. (%d)\n",
				ret);

	/* choose profile 1 (WRDS) as default for both chains */
	ret = iwl_mvm_sar_select_profile(mvm, 1, 1);

	/*
	 * If we don't have profile 0 from BIOS, just skip it.  This
	 * means that SAR Geo will not be enabled either, even if we
	 * have other valid profiles.
	 */
	if (ret == -ENOENT)
		return 1;

	return ret;
}

static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
{
	int ret;

	if (iwl_mvm_has_unified_ucode(mvm))
		return iwl_run_unified_mvm_ucode(mvm, false);

	ret = iwl_run_init_mvm_ucode(mvm, false);

	if (ret) {
		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);

		if (iwlmvm_mod_params.init_dbg)
			return 0;
		return ret;
	}

	/*
	 * Stop and start the transport without entering low power
	 * mode. This will save the state of other components on the
	 * device that are triggered by the INIT firwmare (MFUART).
	 */
	_iwl_trans_stop_device(mvm->trans, false);
	ret = _iwl_trans_start_hw(mvm->trans, false);
	if (ret)
		return ret;

	iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_EARLY);

	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
	if (ret)
		return ret;

	iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_AFTER_ALIVE);

	return iwl_init_paging(&mvm->fwrt, mvm->fwrt.cur_fw_img);
}

int iwl_mvm_up(struct iwl_mvm *mvm)
{
	int ret, i;
	struct ieee80211_channel *chan;
	struct cfg80211_chan_def chandef;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	ret = iwl_mvm_load_rt_fw(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
		if (ret != -ERFKILL)
			iwl_fw_dbg_error_collect(&mvm->fwrt,
						 FW_DBG_TRIGGER_DRIVER);
		goto error;
	}

	iwl_get_shared_mem_conf(&mvm->fwrt);

	ret = iwl_mvm_sf_update(mvm, NULL, false);
	if (ret)
		IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");

	if (!mvm->trans->ini_valid) {
		mvm->fwrt.dump.conf = FW_DBG_INVALID;
		/* if we have a destination, assume EARLY START */
		if (mvm->fw->dbg.dest_tlv)
			mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE;
		iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE);
	}

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	if (!iwl_mvm_has_unified_ucode(mvm)) {
		/* Send phy db control command and then phy db calibration */
		ret = iwl_send_phy_db_data(mvm->phy_db);
		if (ret)
			goto error;

		ret = iwl_send_phy_cfg_cmd(mvm);
		if (ret)
			goto error;
	}

	ret = iwl_mvm_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	/* Init RSS configuration */
	if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
		ret = iwl_configure_rxq(mvm);
		if (ret) {
			IWL_ERR(mvm, "Failed to configure RX queues: %d\n",
				ret);
			goto error;
		}
	}

	if (iwl_mvm_has_new_rx_api(mvm)) {
		ret = iwl_send_rss_cfg_cmd(mvm);
		if (ret) {
			IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
				ret);
			goto error;
		}
	}

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;

	/* reset quota debouncing buffer - 0xff will yield invalid data */
	memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));

	ret = iwl_mvm_send_dqa_cmd(mvm);
	if (ret)
		goto error;

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	/* Add all the PHY contexts */
	chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0];
	cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
	for (i = 0; i < NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i],
					   &chandef, 1, 1);
		if (ret)
			goto error;
	}

#ifdef CONFIG_THERMAL
	if (iwl_mvm_is_tt_in_fw(mvm)) {
		/* in order to give the responsibility of ct-kill and
		 * TX backoff to FW we need to send empty temperature reporting
		 * cmd during init time
		 */
		iwl_mvm_send_temp_report_ths_cmd(mvm);
	} else {
		/* Initialize tx backoffs to the minimal possible */
		iwl_mvm_tt_tx_backoff(mvm, 0);
	}

	/* TODO: read the budget from BIOS / Platform NVM */

	/*
	 * In case there is no budget from BIOS / Platform NVM the default
	 * budget should be 2000mW (cooling state 0).
	 */
	if (iwl_mvm_is_ctdp_supported(mvm)) {
		ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
					   mvm->cooling_dev.cur_state);
		if (ret)
			goto error;
	}
#else
	/* Initialize tx backoffs to the minimal possible */
	iwl_mvm_tt_tx_backoff(mvm, 0);
#endif

	WARN_ON(iwl_mvm_config_ltr(mvm));

	ret = iwl_mvm_power_update_device(mvm);
	if (ret)
		goto error;

	/*
	 * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
	 * anyway, so don't init MCC.
	 */
	if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
		ret = iwl_mvm_init_mcc(mvm);
		if (ret)
			goto error;
	}

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
		mvm->scan_type = IWL_SCAN_TYPE_NOT_SET;
		mvm->hb_scan_type = IWL_SCAN_TYPE_NOT_SET;
		ret = iwl_mvm_config_scan(mvm);
		if (ret)
			goto error;
	}

	/* allow FW/transport low power modes if not during restart */
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);

	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_UPDATE_DB);

	if (iwl_acpi_get_eckv(mvm->dev, &mvm->ext_clock_valid))
		IWL_DEBUG_INFO(mvm, "ECKV table doesn't exist in BIOS\n");

	ret = iwl_mvm_sar_init(mvm);
	if (ret == 0) {
		ret = iwl_mvm_sar_geo_init(mvm);
	} else if (ret > 0 && !iwl_mvm_sar_get_wgds_table(mvm)) {
		/*
		 * If basic SAR is not available, we check for WGDS,
		 * which should *not* be available either.  If it is
		 * available, issue an error, because we can't use SAR
		 * Geo without basic SAR.
		 */
		IWL_ERR(mvm, "BIOS contains WGDS but no WRDS\n");
	}

	if (ret < 0)
		goto error;

	iwl_mvm_leds_sync(mvm);

	IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
	return 0;
 error:
	if (!iwlmvm_mod_params.init_dbg || !ret)
		iwl_mvm_stop_device(mvm);
	return ret;
}

int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
{
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
	if (ret) {
		IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
		goto error;
	}

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/* Send phy db control command and then phy db calibration*/
	ret = iwl_send_phy_db_data(mvm->phy_db);
	if (ret)
		goto error;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret)
		goto error;

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	return 0;
 error:
	iwl_mvm_stop_device(mvm);
	return ret;
}

void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
				 struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
	u32 flags = le32_to_cpu(card_state_notif->flags);

	IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n",
			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & CT_KILL_CARD_DISABLED) ?
			  "Reached" : "Not reached");
}

void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
			     struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;

	IWL_DEBUG_INFO(mvm,
		       "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
		       le32_to_cpu(mfuart_notif->installed_ver),
		       le32_to_cpu(mfuart_notif->external_ver),
		       le32_to_cpu(mfuart_notif->status),
		       le32_to_cpu(mfuart_notif->duration));

	if (iwl_rx_packet_payload_len(pkt) == sizeof(*mfuart_notif))
		IWL_DEBUG_INFO(mvm,
			       "MFUART: image size: 0x%08x\n",
			       le32_to_cpu(mfuart_notif->image_size));
}