// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <net/mac80211.h>

#include "fw/notif-wait.h"
#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "fw/img.h"
#include "iwl-debug.h"
#include "iwl-drv.h"
#include "iwl-modparams.h"
#include "mvm.h"
#include "iwl-phy-db.h"
#include "iwl-eeprom-parse.h"
#include "iwl-csr.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "rs.h"
#include "fw/api/scan.h"
#include "time-event.h"
#include "fw-api.h"
#include "fw/acpi.h"
#include "fw/uefi.h"

#define DRV_DESCRIPTION	"The new Intel(R) wireless AGN driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");

static const struct iwl_op_mode_ops iwl_mvm_ops;
static const struct iwl_op_mode_ops iwl_mvm_ops_mq;

struct iwl_mvm_mod_params iwlmvm_mod_params = {
	.power_scheme = IWL_POWER_SCHEME_BPS,
	/* rest of fields are 0 by default */
};

module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, 0444);
MODULE_PARM_DESC(init_dbg,
		 "set to true to debug an ASSERT in INIT fw (default: false)");
module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, 0444);
MODULE_PARM_DESC(power_scheme,
		 "power management scheme: 1-active, 2-balanced, 3-low power, default: 2");

/*
 * module init and exit functions
 */
static int __init iwl_mvm_init(void)
{
	int ret;

	ret = iwl_mvm_rate_control_register();
	if (ret) {
		pr_err("Unable to register rate control algorithm: %d\n", ret);
		return ret;
	}

	ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
	if (ret)
		pr_err("Unable to register MVM op_mode: %d\n", ret);

	return ret;
}
module_init(iwl_mvm_init);

static void __exit iwl_mvm_exit(void)
{
	iwl_opmode_deregister("iwlmvm");
	iwl_mvm_rate_control_unregister();
}
module_exit(iwl_mvm_exit);

static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	u32 reg_val;
	u32 phy_config = iwl_mvm_get_phy_config(mvm);

	radio_cfg_type = (phy_config & FW_PHY_CFG_RADIO_TYPE) >>
			 FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & FW_PHY_CFG_RADIO_STEP) >>
			 FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & FW_PHY_CFG_RADIO_DASH) >>
			 FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val = CSR_HW_REV_STEP_DASH(mvm->trans->hw_rev);

	/* radio configuration */
	reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) &
		 ~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE);

	/*
	 * TODO: Bits 7-8 of CSR in 8000 HW family and higher set the ADC
	 * sampling, and shouldn't be set to any non-zero value.
	 * The same is supposed to be true of the other HW, but leaving
	 * the bits unset on earlier hardware (such as the 7260) causes
	 * automatic tests to fail on seemingly unrelated errors. Need to
	 * further investigate this, but for now we'll separate cases.
	 */
	if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
		reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;

	if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt))
		reg_val |= CSR_HW_IF_CONFIG_REG_D3_DEBUG;

	iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
				CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH |
				CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
				CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
				CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
				CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
				CSR_HW_IF_CONFIG_REG_BIT_MAC_SI   |
				CSR_HW_IF_CONFIG_REG_D3_DEBUG,
				reg_val);

	IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
		       radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and be unable to obtain it back.
	 */
	if (!mvm->trans->cfg->apmg_not_supported)
		iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
				       ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}

static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
				     struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_datapath_monitor_notif *notif = (void *)pkt->data;
	struct ieee80211_supported_band *sband;
	const struct ieee80211_sta_he_cap *he_cap;
	struct ieee80211_vif *vif;

	if (notif->type != cpu_to_le32(IWL_DP_MON_NOTIF_TYPE_EXT_CCA))
		return;

	vif = iwl_mvm_get_vif_by_macid(mvm, notif->mac_id);
	if (!vif || vif->type != NL80211_IFTYPE_STATION)
		return;

	if (!vif->bss_conf.chandef.chan ||
	    vif->bss_conf.chandef.chan->band != NL80211_BAND_2GHZ ||
	    vif->bss_conf.chandef.width < NL80211_CHAN_WIDTH_40)
		return;

	if (!vif->bss_conf.assoc)
		return;

	/* this shouldn't happen *again*, ignore it */
	if (mvm->cca_40mhz_workaround)
		return;

	/*
	 * We'll decrement this on disconnect - so set to 2 since we'll
	 * still have to disconnect from the current AP first.
	 */
	mvm->cca_40mhz_workaround = 2;

	/*
	 * This capability manipulation isn't really ideal, but it's the
	 * easiest choice - otherwise we'd have to do some major changes
	 * in mac80211 to support this, which isn't worth it. This does
	 * mean that userspace may have outdated information, but that's
	 * actually not an issue at all.
	 */
	sband = mvm->hw->wiphy->bands[NL80211_BAND_2GHZ];

	WARN_ON(!sband->ht_cap.ht_supported);
	WARN_ON(!(sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40));
	sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;

	he_cap = ieee80211_get_he_iftype_cap(sband,
					     ieee80211_vif_type_p2p(vif));

	if (he_cap) {
		/* we know that ours is writable */
		struct ieee80211_sta_he_cap *he = (void *)he_cap;

		WARN_ON(!he->has_he);
		WARN_ON(!(he->he_cap_elem.phy_cap_info[0] &
				IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G));
		he->he_cap_elem.phy_cap_info[0] &=
			~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
	}

	ieee80211_disconnect(vif, true);
}

void iwl_mvm_apply_fw_smps_request(struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm *mvm = mvmvif->mvm;
	enum ieee80211_smps_mode mode = IEEE80211_SMPS_AUTOMATIC;

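	/*
	 * Honor the firmware's thermal request to drop to a single chain
	 * only where it matters: HE connections on 160 MHz channels.
	 * Everything else stays in automatic SMPS mode.
	 */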
	if (mvm->fw_static_smps_request &&
	    vif->bss_conf.chandef.width == NL80211_CHAN_WIDTH_160 &&
	    vif->bss_conf.he_support)
		mode = IEEE80211_SMPS_STATIC;

	iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_FW, mode);
}

static void iwl_mvm_intf_dual_chain_req(void *data, u8 *mac,
					struct ieee80211_vif *vif)
{
	iwl_mvm_apply_fw_smps_request(vif);
}

static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm,
					      struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_thermal_dual_chain_request *req = (void *)pkt->data;

	/*
	 * We could pass it to the iterator data, but also need to remember
	 * it for new interfaces that are added while in this state.
	 */
	mvm->fw_static_smps_request =
		req->event == cpu_to_le32(THERMAL_DUAL_CHAIN_REQ_DISABLE);
	ieee80211_iterate_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
				     iwl_mvm_intf_dual_chain_req, NULL);
}

/**
 * enum iwl_rx_handler_context - context for Rx handler
 * @RX_HANDLER_SYNC: this means that it will be called in the Rx path
 *	which can't acquire mvm->mutex.
 * @RX_HANDLER_ASYNC_LOCKED: If the handler needs to hold mvm->mutex
 *	(and only in this case!), it should be set as ASYNC. In that case,
 *	it will be called from a worker with mvm->mutex held.
 * @RX_HANDLER_ASYNC_UNLOCKED: in case the handler needs to lock the
 *	mutex itself, it will be called from a worker without mvm->mutex held.
 */
enum iwl_rx_handler_context {
	RX_HANDLER_SYNC,
	RX_HANDLER_ASYNC_LOCKED,
	RX_HANDLER_ASYNC_UNLOCKED,
};

/**
 * struct iwl_rx_handlers - handler for FW notification
 * @cmd_id: command id
 * @min_size: minimum notification size for the handler to be invoked
 * @context: see &iwl_rx_handler_context
 * @fn: the function called when the notification is received
 */
struct iwl_rx_handlers {
	u16 cmd_id, min_size;
	enum iwl_rx_handler_context context;
	void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
};

#define RX_HANDLER_NO_SIZE(_cmd_id, _fn, _context)		\
	{ .cmd_id = _cmd_id, .fn = _fn, .context = _context, }
#define RX_HANDLER_GRP_NO_SIZE(_grp, _cmd, _fn, _context)	\
	{ .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .context = _context, }
#define RX_HANDLER(_cmd_id, _fn, _context, _struct)		\
	{ .cmd_id = _cmd_id, .fn = _fn,				\
	  .context = _context, .min_size = sizeof(_struct), }
#define RX_HANDLER_GRP(_grp, _cmd, _fn, _context, _struct)	\
	{ .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn,		\
	  .context = _context, .min_size = sizeof(_struct), }
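
/*
 * For example,
 *   RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF, fn, ctx, struct foo)
 * builds an entry that matches notifications whose header satisfies
 * WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd) == WIDE_ID(DATA_PATH_GROUP,
 * MONITOR_NOTIF) and whose handler is only invoked when the payload is
 * at least sizeof(struct foo) bytes; "fn", "ctx" and "struct foo" are
 * placeholders here, not real symbols.
 */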

/*
 * Handlers for fw notifications
 * Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME)
 * This list should be in order of frequency for performance purposes.
 *
 * The handler can run in one of three contexts, see &iwl_rx_handler_context
 */
static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
	RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC,
		   struct iwl_mvm_tx_resp),
	RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC,
		   struct iwl_mvm_ba_notif),

	RX_HANDLER_GRP(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF,
		       iwl_mvm_tlc_update_notif, RX_HANDLER_SYNC,
		       struct iwl_tlc_update_notif),

	RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif,
		   RX_HANDLER_ASYNC_LOCKED, struct iwl_bt_coex_profile_notif),
	RX_HANDLER_NO_SIZE(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
			   RX_HANDLER_ASYNC_LOCKED),
	RX_HANDLER_NO_SIZE(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics,
			   RX_HANDLER_ASYNC_LOCKED),

	RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID,
		   iwl_mvm_window_status_notif, RX_HANDLER_SYNC,
		   struct iwl_ba_window_status_notif),

	RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif,
		   RX_HANDLER_SYNC, struct iwl_time_event_notif),
	RX_HANDLER_GRP(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF,
		       iwl_mvm_rx_session_protect_notif, RX_HANDLER_SYNC,
		       struct iwl_mvm_session_prot_notif),
	RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc,
		   RX_HANDLER_ASYNC_LOCKED, struct iwl_mcc_chub_notif),

	RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, RX_HANDLER_SYNC,
		   struct iwl_mvm_eosp_notification),

	RX_HANDLER(SCAN_ITERATION_COMPLETE,
		   iwl_mvm_rx_lmac_scan_iter_complete_notif, RX_HANDLER_SYNC,
		   struct iwl_lmac_scan_complete_notif),
	RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
		   iwl_mvm_rx_lmac_scan_complete_notif,
		   RX_HANDLER_ASYNC_LOCKED, struct iwl_periodic_scan_complete),
	RX_HANDLER_NO_SIZE(MATCH_FOUND_NOTIFICATION,
			   iwl_mvm_rx_scan_match_found,
			   RX_HANDLER_SYNC),
	RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
		   RX_HANDLER_ASYNC_LOCKED, struct iwl_umac_scan_complete),
	RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC,
		   iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC,
		   struct iwl_umac_scan_iter_complete_notif),

	RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif,
		   RX_HANDLER_SYNC, struct iwl_card_state_notif),

	RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
		   RX_HANDLER_SYNC, struct iwl_missed_beacons_notif),

	RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC,
		   struct iwl_error_resp),
	RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
		   iwl_mvm_power_uapsd_misbehaving_ap_notif, RX_HANDLER_SYNC,
		   struct iwl_uapsd_misbehaving_ap_notif),
	RX_HANDLER_NO_SIZE(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif,
			   RX_HANDLER_ASYNC_LOCKED),
	RX_HANDLER_GRP_NO_SIZE(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
			       iwl_mvm_temp_notif, RX_HANDLER_ASYNC_UNLOCKED),
	RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
		       iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC,
		       struct ct_kill_notif),

	RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
		   RX_HANDLER_ASYNC_LOCKED,
		   struct iwl_tdls_channel_switch_notif),
	RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif,
		   RX_HANDLER_SYNC, struct iwl_mfuart_load_notif_v1),
	RX_HANDLER_GRP(LOCATION_GROUP, TOF_RESPONDER_STATS,
		       iwl_mvm_ftm_responder_stats, RX_HANDLER_ASYNC_LOCKED,
		       struct iwl_ftm_responder_stats),

	RX_HANDLER_GRP_NO_SIZE(LOCATION_GROUP, TOF_RANGE_RESPONSE_NOTIF,
			       iwl_mvm_ftm_range_resp, RX_HANDLER_ASYNC_LOCKED),
	RX_HANDLER_GRP_NO_SIZE(LOCATION_GROUP, TOF_LC_NOTIF,
			       iwl_mvm_ftm_lc_notif, RX_HANDLER_ASYNC_LOCKED),

	RX_HANDLER_GRP(DEBUG_GROUP, MFU_ASSERT_DUMP_NTF,
		       iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC,
		       struct iwl_mfu_assert_dump_notif),
	RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
		       iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC,
		       struct iwl_stored_beacon_notif_v2),
	RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
		       iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC,
		       struct iwl_mu_group_mgmt_notif),
	RX_HANDLER_GRP(DATA_PATH_GROUP, STA_PM_NOTIF,
		       iwl_mvm_sta_pm_notif, RX_HANDLER_SYNC,
		       struct iwl_mvm_pm_state_notification),
	RX_HANDLER_GRP(MAC_CONF_GROUP, PROBE_RESPONSE_DATA_NOTIF,
		       iwl_mvm_probe_resp_data_notif,
		       RX_HANDLER_ASYNC_LOCKED,
		       struct iwl_probe_resp_data_notif),
	RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_START_NOTIF,
		       iwl_mvm_channel_switch_start_notif,
		       RX_HANDLER_SYNC, struct iwl_channel_switch_start_notif),
	RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF,
		       iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED,
		       struct iwl_datapath_monitor_notif),

	RX_HANDLER_GRP(DATA_PATH_GROUP, THERMAL_DUAL_CHAIN_REQUEST,
		       iwl_mvm_rx_thermal_dual_chain_req,
		       RX_HANDLER_ASYNC_LOCKED,
		       struct iwl_thermal_dual_chain_request),
};
#undef RX_HANDLER
#undef RX_HANDLER_GRP

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
	HCMD_NAME(UCODE_ALIVE_NTFY),
	HCMD_NAME(REPLY_ERROR),
	HCMD_NAME(ECHO_CMD),
	HCMD_NAME(INIT_COMPLETE_NOTIF),
	HCMD_NAME(PHY_CONTEXT_CMD),
	HCMD_NAME(DBG_CFG),
	HCMD_NAME(SCAN_CFG_CMD),
	HCMD_NAME(SCAN_REQ_UMAC),
	HCMD_NAME(SCAN_ABORT_UMAC),
	HCMD_NAME(SCAN_COMPLETE_UMAC),
	HCMD_NAME(BA_WINDOW_STATUS_NOTIFICATION_ID),
	HCMD_NAME(ADD_STA_KEY),
	HCMD_NAME(ADD_STA),
	HCMD_NAME(REMOVE_STA),
	HCMD_NAME(FW_GET_ITEM_CMD),
	HCMD_NAME(TX_CMD),
	HCMD_NAME(SCD_QUEUE_CFG),
	HCMD_NAME(TXPATH_FLUSH),
	HCMD_NAME(MGMT_MCAST_KEY),
	HCMD_NAME(WEP_KEY),
	HCMD_NAME(SHARED_MEM_CFG),
	HCMD_NAME(TDLS_CHANNEL_SWITCH_CMD),
	HCMD_NAME(MAC_CONTEXT_CMD),
	HCMD_NAME(TIME_EVENT_CMD),
	HCMD_NAME(TIME_EVENT_NOTIFICATION),
	HCMD_NAME(BINDING_CONTEXT_CMD),
	HCMD_NAME(TIME_QUOTA_CMD),
	HCMD_NAME(NON_QOS_TX_COUNTER_CMD),
	HCMD_NAME(LEDS_CMD),
	HCMD_NAME(LQ_CMD),
	HCMD_NAME(FW_PAGING_BLOCK_CMD),
	HCMD_NAME(SCAN_OFFLOAD_REQUEST_CMD),
	HCMD_NAME(SCAN_OFFLOAD_ABORT_CMD),
	HCMD_NAME(HOT_SPOT_CMD),
	HCMD_NAME(SCAN_OFFLOAD_PROFILES_QUERY_CMD),
	HCMD_NAME(BT_COEX_UPDATE_REDUCED_TXP),
	HCMD_NAME(BT_COEX_CI),
	HCMD_NAME(PHY_CONFIGURATION_CMD),
	HCMD_NAME(CALIB_RES_NOTIF_PHY_DB),
	HCMD_NAME(PHY_DB_CMD),
	HCMD_NAME(SCAN_OFFLOAD_COMPLETE),
	HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
	HCMD_NAME(POWER_TABLE_CMD),
	HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
	HCMD_NAME(REPLY_THERMAL_MNG_BACKOFF),
	HCMD_NAME(DC2DC_CONFIG_CMD),
	HCMD_NAME(NVM_ACCESS_CMD),
	HCMD_NAME(BEACON_NOTIFICATION),
	HCMD_NAME(BEACON_TEMPLATE_CMD),
	HCMD_NAME(TX_ANT_CONFIGURATION_CMD),
	HCMD_NAME(BT_CONFIG),
	HCMD_NAME(STATISTICS_CMD),
	HCMD_NAME(STATISTICS_NOTIFICATION),
	HCMD_NAME(EOSP_NOTIFICATION),
	HCMD_NAME(REDUCE_TX_POWER_CMD),
	HCMD_NAME(CARD_STATE_NOTIFICATION),
	HCMD_NAME(MISSED_BEACONS_NOTIFICATION),
	HCMD_NAME(TDLS_CONFIG_CMD),
	HCMD_NAME(MAC_PM_POWER_TABLE),
	HCMD_NAME(TDLS_CHANNEL_SWITCH_NOTIFICATION),
	HCMD_NAME(MFUART_LOAD_NOTIFICATION),
	HCMD_NAME(RSS_CONFIG_CMD),
	HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC),
	HCMD_NAME(REPLY_RX_PHY_CMD),
	HCMD_NAME(REPLY_RX_MPDU_CMD),
	HCMD_NAME(BAR_FRAME_RELEASE),
	HCMD_NAME(FRAME_RELEASE),
	HCMD_NAME(BA_NOTIF),
	HCMD_NAME(MCC_UPDATE_CMD),
	HCMD_NAME(MCC_CHUB_UPDATE_CMD),
	HCMD_NAME(MARKER_CMD),
	HCMD_NAME(BT_PROFILE_NOTIFICATION),
	HCMD_NAME(BCAST_FILTER_CMD),
	HCMD_NAME(MCAST_FILTER_CMD),
	HCMD_NAME(REPLY_SF_CFG_CMD),
	HCMD_NAME(REPLY_BEACON_FILTERING_CMD),
	HCMD_NAME(D3_CONFIG_CMD),
	HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD),
	HCMD_NAME(OFFLOADS_QUERY_CMD),
	HCMD_NAME(MATCH_FOUND_NOTIFICATION),
	HCMD_NAME(DTS_MEASUREMENT_NOTIFICATION),
	HCMD_NAME(WOWLAN_PATTERNS),
	HCMD_NAME(WOWLAN_CONFIGURATION),
	HCMD_NAME(WOWLAN_TSC_RSC_PARAM),
	HCMD_NAME(WOWLAN_TKIP_PARAM),
	HCMD_NAME(WOWLAN_KEK_KCK_MATERIAL),
	HCMD_NAME(WOWLAN_GET_STATUSES),
	HCMD_NAME(SCAN_ITERATION_COMPLETE),
	HCMD_NAME(D0I3_END_CMD),
	HCMD_NAME(LTR_CONFIG),
	HCMD_NAME(LDBG_CONFIG_CMD),
};
/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
	HCMD_NAME(SHARED_MEM_CFG_CMD),
	HCMD_NAME(INIT_EXTENDED_CFG_CMD),
	HCMD_NAME(FW_ERROR_RECOVERY_CMD),
	HCMD_NAME(RFI_CONFIG_CMD),
	HCMD_NAME(RFI_GET_FREQ_TABLE_CMD),
	HCMD_NAME(SYSTEM_FEATURES_CONTROL_CMD),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
	HCMD_NAME(CHANNEL_SWITCH_TIME_EVENT_CMD),
	HCMD_NAME(SESSION_PROTECTION_CMD),
	HCMD_NAME(SESSION_PROTECTION_NOTIF),
	HCMD_NAME(CHANNEL_SWITCH_START_NOTIF),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
	HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
	HCMD_NAME(CTDP_CONFIG_CMD),
	HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD),
	HCMD_NAME(PER_CHAIN_LIMIT_OFFSET_CMD),
	HCMD_NAME(CT_KILL_NOTIFICATION),
	HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
	HCMD_NAME(DQA_ENABLE_CMD),
	HCMD_NAME(UPDATE_MU_GROUPS_CMD),
	HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
	HCMD_NAME(STA_HE_CTXT_CMD),
	HCMD_NAME(RLC_CONFIG_CMD),
	HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
	HCMD_NAME(TLC_MNG_CONFIG_CMD),
	HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD),
	HCMD_NAME(MONITOR_NOTIF),
	HCMD_NAME(THERMAL_DUAL_CHAIN_REQUEST),
	HCMD_NAME(STA_PM_NOTIF),
	HCMD_NAME(MU_GROUP_MGMT_NOTIF),
	HCMD_NAME(RX_QUEUES_NOTIFICATION),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_location_names[] = {
	HCMD_NAME(TOF_RANGE_REQ_CMD),
	HCMD_NAME(TOF_CONFIG_CMD),
	HCMD_NAME(TOF_RANGE_ABORT_CMD),
	HCMD_NAME(TOF_RANGE_REQ_EXT_CMD),
	HCMD_NAME(TOF_RESPONDER_CONFIG_CMD),
	HCMD_NAME(TOF_RESPONDER_DYN_CONFIG_CMD),
	HCMD_NAME(TOF_LC_NOTIF),
	HCMD_NAME(TOF_RESPONDER_STATS),
	HCMD_NAME(TOF_MCSI_DEBUG_NOTIF),
	HCMD_NAME(TOF_RANGE_RESPONSE_NOTIF),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
	HCMD_NAME(STORED_BEACON_NTF),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = {
	HCMD_NAME(NVM_ACCESS_COMPLETE),
	HCMD_NAME(NVM_GET_INFO),
	HCMD_NAME(TAS_CONFIG),
};

static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
	[LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
	[LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
	[SYSTEM_GROUP] = HCMD_ARR(iwl_mvm_system_names),
	[MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names),
	[PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
	[DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
	[LOCATION_GROUP] = HCMD_ARR(iwl_mvm_location_names),
	[PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
	[REGULATORY_AND_NVM_GROUP] =
		HCMD_ARR(iwl_mvm_regulatory_and_nvm_names),
};

/* this forward declaration avoids having to export the function */
static void iwl_mvm_async_handlers_wk(struct work_struct *wk);

static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm)
{
	const struct iwl_pwr_tx_backoff *backoff = mvm->cfg->pwr_tx_backoffs;
	u64 dflt_pwr_limit;

	if (!backoff)
		return 0;

	dflt_pwr_limit = iwl_acpi_get_pwr_limit(mvm->dev);

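	/*
	 * Walk the platform backoff table (terminated by a zero .pwr
	 * entry) and use the first entry whose power threshold is covered
	 * by the ACPI power limit; this assumes the entries are sorted by
	 * descending .pwr.
	 */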
	while (backoff->pwr) {
		if (dflt_pwr_limit >= backoff->pwr)
			return backoff->backoff;

		backoff++;
	}

	return 0;
}

static void iwl_mvm_tx_unblock_dwork(struct work_struct *work)
{
	struct iwl_mvm *mvm =
		container_of(work, struct iwl_mvm, cs_tx_unblock_dwork.work);
	struct ieee80211_vif *tx_blocked_vif;
	struct iwl_mvm_vif *mvmvif;

	mutex_lock(&mvm->mutex);

	tx_blocked_vif =
		rcu_dereference_protected(mvm->csa_tx_blocked_vif,
					  lockdep_is_held(&mvm->mutex));

	if (!tx_blocked_vif)
		goto unlock;

	mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif);
	iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
	RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
unlock:
	mutex_unlock(&mvm->mutex);
}

static int iwl_mvm_fwrt_dump_start(void *ctx)
{
	struct iwl_mvm *mvm = ctx;

	mutex_lock(&mvm->mutex);

	return 0;
}

static void iwl_mvm_fwrt_dump_end(void *ctx)
{
	struct iwl_mvm *mvm = ctx;

	mutex_unlock(&mvm->mutex);
}

static bool iwl_mvm_fwrt_fw_running(void *ctx)
{
	return iwl_mvm_firmware_running(ctx);
}

static int iwl_mvm_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd)
{
	struct iwl_mvm *mvm = (struct iwl_mvm *)ctx;
	int ret;

	mutex_lock(&mvm->mutex);
	ret = iwl_mvm_send_cmd(mvm, host_cmd);
	mutex_unlock(&mvm->mutex);

	return ret;
}

static bool iwl_mvm_d3_debug_enable(void *ctx)
{
	return IWL_MVM_D3_DEBUG;
}

static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
	.dump_start = iwl_mvm_fwrt_dump_start,
	.dump_end = iwl_mvm_fwrt_dump_end,
	.fw_running = iwl_mvm_fwrt_fw_running,
	.send_hcmd = iwl_mvm_fwrt_send_hcmd,
	.d3_debug_enable = iwl_mvm_d3_debug_enable,
};

static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm)
{
	struct iwl_trans *trans = mvm->trans;
	int ret;

	if (trans->csme_own) {
		if (WARN(!mvm->mei_registered,
			 "csme is owner, but we aren't registered to iwlmei\n"))
			goto get_nvm_from_fw;

		mvm->mei_nvm_data = iwl_mei_get_nvm();
		if (mvm->mei_nvm_data) {
			/*
			 * mvm->mei_nvm_data is set and because of that,
			 * we'll load the NVM from the FW when we get
			 * ownership.
			 */
			mvm->nvm_data =
				iwl_parse_mei_nvm_data(trans, trans->cfg,
						       mvm->mei_nvm_data, mvm->fw);
			return 0;
		}

		IWL_ERR(mvm,
			"Got a NULL NVM from CSME, trying to get it from the device\n");
	}

get_nvm_from_fw:
	rtnl_lock();
	wiphy_lock(mvm->hw->wiphy);
	mutex_lock(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret) {
		mutex_unlock(&mvm->mutex);
		return ret;
	}

	ret = iwl_run_init_mvm_ucode(mvm);
	if (ret && ret != -ERFKILL)
		iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER);
	if (!ret && iwl_mvm_is_lar_supported(mvm)) {
		mvm->hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
		ret = iwl_mvm_init_mcc(mvm);
	}

	if (!iwlmvm_mod_params.init_dbg || !ret)
		iwl_mvm_stop_device(mvm);

	mutex_unlock(&mvm->mutex);
	wiphy_unlock(mvm->hw->wiphy);
	rtnl_unlock();

	if (ret)
		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);

	return ret;
}

static int iwl_mvm_start_post_nvm(struct iwl_mvm *mvm)
{
	struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused;
	int ret;

	iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);

	ret = iwl_mvm_mac_setup_register(mvm);
	if (ret)
		return ret;
	mvm->hw_registered = true;

	iwl_mvm_dbgfs_register(mvm);

	wiphy_rfkill_set_hw_state_reason(mvm->hw->wiphy,
					 mvm->mei_rfkill_blocked,
					 RFKILL_HARD_BLOCK_NOT_OWNER);

	iwl_mvm_mei_set_sw_rfkill_state(mvm);

	return 0;
}

struct iwl_mvm_frob_txf_data {
	u8 *buf;
	size_t buflen;
};

static void iwl_mvm_frob_txf_key_iter(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      struct ieee80211_sta *sta,
				      struct ieee80211_key_conf *key,
				      void *data)
{
	struct iwl_mvm_frob_txf_data *txf = data;
	u8 keylen, match, matchend;
	u8 *keydata;
	size_t i;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		keydata = key->key;
		keylen = key->keylen;
		break;
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
	case WLAN_CIPHER_SUITE_TKIP:
		/*
		 * WEP has short keys which might show up in the payload,
		 * and then you can deduce the key, so in this case just
		 * remove all FIFO data.
		 * For TKIP, we don't know the phase 2 keys here, so same.
		 */
		memset(txf->buf, 0xBB, txf->buflen);
		return;
	default:
		return;
	}

	/* scan for key material and clear it out */
	match = 0;
	for (i = 0; i < txf->buflen; i++) {
		if (txf->buf[i] != keydata[match]) {
			match = 0;
			continue;
		}
		match++;
		if (match == keylen) {
			memset(txf->buf + i - keylen + 1, 0xAA, keylen);
			match = 0;
		}
	}

	/* we're dealing with a FIFO, so check wrapped around data */
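	/*
	 * Example: with a 4-byte key, a snapshot whose tail ends with the
	 * first two key bytes and whose head starts with the last two
	 * contains the key split across the wrap point; 'match' carries
	 * the partial match from the end of the buffer into this pass.
	 */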
	matchend = match;
	for (i = 0; match && i < keylen - match; i++) {
		if (txf->buf[i] != keydata[match])
			break;
		match++;
		if (match == keylen) {
			memset(txf->buf, 0xAA, i + 1);
			memset(txf->buf + txf->buflen - matchend, 0xAA,
			       matchend);
			break;
		}
	}
}

static void iwl_mvm_frob_txf(void *ctx, void *buf, size_t buflen)
{
	struct iwl_mvm_frob_txf_data txf = {
		.buf = buf,
		.buflen = buflen,
	};
	struct iwl_mvm *mvm = ctx;

	/* embedded key material exists only on old API */
	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	rcu_read_lock();
	ieee80211_iter_keys_rcu(mvm->hw, NULL, iwl_mvm_frob_txf_key_iter, &txf);
	rcu_read_unlock();
}

static void iwl_mvm_frob_hcmd(void *ctx, void *hcmd, size_t len)
{
	/* we only use wide headers for commands */
	struct iwl_cmd_header_wide *hdr = hcmd;
	unsigned int frob_start = sizeof(*hdr), frob_end = 0;

	if (len < sizeof(*hdr))
		return;

	/* all the commands we care about are in LONG_GROUP */
	if (hdr->group_id != LONG_GROUP)
		return;

	switch (hdr->cmd) {
	case WEP_KEY:
	case WOWLAN_TKIP_PARAM:
	case WOWLAN_KEK_KCK_MATERIAL:
	case ADD_STA_KEY:
		/*
		 * blank out everything here, easier than dealing
		 * with the various versions of the command
		 */
		frob_end = INT_MAX;
		break;
	case MGMT_MCAST_KEY:
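		/*
		 * Only the key material (from the igtk field onwards) is
		 * sensitive here; the BUILD_BUG_ONs below ensure the v1
		 * and current layouts place igtk compatibly, so frobbing
		 * with the current offsets also covers the v1 command.
		 */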
		frob_start = offsetof(struct iwl_mvm_mgmt_mcast_key_cmd, igtk);
		BUILD_BUG_ON(offsetof(struct iwl_mvm_mgmt_mcast_key_cmd, igtk) !=
			     offsetof(struct iwl_mvm_mgmt_mcast_key_cmd_v1, igtk));

		frob_end = offsetofend(struct iwl_mvm_mgmt_mcast_key_cmd, igtk);
		BUILD_BUG_ON(offsetof(struct iwl_mvm_mgmt_mcast_key_cmd, igtk) <
			     offsetof(struct iwl_mvm_mgmt_mcast_key_cmd_v1, igtk));
		break;
	}

	if (frob_start >= frob_end)
		return;

	if (frob_end > len)
		frob_end = len;

	memset((u8 *)hcmd + frob_start, 0xAA, frob_end - frob_start);
}

static void iwl_mvm_frob_mem(void *ctx, u32 mem_addr, void *mem, size_t buflen)
{
	const struct iwl_dump_exclude *excl;
	struct iwl_mvm *mvm = ctx;
	int i;

	switch (mvm->fwrt.cur_fw_img) {
	case IWL_UCODE_INIT:
	default:
		/* not relevant */
		return;
	case IWL_UCODE_REGULAR:
	case IWL_UCODE_REGULAR_USNIFFER:
		excl = mvm->fw->dump_excl;
		break;
	case IWL_UCODE_WOWLAN:
		excl = mvm->fw->dump_excl_wowlan;
		break;
	}

	BUILD_BUG_ON(sizeof(mvm->fw->dump_excl) !=
		     sizeof(mvm->fw->dump_excl_wowlan));

	for (i = 0; i < ARRAY_SIZE(mvm->fw->dump_excl); i++) {
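		/*
		 * Clip each exclusion region to the dumped window before
		 * wiping it below.
		 */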
		u32 start, end;

		if (!excl[i].addr || !excl[i].size)
			continue;

		start = excl[i].addr;
		end = start + excl[i].size;

		if (end <= mem_addr || start >= mem_addr + buflen)
			continue;

		if (start < mem_addr)
			start = mem_addr;

		if (end > mem_addr + buflen)
			end = mem_addr + buflen;

		memset((u8 *)mem + start - mem_addr, 0xAA, end - start);
	}
}
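
/*
 * These sanitizers are passed to iwl_fw_runtime_init() below so that
 * firmware dumps have key material and other sensitive bytes overwritten
 * (the 0xAA/0xBB patterns above) before the dump data is handed out.
 */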

static const struct iwl_dump_sanitize_ops iwl_mvm_sanitize_ops = {
	.frob_txf = iwl_mvm_frob_txf,
	.frob_hcmd = iwl_mvm_frob_hcmd,
	.frob_mem = iwl_mvm_frob_mem,
};

static void iwl_mvm_me_conn_status(void *priv, const struct iwl_mei_conn_info *conn_info)
{
	struct iwl_mvm *mvm = priv;
	struct iwl_mvm_csme_conn_info *prev_conn_info, *curr_conn_info;

	/*
	 * This is protected by the guarantee that this function will not be
	 * called twice on two different threads
	 */
	prev_conn_info = rcu_dereference_protected(mvm->csme_conn_info, true);

	curr_conn_info = kzalloc(sizeof(*curr_conn_info), GFP_KERNEL);
	if (!curr_conn_info)
		return;

	curr_conn_info->conn_info = *conn_info;

	rcu_assign_pointer(mvm->csme_conn_info, curr_conn_info);

	if (prev_conn_info)
		kfree_rcu(prev_conn_info, rcu_head);
}

static void iwl_mvm_mei_rfkill(void *priv, bool blocked)
{
	struct iwl_mvm *mvm = priv;

	mvm->mei_rfkill_blocked = blocked;
	if (!mvm->hw_registered)
		return;

	wiphy_rfkill_set_hw_state_reason(mvm->hw->wiphy,
					 mvm->mei_rfkill_blocked,
					 RFKILL_HARD_BLOCK_NOT_OWNER);
}

static void iwl_mvm_mei_roaming_forbidden(void *priv, bool forbidden)
{
	struct iwl_mvm *mvm = priv;

	if (!mvm->hw_registered || !mvm->csme_vif)
		return;

	iwl_mvm_send_roaming_forbidden_event(mvm, mvm->csme_vif, forbidden);
}

static void iwl_mvm_sap_connected_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm =
		container_of(wk, struct iwl_mvm, sap_connected_wk);
	int ret;

	ret = iwl_mvm_start_get_nvm(mvm);
	if (ret)
		goto out_free;

	ret = iwl_mvm_start_post_nvm(mvm);
	if (ret)
		goto out_free;

	return;

out_free:
	IWL_ERR(mvm, "Couldn't get started...\n");
	iwl_mei_start_unregister();
	iwl_mei_unregister_complete();
	iwl_fw_flush_dumps(&mvm->fwrt);
	iwl_mvm_thermal_exit(mvm);
	iwl_fw_runtime_free(&mvm->fwrt);
	iwl_phy_db_free(mvm->phy_db);
	kfree(mvm->scan_cmd);
	iwl_trans_op_mode_leave(mvm->trans);
	kfree(mvm->nvm_data);
	kfree(mvm->mei_nvm_data);

	ieee80211_free_hw(mvm->hw);
}

static void iwl_mvm_mei_sap_connected(void *priv)
{
	struct iwl_mvm *mvm = priv;

	if (!mvm->hw_registered)
		schedule_work(&mvm->sap_connected_wk);
}

static void iwl_mvm_mei_nic_stolen(void *priv)
{
	struct iwl_mvm *mvm = priv;

	rtnl_lock();
	cfg80211_shutdown_all_interfaces(mvm->hw->wiphy);
	rtnl_unlock();
}

static const struct iwl_mei_ops mei_ops = {
	.me_conn_status = iwl_mvm_me_conn_status,
	.rfkill = iwl_mvm_mei_rfkill,
	.roaming_forbidden = iwl_mvm_mei_roaming_forbidden,
	.sap_connected = iwl_mvm_mei_sap_connected,
	.nic_stolen = iwl_mvm_mei_nic_stolen,
};

static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
		      const struct iwl_fw *fw, struct dentry *dbgfs_dir)
{
	struct ieee80211_hw *hw;
	struct iwl_op_mode *op_mode;
	struct iwl_mvm *mvm;
	struct iwl_trans_config trans_cfg = {};
	static const u8 no_reclaim_cmds[] = {
		TX_CMD,
	};
	int scan_size;
	u32 min_backoff;
	struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused;

	/*
	 * We use IWL_MVM_STATION_COUNT_MAX to check the validity of the station
	 * index all over the driver - check that its value corresponds to the
	 * array size.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) !=
		     IWL_MVM_STATION_COUNT_MAX);

	/********************************
	 * 1. Allocating and configuring HW data
	 ********************************/
	hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) +
				sizeof(struct iwl_mvm),
				&iwl_mvm_hw_ops);
	if (!hw)
		return NULL;

	hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;

	if (cfg->max_tx_agg_size)
		hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
	else
		hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;

	op_mode = hw->priv;

	mvm = IWL_OP_MODE_GET_MVM(op_mode);
	mvm->dev = trans->dev;
	mvm->trans = trans;
	mvm->cfg = cfg;
	mvm->fw = fw;
	mvm->hw = hw;

	iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm,
			    &iwl_mvm_sanitize_ops, mvm, dbgfs_dir);

	iwl_mvm_get_acpi_tables(mvm);
	iwl_uefi_get_sgom_table(trans, &mvm->fwrt);

	mvm->init_status = 0;

	if (iwl_mvm_has_new_rx_api(mvm)) {
		op_mode->ops = &iwl_mvm_ops_mq;
		trans->rx_mpdu_cmd_hdr_size =
			(trans->trans_cfg->device_family >=
			 IWL_DEVICE_FAMILY_AX210) ?
			sizeof(struct iwl_rx_mpdu_desc) :
			IWL_RX_DESC_SIZE_V1;
	} else {
		op_mode->ops = &iwl_mvm_ops;
		trans->rx_mpdu_cmd_hdr_size =
			sizeof(struct iwl_rx_mpdu_res_start);

		if (WARN_ON(trans->num_rx_queues > 1))
			goto out_free;
	}

	mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		/*
		 * If we have the new TX/queue allocation API initialize them
		 * all to invalid numbers. We'll rewrite the ones that we need
		 * later, but that doesn't happen for all of them all of the
		 * time (e.g. P2P Device is optional), and if a dynamic queue
		 * ends up getting number 2 (IWL_MVM_DQA_P2P_DEVICE_QUEUE) then
		 * iwl_mvm_is_static_queue() erroneously returns true, and we
		 * might have things getting stuck.
		 */
		mvm->aux_queue = IWL_MVM_INVALID_QUEUE;
		mvm->snif_queue = IWL_MVM_INVALID_QUEUE;
		mvm->probe_queue = IWL_MVM_INVALID_QUEUE;
		mvm->p2p_dev_queue = IWL_MVM_INVALID_QUEUE;
	} else {
		mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
		mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE;
		mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
		mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
	}

	mvm->sf_state = SF_UNINIT;
	if (iwl_mvm_has_unified_ucode(mvm))
		iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_REGULAR);
	else
		iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_INIT);
	mvm->drop_bcn_ap_mode = true;

	mutex_init(&mvm->mutex);
	spin_lock_init(&mvm->async_handlers_lock);
	INIT_LIST_HEAD(&mvm->time_event_list);
	INIT_LIST_HEAD(&mvm->aux_roc_te_list);
	INIT_LIST_HEAD(&mvm->async_handlers_list);
	spin_lock_init(&mvm->time_event_lock);
	INIT_LIST_HEAD(&mvm->ftm_initiator.loc_list);
	INIT_LIST_HEAD(&mvm->ftm_initiator.pasn_list);
	INIT_LIST_HEAD(&mvm->resp_pasn_list);

	INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
	INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
	INIT_WORK(&mvm->sap_connected_wk, iwl_mvm_sap_connected_wk);
	INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
	INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
	INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
	INIT_LIST_HEAD(&mvm->add_stream_txqs);

	init_waitqueue_head(&mvm->rx_sync_waitq);

	mvm->queue_sync_state = 0;

	SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);

	spin_lock_init(&mvm->tcm.lock);
	INIT_DELAYED_WORK(&mvm->tcm.work, iwl_mvm_tcm_work);
	mvm->tcm.ts = jiffies;
	mvm->tcm.ll_ts = jiffies;
	mvm->tcm.uapsd_nonagg_ts = jiffies;

	INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork);

	mvm->cmd_ver.d0i3_resp =
		iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, D0I3_END_CMD,
					0);
	/* we only support version 1 */
	if (WARN_ON_ONCE(mvm->cmd_ver.d0i3_resp > 1))
		goto out_free;

	mvm->cmd_ver.range_resp =
		iwl_fw_lookup_notif_ver(mvm->fw, LOCATION_GROUP,
					TOF_RANGE_RESPONSE_NOTIF, 5);
	/* we only support up to version 9 */
	if (WARN_ON_ONCE(mvm->cmd_ver.range_resp > 9))
		goto out_free;

	/*
	 * Populate the state variables that the transport layer needs
	 * to know about.
	 */
	trans_cfg.op_mode = op_mode;
	trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
	trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);

	switch (iwlwifi_mod_params.amsdu_size) {
	case IWL_AMSDU_DEF:
		trans_cfg.rx_buf_size = IWL_AMSDU_4K;
		break;
	case IWL_AMSDU_4K:
		trans_cfg.rx_buf_size = IWL_AMSDU_4K;
		break;
	case IWL_AMSDU_8K:
		trans_cfg.rx_buf_size = IWL_AMSDU_8K;
		break;
	case IWL_AMSDU_12K:
		trans_cfg.rx_buf_size = IWL_AMSDU_12K;
		break;
	default:
		pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME,
		       iwlwifi_mod_params.amsdu_size);
		trans_cfg.rx_buf_size = IWL_AMSDU_4K;
	}

	trans->wide_cmd_header = true;
	trans_cfg.bc_table_dword =
		mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210;

	trans_cfg.command_groups = iwl_mvm_groups;
	trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);

	trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
	trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
	trans_cfg.scd_set_active = true;

	trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info,
					  driver_data[2]);

	/* Set a short watchdog for the command queue */
	trans_cfg.cmd_q_wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, NULL, false, true);

	snprintf(mvm->hw->wiphy->fw_version,
		 sizeof(mvm->hw->wiphy->fw_version),
		 "%s", fw->fw_version);

	trans_cfg.fw_reset_handshake = fw_has_capa(&mvm->fw->ucode_capa,
						   IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE);

	/* Configure transport layer */
	iwl_trans_configure(mvm->trans, &trans_cfg);

	trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
	trans->dbg.dest_tlv = mvm->fw->dbg.dest_tlv;
	trans->dbg.n_dest_reg = mvm->fw->dbg.n_dest_reg;
	memcpy(trans->dbg.conf_tlv, mvm->fw->dbg.conf_tlv,
	       sizeof(trans->dbg.conf_tlv));
	trans->dbg.trigger_tlv = mvm->fw->dbg.trigger_tlv;
	trans->iml = mvm->fw->iml;
	trans->iml_len = mvm->fw->iml_len;

	/* set up notification wait support */
	iwl_notification_wait_init(&mvm->notif_wait);

	/* Init phy db */
	mvm->phy_db = iwl_phy_db_init(trans);
	if (!mvm->phy_db) {
		IWL_ERR(mvm, "Cannot init phy_db\n");
		goto out_free;
	}

	IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
		 mvm->trans->name, mvm->trans->hw_rev);

	if (iwlwifi_mod_params.nvm_file)
		mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
	else
		IWL_DEBUG_EEPROM(mvm->trans->dev,
				 "working without external nvm file\n");

	scan_size = iwl_mvm_scan_size(mvm);

	mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
	if (!mvm->scan_cmd)
		goto out_free;

	/* invalidate ids to prevent accidental removal of sta_id 0 */
	mvm->aux_sta.sta_id = IWL_MVM_INVALID_STA;
	mvm->snif_sta.sta_id = IWL_MVM_INVALID_STA;

	/* Set EBS as successful as long as not stated otherwise by the FW. */
	mvm->last_ebs_successful = true;

	min_backoff = iwl_mvm_min_backoff(mvm);
	iwl_mvm_thermal_initialize(mvm, min_backoff);

	if (!iwl_mvm_has_new_rx_stats_api(mvm))
		memset(&mvm->rx_stats_v3, 0,
		       sizeof(struct mvm_statistics_rx_v3));
	else
		memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));

	mvm->debugfs_dir = dbgfs_dir;

	mvm->mei_registered = !iwl_mei_register(mvm, &mei_ops);

	/*
	 * If getting the NVM failed but we are registered to MEI, we'll get
	 * the NVM later, when it becomes possible to get it from CSME.
	 */
	if (iwl_mvm_start_get_nvm(mvm) && mvm->mei_registered)
		return op_mode;

	if (iwl_mvm_start_post_nvm(mvm))
		goto out_thermal_exit;
	return op_mode;

 out_thermal_exit:
	iwl_mvm_thermal_exit(mvm);
	if (mvm->mei_registered) {
		iwl_mei_start_unregister();
		iwl_mei_unregister_complete();
	}
 out_free:
	iwl_fw_flush_dumps(&mvm->fwrt);
	iwl_fw_runtime_free(&mvm->fwrt);

	if (iwlmvm_mod_params.init_dbg)
		return op_mode;
	iwl_phy_db_free(mvm->phy_db);
	kfree(mvm->scan_cmd);
	iwl_trans_op_mode_leave(trans);

	ieee80211_free_hw(mvm->hw);
	return NULL;
}

void iwl_mvm_stop_device(struct iwl_mvm *mvm)
{
	lockdep_assert_held(&mvm->mutex);

	iwl_fw_cancel_timestamp(&mvm->fwrt);

	clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);

	iwl_fw_dbg_stop_sync(&mvm->fwrt);
	iwl_trans_stop_device(mvm->trans);
	iwl_free_fw_paging(&mvm->fwrt);
	iwl_fw_dump_conf_clear(&mvm->fwrt);
	iwl_mvm_mei_device_down(mvm);
}

static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	int i;

	if (mvm->mei_registered) {
		rtnl_lock();
		iwl_mei_set_netdev(NULL);
		rtnl_unlock();
		iwl_mei_start_unregister();
	}

	/*
	 * After we unregister from mei, the worker can't be scheduled
	 * anymore.
	 */
	cancel_work_sync(&mvm->sap_connected_wk);

	iwl_mvm_leds_exit(mvm);

	iwl_mvm_thermal_exit(mvm);

	/*
	 * If we couldn't get ownership on the device and we couldn't
	 * get the NVM from CSME, we haven't registered to mac80211.
	 * In that case, we didn't fail op_mode_start, because we are
	 * waiting for CSME to allow us to get the NVM to register to
	 * mac80211. If that didn't happen, we haven't registered to
	 * mac80211, hence the if below.
	 */
	if (mvm->hw_registered)
		ieee80211_unregister_hw(mvm->hw);

	kfree(mvm->scan_cmd);
	kfree(mvm->mcast_filter_cmd);
	mvm->mcast_filter_cmd = NULL;
	kfree(mvm->error_recovery_buf);
	mvm->error_recovery_buf = NULL;

	iwl_trans_op_mode_leave(mvm->trans);

	iwl_phy_db_free(mvm->phy_db);
	mvm->phy_db = NULL;

	kfree(mvm->nvm_data);
	kfree(mvm->mei_nvm_data);
	kfree(rcu_access_pointer(mvm->csme_conn_info));
	kfree(mvm->temp_nvm_data);
	for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
		kfree(mvm->nvm_sections[i].data);

	cancel_delayed_work_sync(&mvm->tcm.work);

	iwl_fw_runtime_free(&mvm->fwrt);
	mutex_destroy(&mvm->mutex);

	if (mvm->mei_registered)
		iwl_mei_unregister_complete();

	ieee80211_free_hw(mvm->hw);
}

struct iwl_async_handler_entry {
	struct list_head list;
	struct iwl_rx_cmd_buffer rxb;
	enum iwl_rx_handler_context context;
	void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
};

void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
{
	struct iwl_async_handler_entry *entry, *tmp;

	spin_lock_bh(&mvm->async_handlers_lock);
	list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
		iwl_free_rxb(&entry->rxb);
		list_del(&entry->list);
		kfree(entry);
	}
	spin_unlock_bh(&mvm->async_handlers_lock);
}

static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm =
		container_of(wk, struct iwl_mvm, async_handlers_wk);
	struct iwl_async_handler_entry *entry, *tmp;
	LIST_HEAD(local_list);

	/* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */

	/*
	 * Sync with Rx path with a lock. Remove all the entries from this list,
	 * add them to a local one (lock free), and then handle them.
	 */
	spin_lock_bh(&mvm->async_handlers_lock);
	list_splice_init(&mvm->async_handlers_list, &local_list);
	spin_unlock_bh(&mvm->async_handlers_lock);

	list_for_each_entry_safe(entry, tmp, &local_list, list) {
		if (entry->context == RX_HANDLER_ASYNC_LOCKED)
			mutex_lock(&mvm->mutex);
		entry->fn(mvm, &entry->rxb);
		iwl_free_rxb(&entry->rxb);
		list_del(&entry->list);
		if (entry->context == RX_HANDLER_ASYNC_LOCKED)
			mutex_unlock(&mvm->mutex);
		kfree(entry);
	}
}

static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
					    struct iwl_rx_packet *pkt)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_cmd *cmds_trig;
	int i;

1472 1473 1474
	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
				     FW_DBG_TRIGGER_FW_NOTIF);
	if (!trig)
1475 1476 1477 1478 1479 1480 1481 1482 1483
		return;

	cmds_trig = (void *)trig->data;

	for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) {
		/* don't collect on CMD 0 */
		if (!cmds_trig->cmds[i].cmd_id)
			break;

		if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd ||
		    cmds_trig->cmds[i].group_id != pkt->hdr.group_id)
			continue;

		iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
					"CMD 0x%02x.%02x received",
					pkt->hdr.group_id, pkt->hdr.cmd);
		break;
	}
}

static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
			      struct iwl_rx_cmd_buffer *rxb,
			      struct iwl_rx_packet *pkt)
{
	unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
	int i;
	union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt };

	iwl_dbg_tlv_time_point(&mvm->fwrt,
			       IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF, &tp_data);
	iwl_mvm_rx_check_trigger(mvm, pkt);

	/*
	 * Do the notification wait before RX handlers so
	 * even if the RX handler consumes the RXB we have
	 * access to it in the notification wait entry.
	 */
	iwl_notification_wait_notify(&mvm->notif_wait, pkt);

	for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) {
		const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i];
		struct iwl_async_handler_entry *entry;

		if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
			continue;

		if (unlikely(pkt_len < rx_h->min_size))
			return;

		if (rx_h->context == RX_HANDLER_SYNC) {
			rx_h->fn(mvm, rxb);
			return;
		}

		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
		/* we can't do much... */
		if (!entry)
			return;

		entry->rxb._page = rxb_steal_page(rxb);
		entry->rxb._offset = rxb->_offset;
		entry->rxb._rx_page_order = rxb->_rx_page_order;
		entry->fn = rx_h->fn;
		entry->context = rx_h->context;
		spin_lock(&mvm->async_handlers_lock);
		list_add_tail(&entry->list, &mvm->async_handlers_list);
		spin_unlock(&mvm->async_handlers_lock);
		schedule_work(&mvm->async_handlers_wk);
		break;
	}
}

static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
		       struct napi_struct *napi,
		       struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);

	if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
		iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
	else if (cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_PHY_CMD))
		iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
	else
		iwl_mvm_rx_common(mvm, rxb, pkt);
}

void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
		   struct napi_struct *napi,
		   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);

	if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
		iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0);
	else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
					 RX_QUEUES_NOTIFICATION)))
		iwl_mvm_rx_queue_notif(mvm, napi, rxb, 0);
	else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
		iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
	else if (cmd == WIDE_ID(LEGACY_GROUP, BAR_FRAME_RELEASE))
		iwl_mvm_rx_bar_frame_release(mvm, napi, rxb, 0);
	else if (cmd == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF))
		iwl_mvm_rx_monitor_no_data(mvm, napi, rxb, 0);
	else
		iwl_mvm_rx_common(mvm, rxb, pkt);
}

static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
			     const struct iwl_device_cmd *cmd)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	/*
	 * For now, we only set the CMD_WANT_ASYNC_CALLBACK for ADD_STA
	 * commands that need to block the Tx queues.
	 */
	iwl_trans_block_txq_ptrs(mvm->trans, false);
}

static int iwl_mvm_is_static_queue(struct iwl_mvm *mvm, int queue)
{
	return queue == mvm->aux_queue || queue == mvm->probe_queue ||
		queue == mvm->p2p_dev_queue || queue == mvm->snif_queue;
}

static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode,
				       int hw_queue, bool start)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	struct ieee80211_sta *sta;
	struct ieee80211_txq *txq;
	struct iwl_mvm_txq *mvmtxq;
	int i;
	unsigned long tid_bitmap;
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id;

	sta_id = iwl_mvm_has_new_tx_api(mvm) ?
		mvm->tvqm_info[hw_queue].sta_id :
		mvm->queue_info[hw_queue].ra_sta_id;

	if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations))
		return;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (IS_ERR_OR_NULL(sta))
		goto out;
	mvmsta = iwl_mvm_sta_from_mac80211(sta);

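	/*
	 * The static (non-TVQM) queues are shared driver-global queues,
	 * so back-pressure on one of them maps to stopping or waking all
	 * mac80211 queues rather than a single sta/tid txq.
	 */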
	if (iwl_mvm_is_static_queue(mvm, hw_queue)) {
		if (!start)
			ieee80211_stop_queues(mvm->hw);
		else if (mvmsta->sta_state != IEEE80211_STA_NOTEXIST)
			ieee80211_wake_queues(mvm->hw);

		goto out;
	}

	if (iwl_mvm_has_new_tx_api(mvm)) {
		int tid = mvm->tvqm_info[hw_queue].txq_tid;

		tid_bitmap = BIT(tid);
	} else {
		tid_bitmap = mvm->queue_info[hw_queue].tid_bitmap;
	}

	for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		int tid = i;

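		/*
		 * The driver tracks the management TID as IWL_MAX_TID_COUNT,
		 * but mac80211 stores the corresponding txq at index
		 * IEEE80211_NUM_TIDS, so remap before indexing sta->txq[].
		 */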
		if (tid == IWL_MAX_TID_COUNT)
			tid = IEEE80211_NUM_TIDS;
		txq = sta->txq[tid];
		mvmtxq = iwl_mvm_txq_from_mac80211(txq);
		mvmtxq->stopped = !start;

		if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST)
			iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
	}

out:
	rcu_read_unlock();
J
Johannes Berg 已提交
1663 1664
}

static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
{
	iwl_mvm_queue_state_change(op_mode, hw_queue, false);
}

static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
{
	iwl_mvm_queue_state_change(op_mode, hw_queue, true);
}

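/*
 * Recompute the combined (HW rfkill + CT-kill) state and report it to
 * mac80211; waking rx_sync_waitq ensures nobody stays blocked waiting
 * for RX queue synchronization once the radio is killed.
 */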
static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm)
{
	bool state = iwl_mvm_is_radio_killed(mvm);

	if (state)
		wake_up(&mvm->rx_sync_waitq);

	wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state);
}

void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
{
	if (state)
		set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
	else
		clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);

	iwl_mvm_set_rfkill_state(mvm);
}

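/* Must be called with mvm->mutex held, as enforced by lockdep */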
struct iwl_mvm_csme_conn_info *iwl_mvm_get_csme_conn_info(struct iwl_mvm *mvm)
{
	return rcu_dereference_protected(mvm->csme_conn_info,
					 lockdep_is_held(&mvm->mutex));
}

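/*
 * Handle a HW rfkill state change reported by the transport.  The
 * return value tells the transport whether to also stop the firmware:
 * unified-image devices are powered down later, once cfg80211 takes
 * the interface down, while older devices are stopped right away if
 * rfkill hits while the firmware is running or calibrating.
 */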
static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	bool rfkill_safe_init_done = READ_ONCE(mvm->rfkill_safe_init_done);
	bool unified = iwl_mvm_has_unified_ucode(mvm);

	if (state)
		set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
	else
		clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);

	iwl_mvm_set_rfkill_state(mvm);

	/* iwl_run_init_mvm_ucode is waiting for results, abort it. */
	if (rfkill_safe_init_done)
		iwl_abort_notification_waits(&mvm->notif_wait);

	/*
	 * Don't ask the transport to stop the firmware. We'll do it
	 * after cfg80211 takes us down.
	 */
	if (unified)
		return false;

	/*
	 * Stop the device if we run OPERATIONAL firmware or if we are in the
	 * middle of the calibrations.
	 */
	return state && rfkill_safe_init_done;
}

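/*
 * Free an skb the transport gave up on: release the device TX command
 * attached in driver_data[1] first, then hand the frame back to
 * mac80211 for accounting.
 */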
static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	struct ieee80211_tx_info *info;

	info = IEEE80211_SKB_CB(skb);
	iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
	ieee80211_free_txskb(mvm->hw, skb);
}

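/*
 * If the firmware dies while we're already reconfiguring after a
 * restart, the only way out is to unbind and rebind the device.  That
 * can't be done from the error path itself, so it is deferred to a
 * work item which pins both the device and this module.
 */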
struct iwl_mvm_reprobe {
	struct device *dev;
	struct work_struct work;
};

static void iwl_mvm_reprobe_wk(struct work_struct *wk)
{
	struct iwl_mvm_reprobe *reprobe;

	reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
	if (device_reprobe(reprobe->dev))
		dev_err(reprobe->dev, "reprobe failed!\n");
	put_device(reprobe->dev);
	kfree(reprobe);
	module_put(THIS_MODULE);
}

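/*
 * Central firmware-error recovery: collect debug data and give up if
 * restarts are disabled, reprobe the device if the error hit during
 * reconfiguration, or else request a full restart from mac80211,
 * saving the firmware's error recovery buffer first when one exists.
 */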
void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
{
	iwl_abort_notification_waits(&mvm->notif_wait);
	iwl_dbg_tlv_del_timers(mvm->trans);

	/*
	 * This is a bit racy, but worst case we tell mac80211 about
	 * a stopped/aborted scan when that was already done which
	 * is not a problem. It is necessary to abort any OS scan
	 * here because mac80211 requires having the scan cleared
	 * before restarting.
	 * We'll reset the scan_status to NONE in restart cleanup in
	 * the next start() call from mac80211. If restart isn't called
	 * (no fw restart) scan status will stay busy.
	 */
	iwl_mvm_report_scan_aborted(mvm);

	/*
	 * If we're restarting already, don't cycle restarts.
	 * If INIT fw asserted, it will likely fail again.
	 * If WoWLAN fw asserted, don't restart either, mac80211
	 * can't recover this since we're already half suspended.
	 */
	if (!mvm->fw_restart && fw_error) {
		iwl_fw_error_collect(&mvm->fwrt, false);
	} else if (test_bit(IWL_MVM_STATUS_STARTING,
			    &mvm->status)) {
		IWL_ERR(mvm, "Starting mac, retry will be triggered anyway\n");
	} else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		struct iwl_mvm_reprobe *reprobe;

		IWL_ERR(mvm,
			"Firmware error during reconfiguration - reprobe!\n");

		/*
		 * get a module reference to avoid doing this while unloading
		 * anyway and to avoid scheduling a work with code that's
		 * being removed.
		 */
		if (!try_module_get(THIS_MODULE)) {
			IWL_ERR(mvm, "Module is being unloaded - abort\n");
			return;
		}

		reprobe = kzalloc(sizeof(*reprobe), GFP_ATOMIC);
		if (!reprobe) {
			module_put(THIS_MODULE);
			return;
		}
		reprobe->dev = get_device(mvm->trans->dev);
		INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
		schedule_work(&reprobe->work);
	} else if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
			    &mvm->status)) {
		IWL_ERR(mvm, "HW restart already requested, but not started\n");
	} else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR &&
		   mvm->hw_registered &&
		   !test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) {
		/* This should be first thing before trying to collect any
		 * data to avoid endless loops if any HW error happens while
		 * collecting debug data.
		 */
		set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);

		if (mvm->fw->ucode_capa.error_log_size) {
			u32 src_size = mvm->fw->ucode_capa.error_log_size;
			u32 src_addr = mvm->fw->ucode_capa.error_log_addr;
			u8 *recover_buf = kzalloc(src_size, GFP_ATOMIC);

			if (recover_buf) {
				mvm->error_recovery_buf = recover_buf;
				iwl_trans_read_mem_bytes(mvm->trans,
							 src_addr,
							 recover_buf,
							 src_size);
			}
		}

		iwl_fw_error_collect(&mvm->fwrt, false);

		if (fw_error && mvm->fw_restart > 0)
			mvm->fw_restart--;
		ieee80211_restart_hw(mvm->hw);
	}
}

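/*
 * Transport notification of a firmware error: dump the NIC error log
 * unless suppressed, collect debug data, and kick off the restart
 * flow unless we're shutting down (sync) or the firmware was already
 * considered dead.
 */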
static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode, bool sync)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status) &&
	    !test_and_clear_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE,
				&mvm->status))
		iwl_mvm_dump_nic_error_log(mvm);

	if (sync) {
		iwl_fw_error_collect(&mvm->fwrt, true);
		/*
		 * Currently, the only case for sync=true is during
		 * shutdown, so just stop in this case. If/when that
		 * changes, we need to be a bit smarter here.
		 */
		return;
	}

	/*
	 * If the firmware crashes while we're already considering it
	 * to be dead then don't ask for a restart, that cannot do
	 * anything useful anyway.
	 */
	if (!test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status))
		return;

	iwl_mvm_nic_restart(mvm, true);
}

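/*
 * The command queue filling up means the firmware stopped processing
 * host commands; there's no graceful way to recover from that, so
 * warn and restart.
 */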
static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	WARN_ON(1);
	iwl_mvm_nic_restart(mvm, true);
}

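/* Forward debug time-point events to the ini debug TLV machinery */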
static void iwl_op_mode_mvm_time_point(struct iwl_op_mode *op_mode,
				       enum iwl_fw_ini_time_point tp_id,
				       union iwl_dbg_tlv_tp_data *tp_data)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	iwl_dbg_tlv_time_point(&mvm->fwrt, tp_id, tp_data);
}

#define IWL_MVM_COMMON_OPS					\
	/* these could be differentiated */			\
	.async_cb = iwl_mvm_async_cb,				\
	.queue_full = iwl_mvm_stop_sw_queue,			\
	.queue_not_full = iwl_mvm_wake_sw_queue,		\
	.hw_rf_kill = iwl_mvm_set_hw_rfkill_state,		\
	.free_skb = iwl_mvm_free_skb,				\
	.nic_error = iwl_mvm_nic_error,				\
	.cmd_queue_full = iwl_mvm_cmd_queue_full,		\
	.nic_config = iwl_mvm_nic_config,			\
	/* as we only register one, these MUST be common! */	\
	.start = iwl_op_mode_mvm_start,				\
	.stop = iwl_op_mode_mvm_stop,				\
	.time_point = iwl_op_mode_mvm_time_point

static const struct iwl_op_mode_ops iwl_mvm_ops = {
	IWL_MVM_COMMON_OPS,
	.rx = iwl_mvm_rx,
};

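/*
 * RX handler for the RSS queues (queue index != 0).  Only data frames
 * and the multi-queue synchronization messages (frame release and RX
 * queue notifications) are expected here; anything else is ignored.
 */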
static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
			      struct napi_struct *napi,
			      struct iwl_rx_cmd_buffer *rxb,
			      unsigned int queue)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);

	if (unlikely(queue >= mvm->trans->num_rx_queues))
		return;

	if (unlikely(cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)))
		iwl_mvm_rx_frame_release(mvm, napi, rxb, queue);
	else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
					 RX_QUEUES_NOTIFICATION)))
		iwl_mvm_rx_queue_notif(mvm, napi, rxb, queue);
	else if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
		iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
}

static const struct iwl_op_mode_ops iwl_mvm_ops_mq = {
	IWL_MVM_COMMON_OPS,
	.rx = iwl_mvm_rx_mq,
	.rx_rss = iwl_mvm_rx_mq_rss,
};