ops.c 53.0 KB
Newer Older
J
Johannes Berg 已提交
1 2 3 4 5 6 7
/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
8
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
11
 * Copyright(c) 2018        Intel Corporation
J
Johannes Berg 已提交
12 13 14 15 16 17 18 19 20 21 22
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
23
 * in the file called COPYING.
J
Johannes Berg 已提交
24 25
 *
 * Contact Information:
26
 *  Intel Linux Wireless <linuxwifi@intel.com>
J
Johannes Berg 已提交
27 28 29 30
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
31
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
32
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
33
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
34
 * Copyright(c) 2018        Intel Corporation
J
Johannes Berg 已提交
35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/module.h>
65
#include <linux/vmalloc.h>
J
Johannes Berg 已提交
66 67
#include <net/mac80211.h>

68
#include "fw/notif-wait.h"
J
Johannes Berg 已提交
69 70
#include "iwl-trans.h"
#include "iwl-op-mode.h"
71
#include "fw/img.h"
J
Johannes Berg 已提交
72 73 74 75 76 77 78 79 80 81
#include "iwl-debug.h"
#include "iwl-drv.h"
#include "iwl-modparams.h"
#include "mvm.h"
#include "iwl-phy-db.h"
#include "iwl-eeprom-parse.h"
#include "iwl-csr.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "rs.h"
J
Johannes Berg 已提交
82
#include "fw/api/scan.h"
J
Johannes Berg 已提交
83
#include "time-event.h"
84
#include "fw-api.h"
85
#include "fw/acpi.h"
J
Johannes Berg 已提交
86 87 88 89 90 91 92

#define DRV_DESCRIPTION	"The new Intel(R) wireless AGN driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");

static const struct iwl_op_mode_ops iwl_mvm_ops;
93
static const struct iwl_op_mode_ops iwl_mvm_ops_mq;
J
Johannes Berg 已提交
94 95 96

struct iwl_mvm_mod_params iwlmvm_mod_params = {
	.power_scheme = IWL_POWER_SCHEME_BPS,
97
	.tfd_q_hang_detect = true
J
Johannes Berg 已提交
98 99 100
	/* rest of fields are 0 by default */
};

101
module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, 0444);
J
Johannes Berg 已提交
102 103
MODULE_PARM_DESC(init_dbg,
		 "set to true to debug an ASSERT in INIT fw (default: false");
104
module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, 0444);
J
Johannes Berg 已提交
105 106
MODULE_PARM_DESC(power_scheme,
		 "power management scheme: 1-active, 2-balanced, 3-low power, default: 2");
107
module_param_named(tfd_q_hang_detect, iwlmvm_mod_params.tfd_q_hang_detect,
108
		   bool, 0444);
109 110
MODULE_PARM_DESC(tfd_q_hang_detect,
		 "TFD queues hang detection (default: true");
J
Johannes Berg 已提交
111 112 113 114 115 116 117 118 119 120 121 122 123 124 125

/*
 * module init and exit functions
 */
static int __init iwl_mvm_init(void)
{
	int ret;

	ret = iwl_mvm_rate_control_register();
	if (ret) {
		pr_err("Unable to register rate control algorithm: %d\n", ret);
		return ret;
	}

	ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
126
	if (ret)
J
Johannes Berg 已提交
127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144
		pr_err("Unable to register MVM op_mode: %d\n", ret);

	return ret;
}
module_init(iwl_mvm_init);

static void __exit iwl_mvm_exit(void)
{
	iwl_opmode_deregister("iwlmvm");
	iwl_mvm_rate_control_unregister();
}
module_exit(iwl_mvm_exit);

static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	u32 reg_val = 0;
145 146 147 148 149 150 151 152
	u32 phy_config = iwl_mvm_get_phy_config(mvm);

	radio_cfg_type = (phy_config & FW_PHY_CFG_RADIO_TYPE) >>
			 FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & FW_PHY_CFG_RADIO_STEP) >>
			 FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & FW_PHY_CFG_RADIO_DASH) >>
			 FW_PHY_CFG_RADIO_DASH_POS;
J
Johannes Berg 已提交
153 154 155 156 157 158 159 160 161 162 163 164 165 166 167

	/* SKU control */
	reg_val |= CSR_HW_REV_STEP(mvm->trans->hw_rev) <<
				CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= CSR_HW_REV_DASH(mvm->trans->hw_rev) <<
				CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) &
		 ~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE);

168
	/*
169 170 171 172 173 174
	 * TODO: Bits 7-8 of CSR in 8000 HW family and higher set the ADC
	 * sampling, and shouldn't be set to any non-zero value.
	 * The same is supposed to be true of the other HW, but unsetting
	 * them (such as the 7260) causes automatic tests to fail on seemingly
	 * unrelated errors. Need to further investigate this, but for now
	 * we'll separate cases.
175
	 */
176
	if (mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
177
		reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;
J
Johannes Berg 已提交
178

179 180 181
	if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt))
		reg_val |= CSR_HW_IF_CONFIG_REG_D3_DEBUG;

182 183 184 185 186 187 188
	iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
				CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
				CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
				CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
				CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
				CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
				CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
189 190
				CSR_HW_IF_CONFIG_REG_BIT_MAC_SI   |
				CSR_HW_IF_CONFIG_REG_D3_DEBUG,
191
				reg_val);
J
Johannes Berg 已提交
192 193 194 195 196 197 198 199 200

	IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
		       radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
201
	if (!mvm->trans->cfg->apmg_not_supported)
202 203 204
		iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
				       ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
J
Johannes Berg 已提交
205 206
}

207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228
/**
 * enum iwl_rx_handler_context context for Rx handler
 * @RX_HANDLER_SYNC : this means that it will be called in the Rx path
 *	which can't acquire mvm->mutex.
 * @RX_HANDLER_ASYNC_LOCKED : If the handler needs to hold mvm->mutex
 *	(and only in this case!), it should be set as ASYNC. In that case,
 *	it will be called from a worker with mvm->mutex held.
 * @RX_HANDLER_ASYNC_UNLOCKED : in case the handler needs to lock the
 *	mutex itself, it will be called from a worker without mvm->mutex held.
 */
enum iwl_rx_handler_context {
	RX_HANDLER_SYNC,
	RX_HANDLER_ASYNC_LOCKED,
	RX_HANDLER_ASYNC_UNLOCKED,
};

/**
 * struct iwl_rx_handlers handler for FW notification
 * @cmd_id: command id
 * @context: see &iwl_rx_handler_context
 * @fn: the function is called when notification is received
 */
J
Johannes Berg 已提交
229
struct iwl_rx_handlers {
230
	u16 cmd_id;
231
	enum iwl_rx_handler_context context;
232
	void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
J
Johannes Berg 已提交
233 234
};

235 236 237 238
#define RX_HANDLER(_cmd_id, _fn, _context)	\
	{ .cmd_id = _cmd_id, .fn = _fn, .context = _context }
#define RX_HANDLER_GRP(_grp, _cmd, _fn, _context)	\
	{ .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .context = _context }
J
Johannes Berg 已提交
239 240 241 242 243 244

/*
 * Handlers for fw notifications
 * Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME
 * This list should be in order of frequency for performance purposes.
 *
245
 * The handler can be one from three contexts, see &iwl_rx_handler_context
J
Johannes Berg 已提交
246 247
 */
static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
248 249 250
	RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC),
	RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC),

251 252 253
	RX_HANDLER_GRP(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF,
		       iwl_mvm_tlc_update_notif, RX_HANDLER_SYNC),

254 255 256 257 258 259
	RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif,
		   RX_HANDLER_ASYNC_LOCKED),
	RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
		   RX_HANDLER_ASYNC_LOCKED),
	RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics,
		   RX_HANDLER_ASYNC_LOCKED),
260

261
	RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID,
262
		   iwl_mvm_window_status_notif, RX_HANDLER_SYNC),
263

264 265 266 267
	RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif,
		   RX_HANDLER_SYNC),
	RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc,
		   RX_HANDLER_ASYNC_LOCKED),
268

269
	RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, RX_HANDLER_SYNC),
270

271
	RX_HANDLER(SCAN_ITERATION_COMPLETE,
272
		   iwl_mvm_rx_lmac_scan_iter_complete_notif, RX_HANDLER_SYNC),
273
	RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
274 275
		   iwl_mvm_rx_lmac_scan_complete_notif,
		   RX_HANDLER_ASYNC_LOCKED),
276
	RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_match_found,
277
		   RX_HANDLER_SYNC),
278
	RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
279
		   RX_HANDLER_ASYNC_LOCKED),
280
	RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC,
281
		   iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC),
282

283 284
	RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif,
		   RX_HANDLER_SYNC),
J
Johannes Berg 已提交
285

286
	RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
287
		   RX_HANDLER_SYNC),
288

289
	RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC),
290
	RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
291 292 293
		   iwl_mvm_power_uapsd_misbehaving_ap_notif, RX_HANDLER_SYNC),
	RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif,
		   RX_HANDLER_ASYNC_LOCKED),
294
	RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
295
		       iwl_mvm_temp_notif, RX_HANDLER_ASYNC_UNLOCKED),
296
	RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
297
		       iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC),
298

299
	RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
300 301 302
		   RX_HANDLER_ASYNC_LOCKED),
	RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif,
		   RX_HANDLER_SYNC),
303 304
	RX_HANDLER_GRP(LOCATION_GROUP, TOF_RESPONDER_STATS,
		       iwl_mvm_ftm_responder_stats, RX_HANDLER_ASYNC_LOCKED),
305 306 307 308 309 310

	RX_HANDLER_GRP(LOCATION_GROUP, TOF_RANGE_RESPONSE_NOTIF,
		       iwl_mvm_ftm_range_resp, RX_HANDLER_ASYNC_LOCKED),
	RX_HANDLER_GRP(LOCATION_GROUP, TOF_LC_NOTIF,
		       iwl_mvm_ftm_lc_notif, RX_HANDLER_ASYNC_LOCKED),

311 312
	RX_HANDLER_GRP(DEBUG_GROUP, MFU_ASSERT_DUMP_NTF,
		       iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC),
313
	RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
314
		       iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC),
315
	RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
316
		       iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC),
317 318
	RX_HANDLER_GRP(DATA_PATH_GROUP, STA_PM_NOTIF,
		       iwl_mvm_sta_pm_notif, RX_HANDLER_SYNC),
J
Johannes Berg 已提交
319 320
};
#undef RX_HANDLER
321
#undef RX_HANDLER_GRP
322 323 324 325 326 327 328 329 330 331 332 333 334 335 336

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
	HCMD_NAME(MVM_ALIVE),
	HCMD_NAME(REPLY_ERROR),
	HCMD_NAME(ECHO_CMD),
	HCMD_NAME(INIT_COMPLETE_NOTIF),
	HCMD_NAME(PHY_CONTEXT_CMD),
	HCMD_NAME(DBG_CFG),
	HCMD_NAME(SCAN_CFG_CMD),
	HCMD_NAME(SCAN_REQ_UMAC),
	HCMD_NAME(SCAN_ABORT_UMAC),
	HCMD_NAME(SCAN_COMPLETE_UMAC),
337
	HCMD_NAME(BA_WINDOW_STATUS_NOTIFICATION_ID),
338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354
	HCMD_NAME(ADD_STA_KEY),
	HCMD_NAME(ADD_STA),
	HCMD_NAME(REMOVE_STA),
	HCMD_NAME(FW_GET_ITEM_CMD),
	HCMD_NAME(TX_CMD),
	HCMD_NAME(SCD_QUEUE_CFG),
	HCMD_NAME(TXPATH_FLUSH),
	HCMD_NAME(MGMT_MCAST_KEY),
	HCMD_NAME(WEP_KEY),
	HCMD_NAME(SHARED_MEM_CFG),
	HCMD_NAME(TDLS_CHANNEL_SWITCH_CMD),
	HCMD_NAME(MAC_CONTEXT_CMD),
	HCMD_NAME(TIME_EVENT_CMD),
	HCMD_NAME(TIME_EVENT_NOTIFICATION),
	HCMD_NAME(BINDING_CONTEXT_CMD),
	HCMD_NAME(TIME_QUOTA_CMD),
	HCMD_NAME(NON_QOS_TX_COUNTER_CMD),
355
	HCMD_NAME(LEDS_CMD),
356 357 358 359 360 361 362 363 364 365
	HCMD_NAME(LQ_CMD),
	HCMD_NAME(FW_PAGING_BLOCK_CMD),
	HCMD_NAME(SCAN_OFFLOAD_REQUEST_CMD),
	HCMD_NAME(SCAN_OFFLOAD_ABORT_CMD),
	HCMD_NAME(HOT_SPOT_CMD),
	HCMD_NAME(SCAN_OFFLOAD_PROFILES_QUERY_CMD),
	HCMD_NAME(BT_COEX_UPDATE_REDUCED_TXP),
	HCMD_NAME(BT_COEX_CI),
	HCMD_NAME(PHY_CONFIGURATION_CMD),
	HCMD_NAME(CALIB_RES_NOTIF_PHY_DB),
366
	HCMD_NAME(PHY_DB_CMD),
367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387
	HCMD_NAME(SCAN_OFFLOAD_COMPLETE),
	HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
	HCMD_NAME(POWER_TABLE_CMD),
	HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
	HCMD_NAME(REPLY_THERMAL_MNG_BACKOFF),
	HCMD_NAME(DC2DC_CONFIG_CMD),
	HCMD_NAME(NVM_ACCESS_CMD),
	HCMD_NAME(BEACON_NOTIFICATION),
	HCMD_NAME(BEACON_TEMPLATE_CMD),
	HCMD_NAME(TX_ANT_CONFIGURATION_CMD),
	HCMD_NAME(BT_CONFIG),
	HCMD_NAME(STATISTICS_CMD),
	HCMD_NAME(STATISTICS_NOTIFICATION),
	HCMD_NAME(EOSP_NOTIFICATION),
	HCMD_NAME(REDUCE_TX_POWER_CMD),
	HCMD_NAME(CARD_STATE_NOTIFICATION),
	HCMD_NAME(MISSED_BEACONS_NOTIFICATION),
	HCMD_NAME(TDLS_CONFIG_CMD),
	HCMD_NAME(MAC_PM_POWER_TABLE),
	HCMD_NAME(TDLS_CHANNEL_SWITCH_NOTIFICATION),
	HCMD_NAME(MFUART_LOAD_NOTIFICATION),
388
	HCMD_NAME(RSS_CONFIG_CMD),
389 390 391
	HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC),
	HCMD_NAME(REPLY_RX_PHY_CMD),
	HCMD_NAME(REPLY_RX_MPDU_CMD),
392
	HCMD_NAME(FRAME_RELEASE),
393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416
	HCMD_NAME(BA_NOTIF),
	HCMD_NAME(MCC_UPDATE_CMD),
	HCMD_NAME(MCC_CHUB_UPDATE_CMD),
	HCMD_NAME(MARKER_CMD),
	HCMD_NAME(BT_PROFILE_NOTIFICATION),
	HCMD_NAME(BCAST_FILTER_CMD),
	HCMD_NAME(MCAST_FILTER_CMD),
	HCMD_NAME(REPLY_SF_CFG_CMD),
	HCMD_NAME(REPLY_BEACON_FILTERING_CMD),
	HCMD_NAME(D3_CONFIG_CMD),
	HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD),
	HCMD_NAME(OFFLOADS_QUERY_CMD),
	HCMD_NAME(REMOTE_WAKE_CONFIG_CMD),
	HCMD_NAME(MATCH_FOUND_NOTIFICATION),
	HCMD_NAME(DTS_MEASUREMENT_NOTIFICATION),
	HCMD_NAME(WOWLAN_PATTERNS),
	HCMD_NAME(WOWLAN_CONFIGURATION),
	HCMD_NAME(WOWLAN_TSC_RSC_PARAM),
	HCMD_NAME(WOWLAN_TKIP_PARAM),
	HCMD_NAME(WOWLAN_KEK_KCK_MATERIAL),
	HCMD_NAME(WOWLAN_GET_STATUSES),
	HCMD_NAME(SCAN_ITERATION_COMPLETE),
	HCMD_NAME(D0I3_END_CMD),
	HCMD_NAME(LTR_CONFIG),
J
Johannes Berg 已提交
417
};
418

419 420 421 422 423
/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
	HCMD_NAME(SHARED_MEM_CFG_CMD),
424
	HCMD_NAME(INIT_EXTENDED_CFG_CMD),
425
	HCMD_NAME(FW_ERROR_RECOVERY_CMD),
426 427
};

428 429 430 431
/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
432
	HCMD_NAME(CHANNEL_SWITCH_TIME_EVENT_CMD),
433
	HCMD_NAME(CHANNEL_SWITCH_NOA_NOTIF),
434 435
};

436 437 438 439 440
/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
	HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
441
	HCMD_NAME(CTDP_CONFIG_CMD),
442
	HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD),
443
	HCMD_NAME(GEO_TX_POWER_LIMIT),
444
	HCMD_NAME(CT_KILL_NOTIFICATION),
445 446 447
	HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
};

448 449 450 451
/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
452
	HCMD_NAME(DQA_ENABLE_CMD),
453
	HCMD_NAME(UPDATE_MU_GROUPS_CMD),
454
	HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
455
	HCMD_NAME(STA_HE_CTXT_CMD),
456
	HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
457
	HCMD_NAME(TLC_MNG_CONFIG_CMD),
458
	HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD),
459
	HCMD_NAME(STA_PM_NOTIF),
460
	HCMD_NAME(MU_GROUP_MGMT_NOTIF),
461
	HCMD_NAME(RX_QUEUES_NOTIFICATION),
462 463
};

464 465 466 467 468 469 470
/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_debug_names[] = {
	HCMD_NAME(MFU_ASSERT_DUMP_NTF),
};

J
Johannes Berg 已提交
471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486
/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_location_names[] = {
	HCMD_NAME(TOF_RANGE_REQ_CMD),
	HCMD_NAME(TOF_CONFIG_CMD),
	HCMD_NAME(TOF_RANGE_ABORT_CMD),
	HCMD_NAME(TOF_RANGE_REQ_EXT_CMD),
	HCMD_NAME(TOF_RESPONDER_CONFIG_CMD),
	HCMD_NAME(TOF_RESPONDER_DYN_CONFIG_CMD),
	HCMD_NAME(TOF_LC_NOTIF),
	HCMD_NAME(TOF_RESPONDER_STATS),
	HCMD_NAME(TOF_MCSI_DEBUG_NOTIF),
	HCMD_NAME(TOF_RANGE_RESPONSE_NOTIF),
};

487 488 489 490 491 492 493
/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
	HCMD_NAME(STORED_BEACON_NTF),
};

494 495 496 497 498
/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = {
	HCMD_NAME(NVM_ACCESS_COMPLETE),
499
	HCMD_NAME(NVM_GET_INFO),
500 501
};

502 503 504
static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
	[LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
	[LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
505
	[SYSTEM_GROUP] = HCMD_ARR(iwl_mvm_system_names),
506
	[MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names),
507
	[PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
508
	[DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
J
Johannes Berg 已提交
509
	[LOCATION_GROUP] = HCMD_ARR(iwl_mvm_location_names),
510
	[PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
511 512
	[REGULATORY_AND_NVM_GROUP] =
		HCMD_ARR(iwl_mvm_regulatory_and_nvm_names),
513 514
};

J
Johannes Berg 已提交
515 516
/* this forward declaration can avoid to export the function */
static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
517
#ifdef CONFIG_PM
518
static void iwl_mvm_d0i3_exit_work(struct work_struct *wk);
519
#endif
J
Johannes Berg 已提交
520

521
static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm)
522
{
523 524
	const struct iwl_pwr_tx_backoff *backoff = mvm->cfg->pwr_tx_backoffs;
	u64 dflt_pwr_limit;
525

526
	if (!backoff)
527 528
		return 0;

529
	dflt_pwr_limit = iwl_acpi_get_pwr_limit(mvm->dev);
530

531 532 533 534 535
	while (backoff->pwr) {
		if (dflt_pwr_limit >= backoff->pwr)
			return backoff->backoff;

		backoff++;
536 537 538 539 540
	}

	return 0;
}

541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563
static void iwl_mvm_tx_unblock_dwork(struct work_struct *work)
{
	struct iwl_mvm *mvm =
		container_of(work, struct iwl_mvm, cs_tx_unblock_dwork.work);
	struct ieee80211_vif *tx_blocked_vif;
	struct iwl_mvm_vif *mvmvif;

	mutex_lock(&mvm->mutex);

	tx_blocked_vif =
		rcu_dereference_protected(mvm->csa_tx_blocked_vif,
					  lockdep_is_held(&mvm->mutex));

	if (!tx_blocked_vif)
		goto unlock;

	mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif);
	iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
	RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
unlock:
	mutex_unlock(&mvm->mutex);
}

564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586
static int iwl_mvm_fwrt_dump_start(void *ctx)
{
	struct iwl_mvm *mvm = ctx;
	int ret;

	ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
	if (ret)
		return ret;

	mutex_lock(&mvm->mutex);

	return 0;
}

static void iwl_mvm_fwrt_dump_end(void *ctx)
{
	struct iwl_mvm *mvm = ctx;

	mutex_unlock(&mvm->mutex);

	iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
}

587 588 589 590 591
static bool iwl_mvm_fwrt_fw_running(void *ctx)
{
	return iwl_mvm_firmware_running(ctx);
}

592 593 594 595 596 597 598 599 600 601 602 603
static int iwl_mvm_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd)
{
	struct iwl_mvm *mvm = (struct iwl_mvm *)ctx;
	int ret;

	mutex_lock(&mvm->mutex);
	ret = iwl_mvm_send_cmd(mvm, host_cmd);
	mutex_unlock(&mvm->mutex);

	return ret;
}

604 605 606 607 608
static bool iwl_mvm_d3_debug_enable(void *ctx)
{
	return IWL_MVM_D3_DEBUG;
}

609 610 611
static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
	.dump_start = iwl_mvm_fwrt_dump_start,
	.dump_end = iwl_mvm_fwrt_dump_end,
612
	.fw_running = iwl_mvm_fwrt_fw_running,
613
	.send_hcmd = iwl_mvm_fwrt_send_hcmd,
614
	.d3_debug_enable = iwl_mvm_d3_debug_enable,
615 616
};

J
Johannes Berg 已提交
617 618 619 620 621 622 623 624 625 626 627 628
static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
		      const struct iwl_fw *fw, struct dentry *dbgfs_dir)
{
	struct ieee80211_hw *hw;
	struct iwl_op_mode *op_mode;
	struct iwl_mvm *mvm;
	struct iwl_trans_config trans_cfg = {};
	static const u8 no_reclaim_cmds[] = {
		TX_CMD,
	};
	int err, scan_size;
629
	u32 min_backoff;
630
	enum iwl_amsdu_size rb_size_default;
J
Johannes Berg 已提交
631

632 633 634 635 636 637 638
	/*
	 * We use IWL_MVM_STATION_COUNT to check the validity of the station
	 * index all over the driver - check that its value corresponds to the
	 * array size.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != IWL_MVM_STATION_COUNT);

J
Johannes Berg 已提交
639 640 641 642 643 644 645 646 647
	/********************************
	 * 1. Allocating and configuring HW data
	 ********************************/
	hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) +
				sizeof(struct iwl_mvm),
				&iwl_mvm_hw_ops);
	if (!hw)
		return NULL;

648 649
	if (cfg->max_rx_agg_size)
		hw->max_rx_aggregation_subframes = cfg->max_rx_agg_size;
650 651
	else
		hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
652

653 654
	if (cfg->max_tx_agg_size)
		hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
655 656
	else
		hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
657

J
Johannes Berg 已提交
658 659 660 661 662 663 664 665 666
	op_mode = hw->priv;

	mvm = IWL_OP_MODE_GET_MVM(op_mode);
	mvm->dev = trans->dev;
	mvm->trans = trans;
	mvm->cfg = cfg;
	mvm->fw = fw;
	mvm->hw = hw;

667 668
	iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm,
			    dbgfs_dir);
669

670 671
	mvm->init_status = 0;

672 673
	if (iwl_mvm_has_new_rx_api(mvm)) {
		op_mode->ops = &iwl_mvm_ops_mq;
674 675 676 677 678
		trans->rx_mpdu_cmd_hdr_size =
			(trans->cfg->device_family >=
			 IWL_DEVICE_FAMILY_22560) ?
			sizeof(struct iwl_rx_mpdu_desc) :
			IWL_RX_DESC_SIZE_V1;
679 680
	} else {
		op_mode->ops = &iwl_mvm_ops;
681 682
		trans->rx_mpdu_cmd_hdr_size =
			sizeof(struct iwl_rx_mpdu_res_start);
683 684 685 686 687

		if (WARN_ON(trans->num_rx_queues > 1))
			goto out_free;
	}

688
	mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0;
689

690
	mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
691
	mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE;
692 693 694
	mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
	mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;

695
	mvm->sf_state = SF_UNINIT;
696
	if (iwl_mvm_has_unified_ucode(mvm))
697
		iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_REGULAR);
698
	else
699
		iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_INIT);
700
	mvm->drop_bcn_ap_mode = true;
701

J
Johannes Berg 已提交
702
	mutex_init(&mvm->mutex);
703
	mutex_init(&mvm->d0i3_suspend_mutex);
J
Johannes Berg 已提交
704 705
	spin_lock_init(&mvm->async_handlers_lock);
	INIT_LIST_HEAD(&mvm->time_event_list);
706
	INIT_LIST_HEAD(&mvm->aux_roc_te_list);
J
Johannes Berg 已提交
707 708
	INIT_LIST_HEAD(&mvm->async_handlers_list);
	spin_lock_init(&mvm->time_event_lock);
709
	INIT_LIST_HEAD(&mvm->ftm_initiator.loc_list);
J
Johannes Berg 已提交
710 711 712

	INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
	INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
713
#ifdef CONFIG_PM
714
	INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
715
#endif
716
	INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
717
	INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
718
	INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
719
	INIT_LIST_HEAD(&mvm->add_stream_txqs);
J
Johannes Berg 已提交
720

721
	spin_lock_init(&mvm->d0i3_tx_lock);
722
	spin_lock_init(&mvm->refs_lock);
723 724
	skb_queue_head_init(&mvm->d0i3_tx);
	init_waitqueue_head(&mvm->d0i3_exit_waitq);
725
	init_waitqueue_head(&mvm->rx_sync_waitq);
726

727 728
	atomic_set(&mvm->queue_sync_counter, 0);

J
Johannes Berg 已提交
729 730
	SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);

731 732 733 734 735 736
	spin_lock_init(&mvm->tcm.lock);
	INIT_DELAYED_WORK(&mvm->tcm.work, iwl_mvm_tcm_work);
	mvm->tcm.ts = jiffies;
	mvm->tcm.ll_ts = jiffies;
	mvm->tcm.uapsd_nonagg_ts = jiffies;

737 738
	INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork);

J
Johannes Berg 已提交
739 740 741 742 743 744 745
	/*
	 * Populate the state variables that the transport layer needs
	 * to know about.
	 */
	trans_cfg.op_mode = op_mode;
	trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
	trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
746 747 748 749 750 751

	if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
		rb_size_default = IWL_AMSDU_2K;
	else
		rb_size_default = IWL_AMSDU_4K;

752
	switch (iwlwifi_mod_params.amsdu_size) {
753
	case IWL_AMSDU_DEF:
754 755
		trans_cfg.rx_buf_size = rb_size_default;
		break;
756 757 758 759 760 761 762 763 764 765 766 767
	case IWL_AMSDU_4K:
		trans_cfg.rx_buf_size = IWL_AMSDU_4K;
		break;
	case IWL_AMSDU_8K:
		trans_cfg.rx_buf_size = IWL_AMSDU_8K;
		break;
	case IWL_AMSDU_12K:
		trans_cfg.rx_buf_size = IWL_AMSDU_12K;
		break;
	default:
		pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME,
		       iwlwifi_mod_params.amsdu_size);
768
		trans_cfg.rx_buf_size = rb_size_default;
769
	}
770

771 772 773
	BUILD_BUG_ON(sizeof(struct iwl_ldbg_config_cmd) !=
		     LDBG_CFG_COMMAND_SIZE);

774
	trans->wide_cmd_header = true;
775 776
	trans_cfg.bc_table_dword =
		mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560;
J
Johannes Berg 已提交
777

778 779
	trans_cfg.command_groups = iwl_mvm_groups;
	trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
J
Johannes Berg 已提交
780

781
	trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
782
	trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
783
	trans_cfg.scd_set_active = true;
J
Johannes Berg 已提交
784

785 786 787
	trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info,
					  driver_data[2]);

788
	trans_cfg.sw_csum_tx = IWL_MVM_SW_TX_CSUM_OFFLOAD;
789

790 791
	/* Set a short watchdog for the command queue */
	trans_cfg.cmd_q_wdg_timeout =
792
		iwl_mvm_get_wd_timeout(mvm, NULL, false, true);
793

J
Johannes Berg 已提交
794 795 796 797 798 799 800 801
	snprintf(mvm->hw->wiphy->fw_version,
		 sizeof(mvm->hw->wiphy->fw_version),
		 "%s", fw->fw_version);

	/* Configure transport layer */
	iwl_trans_configure(mvm->trans, &trans_cfg);

	trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
802 803 804
	trans->dbg_dest_tlv = mvm->fw->dbg.dest_tlv;
	trans->dbg_n_dest_reg = mvm->fw->dbg.n_dest_reg;
	memcpy(trans->dbg_conf_tlv, mvm->fw->dbg.conf_tlv,
805
	       sizeof(trans->dbg_conf_tlv));
806
	trans->dbg_trigger_tlv = mvm->fw->dbg.trigger_tlv;
J
Johannes Berg 已提交
807

808 809 810
	trans->iml = mvm->fw->iml;
	trans->iml_len = mvm->fw->iml_len;

J
Johannes Berg 已提交
811 812 813 814 815 816 817 818 819 820 821 822 823
	/* set up notification wait support */
	iwl_notification_wait_init(&mvm->notif_wait);

	/* Init phy db */
	mvm->phy_db = iwl_phy_db_init(trans);
	if (!mvm->phy_db) {
		IWL_ERR(mvm, "Cannot init phy_db\n");
		goto out_free;
	}

	IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
		 mvm->cfg->name, mvm->trans->hw_rev);

824
	if (iwlwifi_mod_params.nvm_file)
825
		mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
826 827 828
	else
		IWL_DEBUG_EEPROM(mvm->trans->dev,
				 "working without external nvm file\n");
829

830 831
	err = iwl_trans_start_hw(mvm->trans);
	if (err)
832 833
		goto out_free;

834 835
	mutex_lock(&mvm->mutex);
	iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
836
	err = iwl_run_init_mvm_ucode(mvm, true);
837 838
	if (err)
		iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER);
839
	if (!iwlmvm_mod_params.init_dbg || !err)
840 841 842
		iwl_mvm_stop_device(mvm);
	iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
	mutex_unlock(&mvm->mutex);
843
	if (err < 0) {
844 845
		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
		goto out_free;
J
Johannes Berg 已提交
846 847
	}

848
	scan_size = iwl_mvm_scan_size(mvm);
849

J
Johannes Berg 已提交
850 851 852 853
	mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
	if (!mvm->scan_cmd)
		goto out_free;

854 855 856
	/* Set EBS as successful as long as not stated otherwise by the FW. */
	mvm->last_ebs_successful = true;

J
Johannes Berg 已提交
857 858 859
	err = iwl_mvm_mac_setup_register(mvm);
	if (err)
		goto out_free;
860
	mvm->hw_registered = true;
J
Johannes Berg 已提交
861

862
	min_backoff = iwl_mvm_min_backoff(mvm);
863 864
	iwl_mvm_thermal_initialize(mvm, min_backoff);

J
Johannes Berg 已提交
865 866 867 868
	err = iwl_mvm_dbgfs_register(mvm, dbgfs_dir);
	if (err)
		goto out_unregister;

869 870 871 872 873
	if (!iwl_mvm_has_new_rx_stats_api(mvm))
		memset(&mvm->rx_stats_v3, 0,
		       sizeof(struct mvm_statistics_rx_v3));
	else
		memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));
874

875 876 877 878
	/* The transport always starts with a taken reference, we can
	 * release it now if d0i3 is supported */
	if (iwl_mvm_is_d0i3_supported(mvm))
		iwl_trans_unref(mvm->trans);
879

880 881
	iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);

J
Johannes Berg 已提交
882 883 884
	return op_mode;

 out_unregister:
885 886 887
	if (iwlmvm_mod_params.init_dbg)
		return op_mode;

J
Johannes Berg 已提交
888
	ieee80211_unregister_hw(mvm->hw);
889
	mvm->hw_registered = false;
890
	iwl_mvm_leds_exit(mvm);
891
	iwl_mvm_thermal_exit(mvm);
J
Johannes Berg 已提交
892
 out_free:
893
	iwl_fw_flush_dump(&mvm->fwrt);
894
	iwl_fw_runtime_free(&mvm->fwrt);
895 896 897

	if (iwlmvm_mod_params.init_dbg)
		return op_mode;
J
Johannes Berg 已提交
898 899
	iwl_phy_db_free(mvm->phy_db);
	kfree(mvm->scan_cmd);
900 901
	iwl_trans_op_mode_leave(trans);

J
Johannes Berg 已提交
902 903 904 905 906 907 908 909 910
	ieee80211_free_hw(mvm->hw);
	return NULL;
}

static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	int i;

911 912 913 914 915 916 917
	/* If d0i3 is supported, we have released the reference that
	 * the transport started with, so we should take it back now
	 * that we are leaving.
	 */
	if (iwl_mvm_is_d0i3_supported(mvm))
		iwl_trans_ref(mvm->trans);

J
Johannes Berg 已提交
918 919
	iwl_mvm_leds_exit(mvm);

920
	iwl_mvm_thermal_exit(mvm);
921

922 923 924 925
	if (mvm->init_status & IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE) {
		ieee80211_unregister_hw(mvm->hw);
		mvm->init_status &= ~IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE;
	}
J
Johannes Berg 已提交
926 927

	kfree(mvm->scan_cmd);
928 929
	kfree(mvm->mcast_filter_cmd);
	mvm->mcast_filter_cmd = NULL;
J
Johannes Berg 已提交
930

931 932 933
	kfree(mvm->error_recovery_buf);
	mvm->error_recovery_buf = NULL;

934 935 936
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)
	kfree(mvm->d3_resume_sram);
#endif
937
	iwl_trans_op_mode_leave(mvm->trans);
J
Johannes Berg 已提交
938 939 940 941

	iwl_phy_db_free(mvm->phy_db);
	mvm->phy_db = NULL;

942
	kfree(mvm->nvm_data);
943
	for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
J
Johannes Berg 已提交
944 945
		kfree(mvm->nvm_sections[i].data);

946 947
	cancel_delayed_work_sync(&mvm->tcm.work);

948
	iwl_fw_runtime_free(&mvm->fwrt);
949 950 951
	mutex_destroy(&mvm->mutex);
	mutex_destroy(&mvm->d0i3_suspend_mutex);

J
Johannes Berg 已提交
952 953 954 955 956 957
	ieee80211_free_hw(mvm->hw);
}

struct iwl_async_handler_entry {
	struct list_head list;
	struct iwl_rx_cmd_buffer rxb;
958
	enum iwl_rx_handler_context context;
959
	void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
J
Johannes Berg 已提交
960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979
};

void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
{
	struct iwl_async_handler_entry *entry, *tmp;

	spin_lock_bh(&mvm->async_handlers_lock);
	list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
		iwl_free_rxb(&entry->rxb);
		list_del(&entry->list);
		kfree(entry);
	}
	spin_unlock_bh(&mvm->async_handlers_lock);
}

static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm =
		container_of(wk, struct iwl_mvm, async_handlers_wk);
	struct iwl_async_handler_entry *entry, *tmp;
980
	LIST_HEAD(local_list);
J
Johannes Berg 已提交
981 982 983 984 985 986 987 988 989 990 991 992

	/* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */

	/*
	 * Sync with Rx path with a lock. Remove all the entries from this list,
	 * add them to a local one (lock free), and then handle them.
	 */
	spin_lock_bh(&mvm->async_handlers_lock);
	list_splice_init(&mvm->async_handlers_list, &local_list);
	spin_unlock_bh(&mvm->async_handlers_lock);

	list_for_each_entry_safe(entry, tmp, &local_list, list) {
993 994
		if (entry->context == RX_HANDLER_ASYNC_LOCKED)
			mutex_lock(&mvm->mutex);
995
		entry->fn(mvm, &entry->rxb);
J
Johannes Berg 已提交
996 997
		iwl_free_rxb(&entry->rxb);
		list_del(&entry->list);
998 999
		if (entry->context == RX_HANDLER_ASYNC_LOCKED)
			mutex_unlock(&mvm->mutex);
J
Johannes Berg 已提交
1000 1001 1002 1003
		kfree(entry);
	}
}

1004 1005 1006 1007 1008 1009 1010
static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
					    struct iwl_rx_packet *pkt)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_cmd *cmds_trig;
	int i;

1011 1012 1013
	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
				     FW_DBG_TRIGGER_FW_NOTIF);
	if (!trig)
1014 1015 1016 1017 1018 1019 1020 1021 1022
		return;

	cmds_trig = (void *)trig->data;

	for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) {
		/* don't collect on CMD 0 */
		if (!cmds_trig->cmds[i].cmd_id)
			break;

1023 1024
		if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd ||
		    cmds_trig->cmds[i].group_id != pkt->hdr.group_id)
1025 1026
			continue;

1027 1028 1029
		iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
					"CMD 0x%02x.%02x received",
					pkt->hdr.group_id, pkt->hdr.cmd);
1030 1031 1032 1033
		break;
	}
}

1034 1035 1036
static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
			      struct iwl_rx_cmd_buffer *rxb,
			      struct iwl_rx_packet *pkt)
J
Johannes Berg 已提交
1037
{
1038
	int i;
1039

1040 1041
	iwl_mvm_rx_check_trigger(mvm, pkt);

J
Johannes Berg 已提交
1042 1043 1044 1045 1046 1047 1048 1049 1050
	/*
	 * Do the notification wait before RX handlers so
	 * even if the RX handler consumes the RXB we have
	 * access to it in the notification wait entry.
	 */
	iwl_notification_wait_notify(&mvm->notif_wait, pkt);

	for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) {
		const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i];
1051 1052
		struct iwl_async_handler_entry *entry;

1053
		if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
1054 1055
			continue;

1056
		if (rx_h->context == RX_HANDLER_SYNC) {
1057
			rx_h->fn(mvm, rxb);
1058
			return;
1059
		}
1060 1061 1062 1063

		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
		/* we can't do much... */
		if (!entry)
1064
			return;
1065 1066 1067 1068 1069

		entry->rxb._page = rxb_steal_page(rxb);
		entry->rxb._offset = rxb->_offset;
		entry->rxb._rx_page_order = rxb->_rx_page_order;
		entry->fn = rx_h->fn;
1070
		entry->context = rx_h->context;
1071 1072 1073 1074
		spin_lock(&mvm->async_handlers_lock);
		list_add_tail(&entry->list, &mvm->async_handlers_list);
		spin_unlock(&mvm->async_handlers_lock);
		schedule_work(&mvm->async_handlers_wk);
1075
		break;
J
Johannes Berg 已提交
1076 1077 1078
	}
}

1079 1080 1081 1082 1083 1084
static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
		       struct napi_struct *napi,
		       struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1085
	u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
1086

1087
	if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
1088
		iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
1089
	else if (cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_PHY_CMD))
1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100
		iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
	else
		iwl_mvm_rx_common(mvm, rxb, pkt);
}

static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
			  struct napi_struct *napi,
			  struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1101
	u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
1102

1103
	if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
1104
		iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0);
1105 1106
	else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
					 RX_QUEUES_NOTIFICATION)))
1107
		iwl_mvm_rx_queue_notif(mvm, rxb, 0);
1108
	else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
1109
		iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
1110 1111
	else if (cmd == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF))
		iwl_mvm_rx_monitor_ndp(mvm, napi, rxb, 0);
1112 1113 1114 1115
	else
		iwl_mvm_rx_common(mvm, rxb, pkt);
}

1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127
static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
			     const struct iwl_device_cmd *cmd)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	/*
	 * For now, we only set the CMD_WANT_ASYNC_CALLBACK for ADD_STA
	 * commands that need to block the Tx queues.
	 */
	iwl_trans_block_txq_ptrs(mvm->trans, false);
}

1128 1129 1130 1131 1132 1133
static int iwl_mvm_is_static_queue(struct iwl_mvm *mvm, int queue)
{
	return queue == mvm->aux_queue || queue == mvm->probe_queue ||
		queue == mvm->p2p_dev_queue || queue == mvm->snif_queue;
}

1134 1135
static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode,
				       int hw_queue, bool start)
J
Johannes Berg 已提交
1136 1137
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1138 1139 1140 1141 1142 1143 1144
	struct ieee80211_sta *sta;
	struct ieee80211_txq *txq;
	struct iwl_mvm_txq *mvmtxq;
	int i;
	unsigned long tid_bitmap;
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id;
1145

1146 1147 1148
	sta_id = iwl_mvm_has_new_tx_api(mvm) ?
		mvm->tvqm_info[hw_queue].sta_id :
		mvm->queue_info[hw_queue].ra_sta_id;
1149

1150
	if (WARN_ON_ONCE(sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id)))
J
Johannes Berg 已提交
1151 1152
		return;

1153 1154 1155 1156 1157 1158 1159
	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (IS_ERR_OR_NULL(sta))
		goto out;
	mvmsta = iwl_mvm_sta_from_mac80211(sta);

1160 1161 1162 1163 1164 1165 1166 1167 1168
	if (iwl_mvm_is_static_queue(mvm, hw_queue)) {
		if (!start)
			ieee80211_stop_queues(mvm->hw);
		else if (mvmsta->sta_state != IEEE80211_STA_NOTEXIST)
			ieee80211_wake_queues(mvm->hw);

		goto out;
	}

1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181
	if (iwl_mvm_has_new_tx_api(mvm)) {
		int tid = mvm->tvqm_info[hw_queue].txq_tid;

		tid_bitmap = BIT(tid);
	} else {
		tid_bitmap = mvm->queue_info[hw_queue].tid_bitmap;
	}

	for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		int tid = i;

		if (tid == IWL_MAX_TID_COUNT)
			tid = IEEE80211_NUM_TIDS;
1182

1183 1184 1185 1186 1187 1188
		txq = sta->txq[tid];
		mvmtxq = iwl_mvm_txq_from_mac80211(txq);
		mvmtxq->stopped = !start;

		if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST)
			iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
1189
	}
1190 1191 1192

out:
	rcu_read_unlock();
J
Johannes Berg 已提交
1193 1194
}

1195
static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
1196
{
1197 1198
	iwl_mvm_queue_state_change(op_mode, hw_queue, false);
}
1199

1200 1201 1202
static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
{
	iwl_mvm_queue_state_change(op_mode, hw_queue, true);
1203 1204
}

1205 1206 1207 1208 1209 1210 1211 1212 1213 1214
static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm)
{
	bool state = iwl_mvm_is_radio_killed(mvm);

	if (state)
		wake_up(&mvm->rx_sync_waitq);

	wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state);
}

1215 1216 1217 1218 1219 1220 1221
void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
{
	if (state)
		set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
	else
		clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);

1222
	iwl_mvm_set_rfkill_state(mvm);
1223 1224
}

1225
static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
J
Johannes Berg 已提交
1226 1227
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1228
	bool calibrating = READ_ONCE(mvm->calibrating);
J
Johannes Berg 已提交
1229 1230 1231 1232 1233 1234

	if (state)
		set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
	else
		clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);

1235
	iwl_mvm_set_rfkill_state(mvm);
1236

1237 1238 1239 1240 1241 1242 1243 1244
	/* iwl_run_init_mvm_ucode is waiting for results, abort it */
	if (calibrating)
		iwl_abort_notification_waits(&mvm->notif_wait);

	/*
	 * Stop the device if we run OPERATIONAL firmware or if we are in the
	 * middle of the calibrations.
	 */
1245
	return state && (mvm->fwrt.cur_fw_img != IWL_UCODE_INIT || calibrating);
J
Johannes Berg 已提交
1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257
}

static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	struct ieee80211_tx_info *info;

	info = IEEE80211_SKB_CB(skb);
	iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
	ieee80211_free_txskb(mvm->hw, skb);
}

1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273
struct iwl_mvm_reprobe {
	struct device *dev;
	struct work_struct work;
};

static void iwl_mvm_reprobe_wk(struct work_struct *wk)
{
	struct iwl_mvm_reprobe *reprobe;

	reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
	if (device_reprobe(reprobe->dev))
		dev_err(reprobe->dev, "reprobe failed!\n");
	kfree(reprobe);
	module_put(THIS_MODULE);
}

1274
void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
J
Johannes Berg 已提交
1275 1276 1277
{
	iwl_abort_notification_waits(&mvm->notif_wait);

1278 1279 1280 1281 1282 1283 1284 1285 1286 1287
	/*
	 * This is a bit racy, but worst case we tell mac80211 about
	 * a stopped/aborted scan when that was already done which
	 * is not a problem. It is necessary to abort any os scan
	 * here because mac80211 requires having the scan cleared
	 * before restarting.
	 * We'll reset the scan_status to NONE in restart cleanup in
	 * the next start() call from mac80211. If restart isn't called
	 * (no fw restart) scan status will stay busy.
	 */
1288
	iwl_mvm_report_scan_aborted(mvm);
1289

J
Johannes Berg 已提交
1290 1291 1292 1293 1294 1295
	/*
	 * If we're restarting already, don't cycle restarts.
	 * If INIT fw asserted, it will likely fail again.
	 * If WoWLAN fw asserted, don't restart either, mac80211
	 * can't recover this since we're already half suspended.
	 */
1296
	if (!mvm->fw_restart && fw_error) {
1297
		iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
1298
					false, 0);
1299
	} else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322
		struct iwl_mvm_reprobe *reprobe;

		IWL_ERR(mvm,
			"Firmware error during reconfiguration - reprobe!\n");

		/*
		 * get a module reference to avoid doing this while unloading
		 * anyway and to avoid scheduling a work with code that's
		 * being removed.
		 */
		if (!try_module_get(THIS_MODULE)) {
			IWL_ERR(mvm, "Module is being unloaded - abort\n");
			return;
		}

		reprobe = kzalloc(sizeof(*reprobe), GFP_ATOMIC);
		if (!reprobe) {
			module_put(THIS_MODULE);
			return;
		}
		reprobe->dev = mvm->trans->dev;
		INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
		schedule_work(&reprobe->work);
1323 1324 1325
	} else if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
			    &mvm->status)) {
		IWL_ERR(mvm, "HW restart already requested, but not started\n");
1326
	} else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR &&
1327 1328
		   mvm->hw_registered &&
		   !test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) {
1329 1330 1331
		/* don't let the transport/FW power down */
		iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);

1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345
		if (mvm->fw->ucode_capa.error_log_size) {
			u32 src_size = mvm->fw->ucode_capa.error_log_size;
			u32 src_addr = mvm->fw->ucode_capa.error_log_addr;
			u8 *recover_buf = kzalloc(src_size, GFP_ATOMIC);

			if (recover_buf) {
				mvm->error_recovery_buf = recover_buf;
				iwl_trans_read_mem_bytes(mvm->trans,
							 src_addr,
							 recover_buf,
							 src_size);
			}
		}

1346 1347
		if (fw_error && mvm->fw_restart > 0)
			mvm->fw_restart--;
1348
		set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
J
Johannes Berg 已提交
1349 1350 1351 1352
		ieee80211_restart_hw(mvm->hw);
	}
}

1353 1354 1355 1356
static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

1357 1358
	if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status))
		iwl_mvm_dump_nic_error_log(mvm);
1359

1360
	iwl_mvm_nic_restart(mvm, true);
1361 1362
}

J
Johannes Berg 已提交
1363 1364
static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
{
1365 1366
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

J
Johannes Berg 已提交
1367
	WARN_ON(1);
1368
	iwl_mvm_nic_restart(mvm, true);
J
Johannes Berg 已提交
1369 1370
}

1371
#ifdef CONFIG_PM
1372 1373
struct iwl_d0i3_iter_data {
	struct iwl_mvm *mvm;
1374
	struct ieee80211_vif *connected_vif;
1375 1376
	u8 ap_sta_id;
	u8 vif_count;
1377 1378
	u8 offloading_tid;
	bool disable_offloading;
1379 1380
};

1381 1382 1383 1384 1385 1386 1387 1388 1389 1390
static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif,
					struct iwl_d0i3_iter_data *iter_data)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;
	u32 available_tids = 0;
	u8 tid;

	if (WARN_ON(vif->type != NL80211_IFTYPE_STATION ||
1391
		    mvmvif->ap_sta_id == IWL_MVM_INVALID_STA))
1392 1393
		return false;

1394 1395
	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
	if (!mvmsta)
1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406
		return false;

	spin_lock_bh(&mvmsta->lock);
	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
		struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];

		/*
		 * in case of pending tx packets, don't use this tid
		 * for offloading in order to prevent reuse of the same
		 * qos seq counters.
		 */
1407
		if (iwl_mvm_tid_queued(mvm, tid_data))
1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430
			continue;

		if (tid_data->state != IWL_AGG_OFF)
			continue;

		available_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	/*
	 * disallow protocol offloading if we have no available tid
	 * (with no pending frames and no active aggregation,
	 * as we don't handle "holes" properly - the scheduler needs the
	 * frame's seq number and TFD index to match)
	 */
	if (!available_tids)
		return true;

	/* for simplicity, just use the first available tid */
	iter_data->offloading_tid = ffs(available_tids) - 1;
	return false;
}

1431 1432 1433
static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac,
					struct ieee80211_vif *vif)
{
1434 1435 1436
	struct iwl_d0i3_iter_data *data = _data;
	struct iwl_mvm *mvm = data->mvm;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1437 1438 1439 1440 1441 1442 1443
	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;

	IWL_DEBUG_RPM(mvm, "entering D0i3 - vif %pM\n", vif->addr);
	if (vif->type != NL80211_IFTYPE_STATION ||
	    !vif->bss_conf.assoc)
		return;

1444 1445 1446 1447 1448 1449 1450 1451
	/*
	 * in case of pending tx packets or active aggregations,
	 * avoid offloading features in order to prevent reuse of
	 * the same qos seq counters.
	 */
	if (iwl_mvm_disallow_offloading(mvm, vif, data))
		data->disable_offloading = true;

1452
	iwl_mvm_update_d0i3_power_mode(mvm, vif, true, flags);
1453 1454
	iwl_mvm_send_proto_offload(mvm, vif, data->disable_offloading,
				   false, flags);
1455 1456 1457 1458 1459 1460 1461

	/*
	 * on init/association, mvm already configures POWER_TABLE_CMD
	 * and REPLY_MCAST_FILTER_CMD, so currently don't
	 * reconfigure them (we might want to use different
	 * params later on, though).
	 */
1462 1463
	data->ap_sta_id = mvmvif->ap_sta_id;
	data->vif_count++;
1464 1465 1466 1467 1468 1469

	/*
	 * no new commands can be sent at this stage, so it's safe
	 * to save the vif pointer during d0i3 entrance.
	 */
	data->connected_vif = vif;
1470 1471
}

1472
static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
1473
				    struct iwl_wowlan_config_cmd *cmd,
1474 1475 1476 1477 1478
				    struct iwl_d0i3_iter_data *iter_data)
{
	struct ieee80211_sta *ap_sta;
	struct iwl_mvm_sta *mvm_ap_sta;

1479
	if (iter_data->ap_sta_id == IWL_MVM_INVALID_STA)
1480 1481 1482 1483 1484 1485 1486 1487 1488
		return;

	rcu_read_lock();

	ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[iter_data->ap_sta_id]);
	if (IS_ERR_OR_NULL(ap_sta))
		goto out;

	mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
1489
	cmd->is_11n_connection = ap_sta->ht_cap.ht_supported;
1490
	cmd->offloading_tid = iter_data->offloading_tid;
S
Sara Sharon 已提交
1491
	cmd->flags = ENABLE_L3_FILTERING | ENABLE_NBNS_FILTERING |
1492
		ENABLE_DHCP_FILTERING | ENABLE_STORE_BEACON;
1493 1494 1495 1496
	/*
	 * The d0i3 uCode takes care of the nonqos counters,
	 * so configure only the qos seq ones.
	 */
1497
	iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, cmd);
1498 1499 1500
out:
	rcu_read_unlock();
}
1501 1502

int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
E
Eliad Peller 已提交
1503 1504
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1505
	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
1506
	int ret;
1507 1508 1509
	struct iwl_d0i3_iter_data d0i3_iter_data = {
		.mvm = mvm,
	};
1510 1511 1512
	struct iwl_wowlan_config_cmd wowlan_config_cmd = {
		.wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
					     IWL_WOWLAN_WAKEUP_BEACON_MISS |
1513
					     IWL_WOWLAN_WAKEUP_LINK_CHANGE),
1514
	};
1515 1516
	struct iwl_d3_manager_config d3_cfg_cmd = {
		.min_sleep_time = cpu_to_le32(1000),
1517
		.wakeup_flags = cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR),
1518
	};
E
Eliad Peller 已提交
1519 1520

	IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");
1521

1522
	if (WARN_ON_ONCE(mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR))
1523 1524
		return -EINVAL;

1525 1526
	set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);

1527 1528 1529 1530 1531 1532 1533 1534 1535
	/*
	 * iwl_mvm_ref_sync takes a reference before checking the flag.
	 * so by checking there is no held reference we prevent a state
	 * in which iwl_mvm_ref_sync continues successfully while we
	 * configure the firmware to enter d0i3
	 */
	if (iwl_mvm_ref_taken(mvm)) {
		IWL_DEBUG_RPM(mvm->trans, "abort d0i3 due to taken ref\n");
		clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
1536
		wake_up(&mvm->d0i3_exit_waitq);
1537 1538 1539
		return 1;
	}

1540 1541 1542
	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_enter_d0i3_iterator,
1543 1544 1545
						   &d0i3_iter_data);
	if (d0i3_iter_data.vif_count == 1) {
		mvm->d0i3_ap_sta_id = d0i3_iter_data.ap_sta_id;
1546
		mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading;
1547 1548
	} else {
		WARN_ON_ONCE(d0i3_iter_data.vif_count > 1);
1549
		mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
1550
		mvm->d0i3_offloading = false;
1551
	}
1552

1553
	iwl_mvm_pause_tcm(mvm, true);
1554 1555 1556
	/* make sure we have no running tx while configuring the seqno */
	synchronize_net();

1557
	/* Flush the hw queues, in case something got queued during entry */
1558 1559 1560 1561 1562 1563 1564 1565 1566
	/* TODO new tx api */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		WARN_ONCE(1, "d0i3: Need to implement flush TX queue\n");
	} else {
		ret = iwl_mvm_flush_tx_path(mvm, iwl_mvm_flushable_queues(mvm),
					    flags);
		if (ret)
			return ret;
	}
1567

1568
	/* configure wowlan configuration only if needed */
1569
	if (mvm->d0i3_ap_sta_id != IWL_MVM_INVALID_STA) {
1570 1571 1572 1573 1574 1575
		/* wake on beacons only if beacon storing isn't supported */
		if (!fw_has_capa(&mvm->fw->ucode_capa,
				 IWL_UCODE_TLV_CAPA_BEACON_STORING))
			wowlan_config_cmd.wakeup_filter |=
				cpu_to_le32(IWL_WOWLAN_WAKEUP_BCN_FILTERING);

1576 1577 1578 1579
		iwl_mvm_wowlan_config_key_params(mvm,
						 d0i3_iter_data.connected_vif,
						 true, flags);

1580 1581 1582 1583 1584 1585 1586 1587 1588
		iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd,
					&d0i3_iter_data);

		ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
					   sizeof(wowlan_config_cmd),
					   &wowlan_config_cmd);
		if (ret)
			return ret;
	}
1589

1590 1591 1592
	return iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD,
				    flags | CMD_MAKE_TRANS_IDLE,
				    sizeof(d3_cfg_cmd), &d3_cfg_cmd);
E
Eliad Peller 已提交
1593 1594
}

1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608
static void iwl_mvm_exit_d0i3_iterator(void *_data, u8 *mac,
				       struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = _data;
	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO;

	IWL_DEBUG_RPM(mvm, "exiting D0i3 - vif %pM\n", vif->addr);
	if (vif->type != NL80211_IFTYPE_STATION ||
	    !vif->bss_conf.assoc)
		return;

	iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags);
}

1609
struct iwl_mvm_d0i3_exit_work_iter_data {
1610
	struct iwl_mvm *mvm;
1611
	struct iwl_wowlan_status *status;
1612 1613 1614
	u32 wakeup_reasons;
};

1615 1616
static void iwl_mvm_d0i3_exit_work_iter(void *_data, u8 *mac,
					struct ieee80211_vif *vif)
1617
{
1618
	struct iwl_mvm_d0i3_exit_work_iter_data *data = _data;
1619
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1620
	u32 reasons = data->wakeup_reasons;
1621

1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632
	/* consider only the relevant station interface */
	if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
	    data->mvm->d0i3_ap_sta_id != mvmvif->ap_sta_id)
		return;

	if (reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)
		iwl_mvm_connection_loss(data->mvm, vif, "D0i3");
	else if (reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON)
		ieee80211_beacon_loss(vif);
	else
		iwl_mvm_d0i3_update_keys(data->mvm, vif, data->status);
1633 1634
}

1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645
void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
{
	struct ieee80211_sta *sta = NULL;
	struct iwl_mvm_sta *mvm_ap_sta;
	int i;
	bool wake_queues = false;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->d0i3_tx_lock);

1646
	if (mvm->d0i3_ap_sta_id == IWL_MVM_INVALID_STA)
1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662
		goto out;

	IWL_DEBUG_RPM(mvm, "re-enqueue packets\n");

	/* get the sta in order to update seq numbers and re-enqueue skbs */
	sta = rcu_dereference_protected(
			mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id],
			lockdep_is_held(&mvm->mutex));

	if (IS_ERR_OR_NULL(sta)) {
		sta = NULL;
		goto out;
	}

	if (mvm->d0i3_offloading && qos_seq) {
		/* update qos seq numbers if offloading was enabled */
1663
		mvm_ap_sta = iwl_mvm_sta_from_mac80211(sta);
1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683
		for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
			u16 seq = le16_to_cpu(qos_seq[i]);
			/* firmware stores last-used one, we store next one */
			seq += 0x10;
			mvm_ap_sta->tid_data[i].seq_number = seq;
		}
	}
out:
	/* re-enqueue (or drop) all packets */
	while (!skb_queue_empty(&mvm->d0i3_tx)) {
		struct sk_buff *skb = __skb_dequeue(&mvm->d0i3_tx);

		if (!sta || iwl_mvm_tx_skb(mvm, skb, sta))
			ieee80211_free_txskb(mvm->hw, skb);

		/* if the skb_queue is not empty, we need to wake queues */
		wake_queues = true;
	}
	clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
	wake_up(&mvm->d0i3_exit_waitq);
1684
	mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
1685 1686 1687 1688 1689 1690
	if (wake_queues)
		ieee80211_wake_queues(mvm->hw);

	spin_unlock_bh(&mvm->d0i3_tx_lock);
}

1691 1692 1693
static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
1694 1695 1696 1697
	struct iwl_mvm_d0i3_exit_work_iter_data iter_data = {
		.mvm = mvm,
	};

1698
	struct iwl_wowlan_status *status;
1699
	u32 wakeup_reasons = 0;
1700
	__le16 *qos_seq = NULL;
1701 1702

	mutex_lock(&mvm->mutex);
1703 1704 1705 1706 1707

	status = iwl_mvm_send_wowlan_get_status(mvm);
	if (IS_ERR_OR_NULL(status)) {
		/* set to NULL so we don't need to check before kfree'ing */
		status = NULL;
1708
		goto out;
1709
	}
1710 1711

	wakeup_reasons = le32_to_cpu(status->wakeup_reasons);
1712
	qos_seq = status->qos_seq_ctr;
1713 1714 1715

	IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);

1716 1717 1718 1719 1720 1721
	iter_data.wakeup_reasons = wakeup_reasons;
	iter_data.status = status;
	ieee80211_iterate_active_interfaces(mvm->hw,
					    IEEE80211_IFACE_ITER_NORMAL,
					    iwl_mvm_d0i3_exit_work_iter,
					    &iter_data);
1722
out:
1723
	iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
1724

1725 1726 1727
	IWL_DEBUG_INFO(mvm, "d0i3 exit completed (wakeup reasons: 0x%x)\n",
		       wakeup_reasons);

1728
	/* qos_seq might point inside resp_pkt, so free it only now */
1729
	kfree(status);
1730

1731 1732 1733
	/* the FW might have updated the regdomain */
	iwl_mvm_update_changed_regdom(mvm);

1734
	iwl_mvm_resume_tcm(mvm);
1735
	iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK);
1736 1737 1738
	mutex_unlock(&mvm->mutex);
}

1739
int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm)
E
Eliad Peller 已提交
1740
{
1741 1742
	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
		    CMD_WAKE_UP_TRANS;
1743
	int ret;
E
Eliad Peller 已提交
1744 1745

	IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n");
1746

1747
	if (WARN_ON_ONCE(mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR))
1748 1749
		return -EINVAL;

1750 1751 1752 1753 1754 1755 1756 1757 1758
	mutex_lock(&mvm->d0i3_suspend_mutex);
	if (test_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags)) {
		IWL_DEBUG_RPM(mvm, "Deferring d0i3 exit until resume\n");
		__set_bit(D0I3_PENDING_WAKEUP, &mvm->d0i3_suspend_flags);
		mutex_unlock(&mvm->d0i3_suspend_mutex);
		return 0;
	}
	mutex_unlock(&mvm->d0i3_suspend_mutex);

1759 1760
	ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
	if (ret)
1761
		goto out;
1762 1763 1764 1765 1766

	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_exit_d0i3_iterator,
						   mvm);
1767 1768 1769
out:
	schedule_work(&mvm->d0i3_exit_work);
	return ret;
E
Eliad Peller 已提交
1770 1771
}

1772
int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
1773 1774 1775 1776 1777 1778 1779
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	iwl_mvm_ref(mvm, IWL_MVM_REF_EXIT_WORK);
	return _iwl_mvm_exit_d0i3(mvm);
}

1780 1781 1782 1783 1784 1785 1786
#define IWL_MVM_D0I3_OPS					\
	.enter_d0i3 = iwl_mvm_enter_d0i3,			\
	.exit_d0i3 = iwl_mvm_exit_d0i3,
#else /* CONFIG_PM */
#define IWL_MVM_D0I3_OPS
#endif /* CONFIG_PM */

1787 1788
#define IWL_MVM_COMMON_OPS					\
	/* these could be differentiated */			\
1789
	.async_cb = iwl_mvm_async_cb,				\
1790 1791 1792 1793 1794 1795 1796
	.queue_full = iwl_mvm_stop_sw_queue,			\
	.queue_not_full = iwl_mvm_wake_sw_queue,		\
	.hw_rf_kill = iwl_mvm_set_hw_rfkill_state,		\
	.free_skb = iwl_mvm_free_skb,				\
	.nic_error = iwl_mvm_nic_error,				\
	.cmd_queue_full = iwl_mvm_cmd_queue_full,		\
	.nic_config = iwl_mvm_nic_config,			\
1797
	IWL_MVM_D0I3_OPS					\
1798 1799 1800 1801
	/* as we only register one, these MUST be common! */	\
	.start = iwl_op_mode_mvm_start,				\
	.stop = iwl_op_mode_mvm_stop

J
Johannes Berg 已提交
1802
static const struct iwl_op_mode_ops iwl_mvm_ops = {
1803 1804 1805 1806 1807 1808 1809 1810 1811 1812
	IWL_MVM_COMMON_OPS,
	.rx = iwl_mvm_rx,
};

static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
			      struct napi_struct *napi,
			      struct iwl_rx_cmd_buffer *rxb,
			      unsigned int queue)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1813
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1814
	u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
1815

1816
	if (unlikely(cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)))
1817
		iwl_mvm_rx_frame_release(mvm, napi, rxb, queue);
1818 1819
	else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
					 RX_QUEUES_NOTIFICATION)))
1820
		iwl_mvm_rx_queue_notif(mvm, rxb, queue);
1821
	else if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
1822
		iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
1823 1824 1825 1826 1827 1828
}

static const struct iwl_op_mode_ops iwl_mvm_ops_mq = {
	IWL_MVM_COMMON_OPS,
	.rx = iwl_mvm_rx_mq,
	.rx_rss = iwl_mvm_rx_mq_rss,
J
Johannes Berg 已提交
1829
};