/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018        Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018        Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <net/mac80211.h>

#include "fw/notif-wait.h"
#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "fw/img.h"
#include "iwl-debug.h"
#include "iwl-drv.h"
#include "iwl-modparams.h"
#include "mvm.h"
#include "iwl-phy-db.h"
#include "iwl-eeprom-parse.h"
#include "iwl-csr.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "rs.h"
#include "fw/api/scan.h"
#include "time-event.h"
#include "fw-api.h"
#include "fw/acpi.h"

#define DRV_DESCRIPTION	"The new Intel(R) wireless AGN driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");

static const struct iwl_op_mode_ops iwl_mvm_ops;
static const struct iwl_op_mode_ops iwl_mvm_ops_mq;

struct iwl_mvm_mod_params iwlmvm_mod_params = {
	.power_scheme = IWL_POWER_SCHEME_BPS,
	.tfd_q_hang_detect = true
	/* rest of fields are 0 by default */
};

module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, 0444);
MODULE_PARM_DESC(init_dbg,
		 "set to true to debug an ASSERT in INIT fw (default: false)");
module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, 0444);
MODULE_PARM_DESC(power_scheme,
		 "power management scheme: 1-active, 2-balanced, 3-low power, default: 2");
module_param_named(tfd_q_hang_detect, iwlmvm_mod_params.tfd_q_hang_detect,
		   bool, 0444);
MODULE_PARM_DESC(tfd_q_hang_detect,
		 "TFD queues hang detection (default: true)");

/*
 * module init and exit functions
 */
static int __init iwl_mvm_init(void)
{
	int ret;

	ret = iwl_mvm_rate_control_register();
	if (ret) {
		pr_err("Unable to register rate control algorithm: %d\n", ret);
		return ret;
	}

	ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
	if (ret)
		pr_err("Unable to register MVM op_mode: %d\n", ret);

	return ret;
}
module_init(iwl_mvm_init);

static void __exit iwl_mvm_exit(void)
{
	iwl_opmode_deregister("iwlmvm");
	iwl_mvm_rate_control_unregister();
}
module_exit(iwl_mvm_exit);

static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	u32 reg_val = 0;
	u32 phy_config = iwl_mvm_get_phy_config(mvm);

	radio_cfg_type = (phy_config & FW_PHY_CFG_RADIO_TYPE) >>
			 FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & FW_PHY_CFG_RADIO_STEP) >>
			 FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & FW_PHY_CFG_RADIO_DASH) >>
			 FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= CSR_HW_REV_STEP(mvm->trans->hw_rev) <<
				CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= CSR_HW_REV_DASH(mvm->trans->hw_rev) <<
				CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) &
		 ~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE);

	/*
	 * TODO: Bits 7-8 of CSR in 8000 HW family and higher set the ADC
	 * sampling, and shouldn't be set to any non-zero value.
	 * The same is supposed to be true of the other HW, but unsetting
	 * them (such as the 7260) causes automatic tests to fail on seemingly
	 * unrelated errors. Need to further investigate this, but for now
	 * we'll separate cases.
	 */
	if (mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
		reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;

	if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt))
		reg_val |= CSR_HW_IF_CONFIG_REG_D3_DEBUG;

	iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
				CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
				CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
				CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
				CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
				CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
				CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
				CSR_HW_IF_CONFIG_REG_BIT_MAC_SI   |
				CSR_HW_IF_CONFIG_REG_D3_DEBUG,
				reg_val);

	IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
		       radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (!mvm->trans->cfg->apmg_not_supported)
		iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
				       ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}

/**
 * enum iwl_rx_handler_context context for Rx handler
 * @RX_HANDLER_SYNC : this means that it will be called in the Rx path
 *	which can't acquire mvm->mutex.
 * @RX_HANDLER_ASYNC_LOCKED : If the handler needs to hold mvm->mutex
 *	(and only in this case!), it should be set as ASYNC. In that case,
 *	it will be called from a worker with mvm->mutex held.
 * @RX_HANDLER_ASYNC_UNLOCKED : in case the handler needs to lock the
 *	mutex itself, it will be called from a worker without mvm->mutex held.
 */
enum iwl_rx_handler_context {
	RX_HANDLER_SYNC,
	RX_HANDLER_ASYNC_LOCKED,
	RX_HANDLER_ASYNC_UNLOCKED,
};

/**
 * struct iwl_rx_handlers handler for FW notification
 * @cmd_id: command id
 * @context: see &iwl_rx_handler_context
 * @fn: the function is called when notification is received
 */
struct iwl_rx_handlers {
	u16 cmd_id;
	enum iwl_rx_handler_context context;
	void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
};

#define RX_HANDLER(_cmd_id, _fn, _context)	\
	{ .cmd_id = _cmd_id, .fn = _fn, .context = _context }
#define RX_HANDLER_GRP(_grp, _cmd, _fn, _context)	\
	{ .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .context = _context }

/*
 * Handlers for fw notifications
 * Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME
 * This list should be in order of frequency for performance purposes.
 *
 * The handler can be one from three contexts, see &iwl_rx_handler_context
 */
static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
	RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC),
	RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC),

	RX_HANDLER_GRP(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF,
		       iwl_mvm_tlc_update_notif, RX_HANDLER_SYNC),

	RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif,
		   RX_HANDLER_ASYNC_LOCKED),
	RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
		   RX_HANDLER_ASYNC_LOCKED),
	RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics,
		   RX_HANDLER_ASYNC_LOCKED),

	RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID,
		   iwl_mvm_window_status_notif, RX_HANDLER_SYNC),

	RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif,
		   RX_HANDLER_SYNC),
	RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc,
		   RX_HANDLER_ASYNC_LOCKED),

	RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, RX_HANDLER_SYNC),

	RX_HANDLER(SCAN_ITERATION_COMPLETE,
		   iwl_mvm_rx_lmac_scan_iter_complete_notif, RX_HANDLER_SYNC),
	RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
		   iwl_mvm_rx_lmac_scan_complete_notif,
		   RX_HANDLER_ASYNC_LOCKED),
	RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_match_found,
		   RX_HANDLER_SYNC),
	RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
		   RX_HANDLER_ASYNC_LOCKED),
	RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC,
		   iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC),

	RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif,
		   RX_HANDLER_SYNC),

	RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
		   RX_HANDLER_SYNC),

	RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC),
	RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
		   iwl_mvm_power_uapsd_misbehaving_ap_notif, RX_HANDLER_SYNC),
	RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif,
		   RX_HANDLER_ASYNC_LOCKED),
	RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
		       iwl_mvm_temp_notif, RX_HANDLER_ASYNC_UNLOCKED),
	RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
		       iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC),

	RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
		   RX_HANDLER_ASYNC_LOCKED),
	RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif,
		   RX_HANDLER_SYNC),
	RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler,
		   RX_HANDLER_ASYNC_LOCKED),
	RX_HANDLER_GRP(DEBUG_GROUP, MFU_ASSERT_DUMP_NTF,
		       iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC),
	RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
		       iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC),
	RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
		       iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC),
	RX_HANDLER_GRP(DATA_PATH_GROUP, STA_PM_NOTIF,
		       iwl_mvm_sta_pm_notif, RX_HANDLER_SYNC),
};
#undef RX_HANDLER
#undef RX_HANDLER_GRP

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
	HCMD_NAME(MVM_ALIVE),
	HCMD_NAME(REPLY_ERROR),
	HCMD_NAME(ECHO_CMD),
	HCMD_NAME(INIT_COMPLETE_NOTIF),
	HCMD_NAME(PHY_CONTEXT_CMD),
	HCMD_NAME(DBG_CFG),
	HCMD_NAME(SCAN_CFG_CMD),
	HCMD_NAME(SCAN_REQ_UMAC),
	HCMD_NAME(SCAN_ABORT_UMAC),
	HCMD_NAME(SCAN_COMPLETE_UMAC),
	HCMD_NAME(TOF_CMD),
	HCMD_NAME(TOF_NOTIFICATION),
	HCMD_NAME(BA_WINDOW_STATUS_NOTIFICATION_ID),
	HCMD_NAME(ADD_STA_KEY),
	HCMD_NAME(ADD_STA),
	HCMD_NAME(REMOVE_STA),
	HCMD_NAME(FW_GET_ITEM_CMD),
	HCMD_NAME(TX_CMD),
	HCMD_NAME(SCD_QUEUE_CFG),
	HCMD_NAME(TXPATH_FLUSH),
	HCMD_NAME(MGMT_MCAST_KEY),
	HCMD_NAME(WEP_KEY),
	HCMD_NAME(SHARED_MEM_CFG),
	HCMD_NAME(TDLS_CHANNEL_SWITCH_CMD),
	HCMD_NAME(MAC_CONTEXT_CMD),
	HCMD_NAME(TIME_EVENT_CMD),
	HCMD_NAME(TIME_EVENT_NOTIFICATION),
	HCMD_NAME(BINDING_CONTEXT_CMD),
	HCMD_NAME(TIME_QUOTA_CMD),
	HCMD_NAME(NON_QOS_TX_COUNTER_CMD),
	HCMD_NAME(LEDS_CMD),
	HCMD_NAME(LQ_CMD),
	HCMD_NAME(FW_PAGING_BLOCK_CMD),
	HCMD_NAME(SCAN_OFFLOAD_REQUEST_CMD),
	HCMD_NAME(SCAN_OFFLOAD_ABORT_CMD),
	HCMD_NAME(HOT_SPOT_CMD),
	HCMD_NAME(SCAN_OFFLOAD_PROFILES_QUERY_CMD),
	HCMD_NAME(BT_COEX_UPDATE_REDUCED_TXP),
	HCMD_NAME(BT_COEX_CI),
	HCMD_NAME(PHY_CONFIGURATION_CMD),
	HCMD_NAME(CALIB_RES_NOTIF_PHY_DB),
	HCMD_NAME(PHY_DB_CMD),
	HCMD_NAME(SCAN_OFFLOAD_COMPLETE),
	HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
	HCMD_NAME(POWER_TABLE_CMD),
	HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
	HCMD_NAME(REPLY_THERMAL_MNG_BACKOFF),
	HCMD_NAME(DC2DC_CONFIG_CMD),
	HCMD_NAME(NVM_ACCESS_CMD),
	HCMD_NAME(BEACON_NOTIFICATION),
	HCMD_NAME(BEACON_TEMPLATE_CMD),
	HCMD_NAME(TX_ANT_CONFIGURATION_CMD),
	HCMD_NAME(BT_CONFIG),
	HCMD_NAME(STATISTICS_CMD),
	HCMD_NAME(STATISTICS_NOTIFICATION),
	HCMD_NAME(EOSP_NOTIFICATION),
	HCMD_NAME(REDUCE_TX_POWER_CMD),
	HCMD_NAME(CARD_STATE_NOTIFICATION),
	HCMD_NAME(MISSED_BEACONS_NOTIFICATION),
	HCMD_NAME(TDLS_CONFIG_CMD),
	HCMD_NAME(MAC_PM_POWER_TABLE),
	HCMD_NAME(TDLS_CHANNEL_SWITCH_NOTIFICATION),
	HCMD_NAME(MFUART_LOAD_NOTIFICATION),
	HCMD_NAME(RSS_CONFIG_CMD),
	HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC),
	HCMD_NAME(REPLY_RX_PHY_CMD),
	HCMD_NAME(REPLY_RX_MPDU_CMD),
	HCMD_NAME(FRAME_RELEASE),
	HCMD_NAME(BA_NOTIF),
	HCMD_NAME(MCC_UPDATE_CMD),
	HCMD_NAME(MCC_CHUB_UPDATE_CMD),
	HCMD_NAME(MARKER_CMD),
	HCMD_NAME(BT_PROFILE_NOTIFICATION),
	HCMD_NAME(BCAST_FILTER_CMD),
	HCMD_NAME(MCAST_FILTER_CMD),
	HCMD_NAME(REPLY_SF_CFG_CMD),
	HCMD_NAME(REPLY_BEACON_FILTERING_CMD),
	HCMD_NAME(D3_CONFIG_CMD),
	HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD),
	HCMD_NAME(OFFLOADS_QUERY_CMD),
	HCMD_NAME(REMOTE_WAKE_CONFIG_CMD),
	HCMD_NAME(MATCH_FOUND_NOTIFICATION),
	HCMD_NAME(DTS_MEASUREMENT_NOTIFICATION),
	HCMD_NAME(WOWLAN_PATTERNS),
	HCMD_NAME(WOWLAN_CONFIGURATION),
	HCMD_NAME(WOWLAN_TSC_RSC_PARAM),
	HCMD_NAME(WOWLAN_TKIP_PARAM),
	HCMD_NAME(WOWLAN_KEK_KCK_MATERIAL),
	HCMD_NAME(WOWLAN_GET_STATUSES),
	HCMD_NAME(SCAN_ITERATION_COMPLETE),
	HCMD_NAME(D0I3_END_CMD),
	HCMD_NAME(LTR_CONFIG),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
	HCMD_NAME(SHARED_MEM_CFG_CMD),
	HCMD_NAME(INIT_EXTENDED_CFG_CMD),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
	HCMD_NAME(CHANNEL_SWITCH_NOA_NOTIF),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
	HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
	HCMD_NAME(CTDP_CONFIG_CMD),
	HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD),
	HCMD_NAME(GEO_TX_POWER_LIMIT),
	HCMD_NAME(CT_KILL_NOTIFICATION),
	HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
	HCMD_NAME(DQA_ENABLE_CMD),
	HCMD_NAME(UPDATE_MU_GROUPS_CMD),
	HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
	HCMD_NAME(STA_HE_CTXT_CMD),
	HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
	HCMD_NAME(STA_PM_NOTIF),
	HCMD_NAME(MU_GROUP_MGMT_NOTIF),
	HCMD_NAME(RX_QUEUES_NOTIFICATION),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_debug_names[] = {
	HCMD_NAME(MFU_ASSERT_DUMP_NTF),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
	HCMD_NAME(STORED_BEACON_NTF),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = {
	HCMD_NAME(NVM_ACCESS_COMPLETE),
	HCMD_NAME(NVM_GET_INFO),
};

static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
	[LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
	[LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
	[SYSTEM_GROUP] = HCMD_ARR(iwl_mvm_system_names),
	[MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names),
	[PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
	[DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
	[PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
	[REGULATORY_AND_NVM_GROUP] =
		HCMD_ARR(iwl_mvm_regulatory_and_nvm_names),
};

/* this forward declaration can avoid to export the function */
static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
#ifdef CONFIG_PM
static void iwl_mvm_d0i3_exit_work(struct work_struct *wk);
#endif

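/*
 * Look up the platform power limit (e.g. from ACPI) and return the
 * matching TX power backoff from the per-device backoff table, or 0
 * when no backoff applies.
 */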
static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm)
{
	const struct iwl_pwr_tx_backoff *backoff = mvm->cfg->pwr_tx_backoffs;
	u64 dflt_pwr_limit;

	if (!backoff)
		return 0;

	dflt_pwr_limit = iwl_acpi_get_pwr_limit(mvm->dev);

	while (backoff->pwr) {
		if (dflt_pwr_limit >= backoff->pwr)
			return backoff->backoff;

		backoff++;
	}

	return 0;
}

static void iwl_mvm_tx_unblock_dwork(struct work_struct *work)
{
	struct iwl_mvm *mvm =
		container_of(work, struct iwl_mvm, cs_tx_unblock_dwork.work);
	struct ieee80211_vif *tx_blocked_vif;
	struct iwl_mvm_vif *mvmvif;

	mutex_lock(&mvm->mutex);

	tx_blocked_vif =
		rcu_dereference_protected(mvm->csa_tx_blocked_vif,
					  lockdep_is_held(&mvm->mutex));

	if (!tx_blocked_vif)
		goto unlock;

	mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif);
	iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
	RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
unlock:
	mutex_unlock(&mvm->mutex);
}

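/*
 * fw runtime ops: dump_start/dump_end bracket firmware debug data
 * collection with a d0i3 reference and mvm->mutex so the device stays
 * up for the duration of the dump.
 */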
static int iwl_mvm_fwrt_dump_start(void *ctx)
{
	struct iwl_mvm *mvm = ctx;
	int ret;

	ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
	if (ret)
		return ret;

	mutex_lock(&mvm->mutex);

	return 0;
}

static void iwl_mvm_fwrt_dump_end(void *ctx)
{
	struct iwl_mvm *mvm = ctx;

	mutex_unlock(&mvm->mutex);

	iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
}

static bool iwl_mvm_fwrt_fw_running(void *ctx)
{
	return iwl_mvm_firmware_running(ctx);
}

static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
	.dump_start = iwl_mvm_fwrt_dump_start,
	.dump_end = iwl_mvm_fwrt_dump_end,
	.fw_running = iwl_mvm_fwrt_fw_running,
};

static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
		      const struct iwl_fw *fw, struct dentry *dbgfs_dir)
{
	struct ieee80211_hw *hw;
	struct iwl_op_mode *op_mode;
	struct iwl_mvm *mvm;
	struct iwl_trans_config trans_cfg = {};
	static const u8 no_reclaim_cmds[] = {
		TX_CMD,
	};
	int err, scan_size;
	u32 min_backoff;
	enum iwl_amsdu_size rb_size_default;

	/*
	 * We use IWL_MVM_STATION_COUNT to check the validity of the station
	 * index all over the driver - check that its value corresponds to the
	 * array size.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != IWL_MVM_STATION_COUNT);

	/********************************
	 * 1. Allocating and configuring HW data
	 ********************************/
	hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) +
				sizeof(struct iwl_mvm),
				&iwl_mvm_hw_ops);
	if (!hw)
		return NULL;

	if (cfg->max_rx_agg_size)
		hw->max_rx_aggregation_subframes = cfg->max_rx_agg_size;

	if (cfg->max_tx_agg_size)
		hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;

	op_mode = hw->priv;

	mvm = IWL_OP_MODE_GET_MVM(op_mode);
	mvm->dev = trans->dev;
	mvm->trans = trans;
	mvm->cfg = cfg;
	mvm->fw = fw;
	mvm->hw = hw;

	iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm,
			    dbgfs_dir);

	mvm->init_status = 0;

	if (iwl_mvm_has_new_rx_api(mvm)) {
		op_mode->ops = &iwl_mvm_ops_mq;
		trans->rx_mpdu_cmd_hdr_size =
			(trans->cfg->device_family >=
			 IWL_DEVICE_FAMILY_22560) ?
			sizeof(struct iwl_rx_mpdu_desc) :
			IWL_RX_DESC_SIZE_V1;
	} else {
		op_mode->ops = &iwl_mvm_ops;
		trans->rx_mpdu_cmd_hdr_size =
			sizeof(struct iwl_rx_mpdu_res_start);

		if (WARN_ON(trans->num_rx_queues > 1))
			goto out_free;
	}

	mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0;

	mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
	mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE;
	mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
	mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;

	mvm->sf_state = SF_UNINIT;
	if (iwl_mvm_has_unified_ucode(mvm))
		iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_REGULAR);
	else
		iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_INIT);
	mvm->drop_bcn_ap_mode = true;

	mutex_init(&mvm->mutex);
	mutex_init(&mvm->d0i3_suspend_mutex);
	spin_lock_init(&mvm->async_handlers_lock);
	INIT_LIST_HEAD(&mvm->time_event_list);
	INIT_LIST_HEAD(&mvm->aux_roc_te_list);
	INIT_LIST_HEAD(&mvm->async_handlers_list);
	spin_lock_init(&mvm->time_event_lock);
	spin_lock_init(&mvm->queue_info_lock);

	INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
	INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
#ifdef CONFIG_PM
	INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
#endif
	INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
	INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
	INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);

	spin_lock_init(&mvm->d0i3_tx_lock);
	spin_lock_init(&mvm->refs_lock);
	skb_queue_head_init(&mvm->d0i3_tx);
	init_waitqueue_head(&mvm->d0i3_exit_waitq);
	init_waitqueue_head(&mvm->rx_sync_waitq);

	atomic_set(&mvm->queue_sync_counter, 0);

	SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);

	spin_lock_init(&mvm->tcm.lock);
	INIT_DELAYED_WORK(&mvm->tcm.work, iwl_mvm_tcm_work);
	mvm->tcm.ts = jiffies;
	mvm->tcm.ll_ts = jiffies;
	mvm->tcm.uapsd_nonagg_ts = jiffies;

	INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork);

	/*
	 * Populate the state variables that the transport layer needs
	 * to know about.
	 */
	trans_cfg.op_mode = op_mode;
	trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
	trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);

	if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
		rb_size_default = IWL_AMSDU_2K;
	else
		rb_size_default = IWL_AMSDU_4K;

	switch (iwlwifi_mod_params.amsdu_size) {
	case IWL_AMSDU_DEF:
		trans_cfg.rx_buf_size = rb_size_default;
		break;
	case IWL_AMSDU_4K:
		trans_cfg.rx_buf_size = IWL_AMSDU_4K;
		break;
	case IWL_AMSDU_8K:
		trans_cfg.rx_buf_size = IWL_AMSDU_8K;
		break;
	case IWL_AMSDU_12K:
		trans_cfg.rx_buf_size = IWL_AMSDU_12K;
		break;
	default:
		pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME,
		       iwlwifi_mod_params.amsdu_size);
		trans_cfg.rx_buf_size = rb_size_default;
	}

	trans->wide_cmd_header = true;
	trans_cfg.bc_table_dword =
		mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560;

	trans_cfg.command_groups = iwl_mvm_groups;
	trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);

	trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
	trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
	trans_cfg.scd_set_active = true;

	trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info,
					  driver_data[2]);

	trans_cfg.sw_csum_tx = IWL_MVM_SW_TX_CSUM_OFFLOAD;

	/* Set a short watchdog for the command queue */
	trans_cfg.cmd_q_wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, NULL, false, true);

	snprintf(mvm->hw->wiphy->fw_version,
		 sizeof(mvm->hw->wiphy->fw_version),
		 "%s", fw->fw_version);

	/* Configure transport layer */
	iwl_trans_configure(mvm->trans, &trans_cfg);

	trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
	trans->dbg_dest_tlv = mvm->fw->dbg.dest_tlv;
	trans->dbg_n_dest_reg = mvm->fw->dbg.n_dest_reg;
	memcpy(trans->dbg_conf_tlv, mvm->fw->dbg.conf_tlv,
	       sizeof(trans->dbg_conf_tlv));
	trans->dbg_trigger_tlv = mvm->fw->dbg.trigger_tlv;
	trans->dbg_dump_mask = mvm->fw->dbg.dump_mask;

	trans->iml = mvm->fw->iml;
	trans->iml_len = mvm->fw->iml_len;

	/* set up notification wait support */
	iwl_notification_wait_init(&mvm->notif_wait);

	/* Init phy db */
	mvm->phy_db = iwl_phy_db_init(trans);
	if (!mvm->phy_db) {
		IWL_ERR(mvm, "Cannot init phy_db\n");
		goto out_free;
	}

	IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
		 mvm->cfg->name, mvm->trans->hw_rev);

	if (iwlwifi_mod_params.nvm_file)
		mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
	else
		IWL_DEBUG_EEPROM(mvm->trans->dev,
				 "working without external nvm file\n");

	err = iwl_trans_start_hw(mvm->trans);
	if (err)
		goto out_free;

	mutex_lock(&mvm->mutex);
	iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
	err = iwl_run_init_mvm_ucode(mvm, true);
	if (!iwlmvm_mod_params.init_dbg || !err)
		iwl_mvm_stop_device(mvm);
	iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
	mutex_unlock(&mvm->mutex);
	if (err < 0) {
		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
		goto out_free;
	}

	scan_size = iwl_mvm_scan_size(mvm);

	mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
	if (!mvm->scan_cmd)
		goto out_free;

	/* Set EBS as successful as long as not stated otherwise by the FW. */
	mvm->last_ebs_successful = true;

	err = iwl_mvm_mac_setup_register(mvm);
	if (err)
		goto out_free;
	mvm->hw_registered = true;

	min_backoff = iwl_mvm_min_backoff(mvm);
	iwl_mvm_thermal_initialize(mvm, min_backoff);

	err = iwl_mvm_dbgfs_register(mvm, dbgfs_dir);
	if (err)
		goto out_unregister;

	if (!iwl_mvm_has_new_rx_stats_api(mvm))
		memset(&mvm->rx_stats_v3, 0,
		       sizeof(struct mvm_statistics_rx_v3));
	else
		memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));

	/* The transport always starts with a taken reference, we can
	 * release it now if d0i3 is supported */
	if (iwl_mvm_is_d0i3_supported(mvm))
		iwl_trans_unref(mvm->trans);

	iwl_mvm_tof_init(mvm);

	return op_mode;

 out_unregister:
	if (iwlmvm_mod_params.init_dbg)
		return op_mode;

	ieee80211_unregister_hw(mvm->hw);
	mvm->hw_registered = false;
	iwl_mvm_leds_exit(mvm);
	iwl_mvm_thermal_exit(mvm);
 out_free:
	iwl_fw_flush_dump(&mvm->fwrt);

	if (iwlmvm_mod_params.init_dbg)
		return op_mode;
	iwl_phy_db_free(mvm->phy_db);
	kfree(mvm->scan_cmd);
	iwl_trans_op_mode_leave(trans);

	ieee80211_free_hw(mvm->hw);
	return NULL;
}

static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	int i;

	/* If d0i3 is supported, we have released the reference that
	 * the transport started with, so we should take it back now
	 * that we are leaving.
	 */
	if (iwl_mvm_is_d0i3_supported(mvm))
		iwl_trans_ref(mvm->trans);

	iwl_mvm_leds_exit(mvm);

	iwl_mvm_thermal_exit(mvm);

	if (mvm->init_status & IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE) {
		ieee80211_unregister_hw(mvm->hw);
		mvm->init_status &= ~IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE;
	}

	kfree(mvm->scan_cmd);
	kfree(mvm->mcast_filter_cmd);
	mvm->mcast_filter_cmd = NULL;

#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)
	kfree(mvm->d3_resume_sram);
#endif
	iwl_trans_op_mode_leave(mvm->trans);

	iwl_phy_db_free(mvm->phy_db);
	mvm->phy_db = NULL;

	kfree(mvm->nvm_data);
	for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
		kfree(mvm->nvm_sections[i].data);

	cancel_delayed_work_sync(&mvm->tcm.work);

	iwl_mvm_tof_clean(mvm);

	mutex_destroy(&mvm->mutex);
	mutex_destroy(&mvm->d0i3_suspend_mutex);

	ieee80211_free_hw(mvm->hw);
}

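/*
 * Notifications whose handlers are marked ASYNC are not run in the RX
 * path; they are queued as iwl_async_handler_entry items on
 * mvm->async_handlers_list and processed later by
 * iwl_mvm_async_handlers_wk().
 */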
struct iwl_async_handler_entry {
	struct list_head list;
	struct iwl_rx_cmd_buffer rxb;
	enum iwl_rx_handler_context context;
	void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
};

void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
{
	struct iwl_async_handler_entry *entry, *tmp;

	spin_lock_bh(&mvm->async_handlers_lock);
	list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
		iwl_free_rxb(&entry->rxb);
		list_del(&entry->list);
		kfree(entry);
	}
	spin_unlock_bh(&mvm->async_handlers_lock);
}

static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm =
		container_of(wk, struct iwl_mvm, async_handlers_wk);
	struct iwl_async_handler_entry *entry, *tmp;
	LIST_HEAD(local_list);

	/* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */

	/*
	 * Sync with Rx path with a lock. Remove all the entries from this list,
	 * add them to a local one (lock free), and then handle them.
	 */
	spin_lock_bh(&mvm->async_handlers_lock);
	list_splice_init(&mvm->async_handlers_list, &local_list);
	spin_unlock_bh(&mvm->async_handlers_lock);

	list_for_each_entry_safe(entry, tmp, &local_list, list) {
		if (entry->context == RX_HANDLER_ASYNC_LOCKED)
			mutex_lock(&mvm->mutex);
		entry->fn(mvm, &entry->rxb);
		iwl_free_rxb(&entry->rxb);
		list_del(&entry->list);
		if (entry->context == RX_HANDLER_ASYNC_LOCKED)
			mutex_unlock(&mvm->mutex);
		kfree(entry);
	}
}

static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
					    struct iwl_rx_packet *pkt)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_cmd *cmds_trig;
	int i;

	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
				     FW_DBG_TRIGGER_FW_NOTIF);
	if (!trig)
		return;

	cmds_trig = (void *)trig->data;

	for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) {
		/* don't collect on CMD 0 */
		if (!cmds_trig->cmds[i].cmd_id)
			break;

		if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd ||
		    cmds_trig->cmds[i].group_id != pkt->hdr.group_id)
			continue;

		iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
					"CMD 0x%02x.%02x received",
					pkt->hdr.group_id, pkt->hdr.cmd);
		break;
	}
}

static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
			      struct iwl_rx_cmd_buffer *rxb,
			      struct iwl_rx_packet *pkt)
{
	int i;

	iwl_mvm_rx_check_trigger(mvm, pkt);

	/*
	 * Do the notification wait before RX handlers so
	 * even if the RX handler consumes the RXB we have
	 * access to it in the notification wait entry.
	 */
	iwl_notification_wait_notify(&mvm->notif_wait, pkt);

	for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) {
		const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i];
		struct iwl_async_handler_entry *entry;

		if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
			continue;

		if (rx_h->context == RX_HANDLER_SYNC) {
			rx_h->fn(mvm, rxb);
			return;
		}

		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
		/* we can't do much... */
		if (!entry)
			return;

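		/* Steal the page so the notification data stays valid after
		 * the RX path returns; the async worker frees it once the
		 * handler has run.
		 */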
		entry->rxb._page = rxb_steal_page(rxb);
		entry->rxb._offset = rxb->_offset;
		entry->rxb._rx_page_order = rxb->_rx_page_order;
		entry->fn = rx_h->fn;
		entry->context = rx_h->context;
		spin_lock(&mvm->async_handlers_lock);
		list_add_tail(&entry->list, &mvm->async_handlers_list);
		spin_unlock(&mvm->async_handlers_lock);
		schedule_work(&mvm->async_handlers_wk);
		break;
	}
}

static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
		       struct napi_struct *napi,
		       struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);

	if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
		iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
	else if (cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_PHY_CMD))
		iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
	else
		iwl_mvm_rx_common(mvm, rxb, pkt);
}

static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
			  struct napi_struct *napi,
			  struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);

	if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
		iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0);
	else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
					 RX_QUEUES_NOTIFICATION)))
		iwl_mvm_rx_queue_notif(mvm, rxb, 0);
	else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
		iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
	else
		iwl_mvm_rx_common(mvm, rxb, pkt);
}

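/*
 * mac80211 queue stop/wake is reference counted per queue, since several
 * hardware queues may map to the same mac80211 queue.
 */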
void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
{
	int q;

	if (WARN_ON_ONCE(!mq))
		return;

	for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
		if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "mac80211 %d already stopped\n", q);
			continue;
		}

		ieee80211_stop_queue(mvm->hw, q);
	}
}

static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
			     const struct iwl_device_cmd *cmd)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	/*
	 * For now, we only set the CMD_WANT_ASYNC_CALLBACK for ADD_STA
	 * commands that need to block the Tx queues.
	 */
	iwl_trans_block_txq_ptrs(mvm->trans, false);
}

static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	unsigned long mq;

	spin_lock_bh(&mvm->queue_info_lock);
	mq = mvm->hw_queue_to_mac80211[hw_queue];
	spin_unlock_bh(&mvm->queue_info_lock);

	iwl_mvm_stop_mac_queues(mvm, mq);
}

void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
{
	int q;

	if (WARN_ON_ONCE(!mq))
		return;

	for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
		if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "mac80211 %d still stopped\n", q);
			continue;
		}

		ieee80211_wake_queue(mvm->hw, q);
	}
}

static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	unsigned long mq;

	spin_lock_bh(&mvm->queue_info_lock);
	mq = mvm->hw_queue_to_mac80211[hw_queue];
	spin_unlock_bh(&mvm->queue_info_lock);

	iwl_mvm_start_mac_queues(mvm, mq);
}

static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm)
{
	bool state = iwl_mvm_is_radio_killed(mvm);

	if (state)
		wake_up(&mvm->rx_sync_waitq);

	wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state);
}

void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
{
	if (state)
		set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
	else
		clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);

	iwl_mvm_set_rfkill_state(mvm);
}

static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	bool calibrating = READ_ONCE(mvm->calibrating);

	if (state)
		set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
	else
		clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);

	iwl_mvm_set_rfkill_state(mvm);

	/* iwl_run_init_mvm_ucode is waiting for results, abort it */
	if (calibrating)
		iwl_abort_notification_waits(&mvm->notif_wait);

	/*
	 * Stop the device if we run OPERATIONAL firmware or if we are in the
	 * middle of the calibrations.
	 */
	return state && (mvm->fwrt.cur_fw_img != IWL_UCODE_INIT || calibrating);
}

static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	struct ieee80211_tx_info *info;

	info = IEEE80211_SKB_CB(skb);
	iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
	ieee80211_free_txskb(mvm->hw, skb);
}

struct iwl_mvm_reprobe {
	struct device *dev;
	struct work_struct work;
};

static void iwl_mvm_reprobe_wk(struct work_struct *wk)
{
	struct iwl_mvm_reprobe *reprobe;

	reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
	if (device_reprobe(reprobe->dev))
		dev_err(reprobe->dev, "reprobe failed!\n");
	kfree(reprobe);
	module_put(THIS_MODULE);
}

void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
{
	iwl_abort_notification_waits(&mvm->notif_wait);

	/*
	 * This is a bit racy, but worst case we tell mac80211 about
	 * a stopped/aborted scan when that was already done which
	 * is not a problem. It is necessary to abort any os scan
	 * here because mac80211 requires having the scan cleared
	 * before restarting.
	 * We'll reset the scan_status to NONE in restart cleanup in
	 * the next start() call from mac80211. If restart isn't called
	 * (no fw restart) scan status will stay busy.
	 */
	iwl_mvm_report_scan_aborted(mvm);

	/*
	 * If we're restarting already, don't cycle restarts.
	 * If INIT fw asserted, it will likely fail again.
	 * If WoWLAN fw asserted, don't restart either, mac80211
	 * can't recover this since we're already half suspended.
	 */
	if (!mvm->fw_restart && fw_error) {
		iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
					NULL);
	} else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		struct iwl_mvm_reprobe *reprobe;

		IWL_ERR(mvm,
			"Firmware error during reconfiguration - reprobe!\n");

		/*
		 * get a module reference to avoid doing this while unloading
		 * anyway and to avoid scheduling a work with code that's
		 * being removed.
		 */
		if (!try_module_get(THIS_MODULE)) {
			IWL_ERR(mvm, "Module is being unloaded - abort\n");
			return;
		}

		reprobe = kzalloc(sizeof(*reprobe), GFP_ATOMIC);
		if (!reprobe) {
			module_put(THIS_MODULE);
			return;
		}
		reprobe->dev = mvm->trans->dev;
		INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
		schedule_work(&reprobe->work);
	} else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR &&
		   mvm->hw_registered &&
		   !test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) {
		/* don't let the transport/FW power down */
		iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);

		if (fw_error && mvm->fw_restart > 0)
			mvm->fw_restart--;
		set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
		ieee80211_restart_hw(mvm->hw);
	}
}

static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status))
		iwl_mvm_dump_nic_error_log(mvm);

	iwl_mvm_nic_restart(mvm, true);
}

static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	WARN_ON(1);
	iwl_mvm_nic_restart(mvm, true);
}

#ifdef CONFIG_PM
struct iwl_d0i3_iter_data {
	struct iwl_mvm *mvm;
	struct ieee80211_vif *connected_vif;
	u8 ap_sta_id;
	u8 vif_count;
	u8 offloading_tid;
	bool disable_offloading;
};

static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif,
					struct iwl_d0i3_iter_data *iter_data)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;
	u32 available_tids = 0;
	u8 tid;

	if (WARN_ON(vif->type != NL80211_IFTYPE_STATION ||
		    mvmvif->ap_sta_id == IWL_MVM_INVALID_STA))
		return false;

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
	if (!mvmsta)
		return false;

	spin_lock_bh(&mvmsta->lock);
	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
		struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];

		/*
		 * in case of pending tx packets, don't use this tid
		 * for offloading in order to prevent reuse of the same
		 * qos seq counters.
		 */
		if (iwl_mvm_tid_queued(mvm, tid_data))
			continue;

		if (tid_data->state != IWL_AGG_OFF)
			continue;

		available_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	/*
	 * disallow protocol offloading if we have no available tid
	 * (with no pending frames and no active aggregation,
	 * as we don't handle "holes" properly - the scheduler needs the
	 * frame's seq number and TFD index to match)
	 */
	if (!available_tids)
		return true;

	/* for simplicity, just use the first available tid */
	iter_data->offloading_tid = ffs(available_tids) - 1;
	return false;
}

static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac,
					struct ieee80211_vif *vif)
{
	struct iwl_d0i3_iter_data *data = _data;
	struct iwl_mvm *mvm = data->mvm;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;

	IWL_DEBUG_RPM(mvm, "entering D0i3 - vif %pM\n", vif->addr);
	if (vif->type != NL80211_IFTYPE_STATION ||
	    !vif->bss_conf.assoc)
		return;

	/*
	 * in case of pending tx packets or active aggregations,
	 * avoid offloading features in order to prevent reuse of
	 * the same qos seq counters.
	 */
	if (iwl_mvm_disallow_offloading(mvm, vif, data))
		data->disable_offloading = true;

	iwl_mvm_update_d0i3_power_mode(mvm, vif, true, flags);
	iwl_mvm_send_proto_offload(mvm, vif, data->disable_offloading,
				   false, flags);

	/*
	 * on init/association, mvm already configures POWER_TABLE_CMD
	 * and REPLY_MCAST_FILTER_CMD, so currently don't
	 * reconfigure them (we might want to use different
	 * params later on, though).
	 */
	data->ap_sta_id = mvmvif->ap_sta_id;
	data->vif_count++;

	/*
	 * no new commands can be sent at this stage, so it's safe
	 * to save the vif pointer during d0i3 entrance.
	 */
	data->connected_vif = vif;
}

static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
				    struct iwl_wowlan_config_cmd *cmd,
				    struct iwl_d0i3_iter_data *iter_data)
{
	struct ieee80211_sta *ap_sta;
	struct iwl_mvm_sta *mvm_ap_sta;

	if (iter_data->ap_sta_id == IWL_MVM_INVALID_STA)
		return;

	rcu_read_lock();

	ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[iter_data->ap_sta_id]);
	if (IS_ERR_OR_NULL(ap_sta))
		goto out;

	mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
	cmd->is_11n_connection = ap_sta->ht_cap.ht_supported;
	cmd->offloading_tid = iter_data->offloading_tid;
	cmd->flags = ENABLE_L3_FILTERING | ENABLE_NBNS_FILTERING |
		ENABLE_DHCP_FILTERING | ENABLE_STORE_BEACON;
	/*
	 * The d0i3 uCode takes care of the nonqos counters,
	 * so configure only the qos seq ones.
	 */
	iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, cmd);
out:
	rcu_read_unlock();
}

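/*
 * D0i3 entry: gather per-vif state, flush TX, configure WoWLAN-style
 * filtering/offloads where needed, then send D3_CONFIG_CMD so the
 * firmware maintains the connection while the host is idle.
 */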
int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
	int ret;
	struct iwl_d0i3_iter_data d0i3_iter_data = {
		.mvm = mvm,
	};
	struct iwl_wowlan_config_cmd wowlan_config_cmd = {
		.wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
					     IWL_WOWLAN_WAKEUP_BEACON_MISS |
					     IWL_WOWLAN_WAKEUP_LINK_CHANGE),
	};
	struct iwl_d3_manager_config d3_cfg_cmd = {
		.min_sleep_time = cpu_to_le32(1000),
		.wakeup_flags = cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR),
	};

	IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");

	if (WARN_ON_ONCE(mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR))
		return -EINVAL;

	set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);

	/*
	 * iwl_mvm_ref_sync takes a reference before checking the flag.
	 * so by checking there is no held reference we prevent a state
	 * in which iwl_mvm_ref_sync continues successfully while we
	 * configure the firmware to enter d0i3
	 */
	if (iwl_mvm_ref_taken(mvm)) {
		IWL_DEBUG_RPM(mvm->trans, "abort d0i3 due to taken ref\n");
		clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
		wake_up(&mvm->d0i3_exit_waitq);
		return 1;
	}

	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_enter_d0i3_iterator,
						   &d0i3_iter_data);
	if (d0i3_iter_data.vif_count == 1) {
		mvm->d0i3_ap_sta_id = d0i3_iter_data.ap_sta_id;
		mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading;
	} else {
		WARN_ON_ONCE(d0i3_iter_data.vif_count > 1);
		mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
		mvm->d0i3_offloading = false;
	}

	iwl_mvm_pause_tcm(mvm, true);
	/* make sure we have no running tx while configuring the seqno */
	synchronize_net();

	/* Flush the hw queues, in case something got queued during entry */
	/* TODO new tx api */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		WARN_ONCE(1, "d0i3: Need to implement flush TX queue\n");
	} else {
		ret = iwl_mvm_flush_tx_path(mvm, iwl_mvm_flushable_queues(mvm),
					    flags);
		if (ret)
			return ret;
	}

	/* configure wowlan configuration only if needed */
	if (mvm->d0i3_ap_sta_id != IWL_MVM_INVALID_STA) {
		/* wake on beacons only if beacon storing isn't supported */
		if (!fw_has_capa(&mvm->fw->ucode_capa,
				 IWL_UCODE_TLV_CAPA_BEACON_STORING))
			wowlan_config_cmd.wakeup_filter |=
				cpu_to_le32(IWL_WOWLAN_WAKEUP_BCN_FILTERING);

		iwl_mvm_wowlan_config_key_params(mvm,
						 d0i3_iter_data.connected_vif,
						 true, flags);

		iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd,
					&d0i3_iter_data);

		ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
					   sizeof(wowlan_config_cmd),
					   &wowlan_config_cmd);
		if (ret)
			return ret;
	}

	return iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD,
				    flags | CMD_MAKE_TRANS_IDLE,
				    sizeof(d3_cfg_cmd), &d3_cfg_cmd);
}

static void iwl_mvm_exit_d0i3_iterator(void *_data, u8 *mac,
				       struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = _data;
	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO;

	IWL_DEBUG_RPM(mvm, "exiting D0i3 - vif %pM\n", vif->addr);
	if (vif->type != NL80211_IFTYPE_STATION ||
	    !vif->bss_conf.assoc)
		return;

	iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags);
}

struct iwl_mvm_d0i3_exit_work_iter_data {
	struct iwl_mvm *mvm;
	struct iwl_wowlan_status *status;
	u32 wakeup_reasons;
};

static void iwl_mvm_d0i3_exit_work_iter(void *_data, u8 *mac,
					struct ieee80211_vif *vif)
{
	struct iwl_mvm_d0i3_exit_work_iter_data *data = _data;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u32 reasons = data->wakeup_reasons;

	/* consider only the relevant station interface */
	if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
	    data->mvm->d0i3_ap_sta_id != mvmvif->ap_sta_id)
		return;

	if (reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)
		iwl_mvm_connection_loss(data->mvm, vif, "D0i3");
	else if (reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON)
		ieee80211_beacon_loss(vif);
	else
		iwl_mvm_d0i3_update_keys(data->mvm, vif, data->status);
}

void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
{
	struct ieee80211_sta *sta = NULL;
	struct iwl_mvm_sta *mvm_ap_sta;
	int i;
	bool wake_queues = false;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->d0i3_tx_lock);

	if (mvm->d0i3_ap_sta_id == IWL_MVM_INVALID_STA)
		goto out;

	IWL_DEBUG_RPM(mvm, "re-enqueue packets\n");

	/* get the sta in order to update seq numbers and re-enqueue skbs */
	sta = rcu_dereference_protected(
			mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id],
			lockdep_is_held(&mvm->mutex));

	if (IS_ERR_OR_NULL(sta)) {
		sta = NULL;
		goto out;
	}

	if (mvm->d0i3_offloading && qos_seq) {
		/* update qos seq numbers if offloading was enabled */
		mvm_ap_sta = iwl_mvm_sta_from_mac80211(sta);
		for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
			u16 seq = le16_to_cpu(qos_seq[i]);
			/* firmware stores last-used one, we store next one */
			seq += 0x10;
			mvm_ap_sta->tid_data[i].seq_number = seq;
		}
	}
out:
	/* re-enqueue (or drop) all packets */
	while (!skb_queue_empty(&mvm->d0i3_tx)) {
		struct sk_buff *skb = __skb_dequeue(&mvm->d0i3_tx);

		if (!sta || iwl_mvm_tx_skb(mvm, skb, sta))
			ieee80211_free_txskb(mvm->hw, skb);

		/* if the skb_queue is not empty, we need to wake queues */
		wake_queues = true;
	}
	clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
	wake_up(&mvm->d0i3_exit_waitq);
	mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
	if (wake_queues)
		ieee80211_wake_queues(mvm->hw);

	spin_unlock_bh(&mvm->d0i3_tx_lock);
}

static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
	struct iwl_mvm_d0i3_exit_work_iter_data iter_data = {
		.mvm = mvm,
	};

	struct iwl_wowlan_status *status;
	u32 wakeup_reasons = 0;
	__le16 *qos_seq = NULL;

	mutex_lock(&mvm->mutex);

	status = iwl_mvm_send_wowlan_get_status(mvm);
	if (IS_ERR_OR_NULL(status)) {
		/* set to NULL so we don't need to check before kfree'ing */
		status = NULL;
		goto out;
	}

	wakeup_reasons = le32_to_cpu(status->wakeup_reasons);
	qos_seq = status->qos_seq_ctr;

	IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);

	iter_data.wakeup_reasons = wakeup_reasons;
	iter_data.status = status;
	ieee80211_iterate_active_interfaces(mvm->hw,
					    IEEE80211_IFACE_ITER_NORMAL,
					    iwl_mvm_d0i3_exit_work_iter,
					    &iter_data);
out:
	iwl_mvm_d0i3_enable_tx(mvm, qos_seq);

	IWL_DEBUG_INFO(mvm, "d0i3 exit completed (wakeup reasons: 0x%x)\n",
		       wakeup_reasons);

	/* qos_seq might point inside resp_pkt, so free it only now */
	kfree(status);

	/* the FW might have updated the regdomain */
	iwl_mvm_update_changed_regdom(mvm);

	iwl_mvm_resume_tcm(mvm);
	iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK);
	mutex_unlock(&mvm->mutex);
}

int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm)
{
	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
		    CMD_WAKE_UP_TRANS;
	int ret;

	IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n");

	if (WARN_ON_ONCE(mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR))
		return -EINVAL;

	mutex_lock(&mvm->d0i3_suspend_mutex);
	if (test_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags)) {
		IWL_DEBUG_RPM(mvm, "Deferring d0i3 exit until resume\n");
		__set_bit(D0I3_PENDING_WAKEUP, &mvm->d0i3_suspend_flags);
		mutex_unlock(&mvm->d0i3_suspend_mutex);
		return 0;
	}
	mutex_unlock(&mvm->d0i3_suspend_mutex);

	ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
	if (ret)
		goto out;

	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_exit_d0i3_iterator,
						   mvm);
out:
	schedule_work(&mvm->d0i3_exit_work);
	return ret;
}

int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	iwl_mvm_ref(mvm, IWL_MVM_REF_EXIT_WORK);
	return _iwl_mvm_exit_d0i3(mvm);
}

#define IWL_MVM_D0I3_OPS					\
	.enter_d0i3 = iwl_mvm_enter_d0i3,			\
	.exit_d0i3 = iwl_mvm_exit_d0i3,
#else /* CONFIG_PM */
#define IWL_MVM_D0I3_OPS
#endif /* CONFIG_PM */

#define IWL_MVM_COMMON_OPS					\
	/* these could be differentiated */			\
1698
	.async_cb = iwl_mvm_async_cb,				\
1699 1700 1701 1702 1703 1704 1705
	.queue_full = iwl_mvm_stop_sw_queue,			\
	.queue_not_full = iwl_mvm_wake_sw_queue,		\
	.hw_rf_kill = iwl_mvm_set_hw_rfkill_state,		\
	.free_skb = iwl_mvm_free_skb,				\
	.nic_error = iwl_mvm_nic_error,				\
	.cmd_queue_full = iwl_mvm_cmd_queue_full,		\
	.nic_config = iwl_mvm_nic_config,			\
1706
	IWL_MVM_D0I3_OPS					\
1707 1708 1709 1710
	/* as we only register one, these MUST be common! */	\
	.start = iwl_op_mode_mvm_start,				\
	.stop = iwl_op_mode_mvm_stop

J
static const struct iwl_op_mode_ops iwl_mvm_ops = {
	.rx = iwl_mvm_rx,
};

static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
			      struct napi_struct *napi,
			      struct iwl_rx_cmd_buffer *rxb,
			      unsigned int queue)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1722
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1723
	u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
1724

1725
	if (unlikely(cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)))
1726
		iwl_mvm_rx_frame_release(mvm, napi, rxb, queue);
1727 1728
	else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
					 RX_QUEUES_NOTIFICATION)))
1729
		iwl_mvm_rx_queue_notif(mvm, rxb, queue);
1730
	else if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
1731
		iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
1732 1733 1734 1735 1736 1737
}

static const struct iwl_op_mode_ops iwl_mvm_ops_mq = {
	IWL_MVM_COMMON_OPS,
	.rx = iwl_mvm_rx_mq,
	.rx_rss = iwl_mvm_rx_mq_rss,
J