/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

/*
 * The new version of the ADD_STA command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	return iwl_mvm_has_new_rx_api(mvm) ?
		sizeof(struct iwl_mvm_add_sta_cmd) :
		sizeof(struct iwl_mvm_add_sta_cmd_v7);
}

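/*
 * Find the first station ID that is not already in use and not reserved
 * for this interface type; returns IWL_MVM_STATION_COUNT when the
 * firmware station table is full.
 */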
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_STATION_COUNT;
}

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (flags & STA_MODIFY_QUEUES)
			add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
	}

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}

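/*
 * Timer callback for an RX BA session: if frames were received within
 * twice the session timeout the timer is simply re-armed, otherwise the
 * session is torn down via the offloaded BA-stop path.
 */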
static void iwl_mvm_rx_agg_session_expired(unsigned long data)
{
	struct iwl_mvm_baid_data __rcu **rcu_ptr = (void *)data;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
					  sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}

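/*
 * Allocate and enable one hardware queue per AC for a new TDLS station.
 * Fails with -EBUSY if no free queue is found below the first
 * aggregation queue.
 */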
static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
				 struct ieee80211_sta *sta)
{
	unsigned long used_hw_queues;
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, NULL, true, false);
	u32 ac;

	lockdep_assert_held(&mvm->mutex);

	used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL);

	/* Find available queues, and allocate them to the ACs */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		u8 queue = find_first_zero_bit(&used_hw_queues,
					       mvm->first_agg_queue);

		if (queue >= mvm->first_agg_queue) {
			IWL_ERR(mvm, "Failed to allocate STA queue\n");
			return -EBUSY;
		}

		__set_bit(queue, &used_hw_queues);
		mvmsta->hw_queue[ac] = queue;
	}

	/* Found a place for all queues - enable them */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
				      mvmsta->hw_queue[ac],
				      iwl_mvm_ac_to_tx_fifo[ac], 0,
				      wdg_timeout);
		mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
	}

	return 0;
}

static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
				    struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned long sta_msk;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* disable the TDLS STA-specific queues */
	sta_msk = mvmsta->tfd_queue_msk;
	for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
		iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;
	int ret;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);

	return ret;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE;

		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
	}
	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */

	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	spin_lock(&mvm->queue_info_lock);
	/* Unmap MAC queues and TIDs from this queue */
	mvm->queue_info[queue].hw_queue_to_mac80211 = 0;
	mvm->queue_info[queue].hw_queue_refcount = 0;
	mvm->queue_info[queue].tid_bitmap = 0;
	spin_unlock(&mvm->queue_info_lock);

	return disable_agg_tids;
}

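/*
 * Allocate a TX queue for a station/TID pair in DQA mode: prefer an MGMT
 * queue for non-QoS frames, then the station's reserved queue, then any
 * free DATA queue (recycling an inactive one if needed), and finally
 * update the firmware through ADD_STA with STA_MODIFY_QUEUES.
 */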
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid,
				   struct ieee80211_hdr *hdr)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_ac_to_tx_fifo[ac],
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;
	bool using_inactive_queue = false;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	int ssn;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);

	/*
	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
	 * exists
	 */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_RESERVED ||
	     mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_INACTIVE)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);

	/*
	 * Check if this queue is already allocated but inactive.
	 * In such a case, we'll need to first free this queue before enabling
	 * it again, so we'll mark it as reserved to make sure no new traffic
	 * arrives on it
	 */
	if (queue > 0 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
		using_inactive_queue = true;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
				    queue, mvmsta->sta_id, tid);
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if (queue >= 0)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	spin_unlock_bh(&mvm->queue_info_lock);

	/* TODO: support shared queues for same RA */
	if (queue < 0)
		return -ENOSPC;

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as allowing to be aggregated at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	/*
	 * If this queue was previously inactive (idle) - we need to free it
	 * first
	 */
	if (using_inactive_queue) {
		struct iwl_scd_txq_cfg_cmd cmd = {
			.scd_queue = queue,
			.enable = 0,
		};

		disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);

		/* Disable the queue */
		iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids,
					     true);
		iwl_trans_txq_disable(mvm->trans, queue, false);
		ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
					   &cmd);
		if (ret) {
			IWL_ERR(mvm,
				"Failed to free inactive queue %d (ret=%d)\n",
				queue, ret);

			/* Re-mark the inactive queue as inactive */
			spin_lock_bh(&mvm->queue_info_lock);
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
			spin_unlock_bh(&mvm->queue_info_lock);

			return ret;
		}
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Allocating queue #%d to sta %d on tid %d\n",
			    queue, mvmsta->sta_id, tid);

	ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg,
			   wdg_timeout);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tid_data[tid].is_tid_active = true;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
	if (ret)
		goto out_err;

	/* If we need to re-enable aggregations... */
	if (queue_state == IWL_AGG_ON)
		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);

	return ret;

out_err:
	iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);

	return ret;
}

static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
	if (tid == IWL_MAX_TID_COUNT)
		return IEEE80211_AC_VO; /* MGMT */

	return tid_to_mac80211_ac[tid];
}

static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head deferred_tx;
	u8 mac_queue;
	bool no_queue = false; /* Marks if there is a problem with the queue */
	u8 ac;

	lockdep_assert_held(&mvm->mutex);

	skb = skb_peek(&tid_data->deferred_tx_frames);
	if (!skb)
		return;
	hdr = (void *)skb->data;

	ac = iwl_mvm_tid_to_ac_queue(tid);
	mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;

	if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE &&
	    iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
		IWL_ERR(mvm,
			"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
			mvmsta->sta_id, tid);

		/*
		 * Mark queue as problematic so later the deferred traffic is
		 * freed, as we can do nothing with it
		 */
		no_queue = true;
	}

	__skb_queue_head_init(&deferred_tx);

	/* Disable bottom-halves when entering TX path */
	local_bh_disable();
	spin_lock(&mvmsta->lock);
	skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
	spin_unlock(&mvmsta->lock);

	while ((skb = __skb_dequeue(&deferred_tx)))
		if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
			ieee80211_free_txskb(mvm->hw, skb);
	local_bh_enable();

	/* Wake queue */
	iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
}

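/*
 * Worker that runs the queue inactivity check and then transmits any
 * frames that were deferred while their station had no allocated queue.
 */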
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long deferred_tid_traffic;
	int sta_id, tid;

	/* Check inactivity of queues */
	iwl_mvm_inactivity_check(mvm);

	mutex_lock(&mvm->mutex);

	/* Go over all stations with deferred traffic */
	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
			 IWL_MVM_STATION_COUNT) {
		clear_bit(sta_id, mvm->sta_deferred_frames);
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;

		for_each_set_bit(tid, &deferred_tid_traffic,
				 IWL_MAX_TID_COUNT + 1)
			iwl_mvm_tx_deferred_stream(mvm, sta, tid);
	}

	mutex_unlock(&mvm->mutex);
}

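/*
 * Reserve (but don't yet enable) a DATA queue for a new station, using
 * the dedicated BSS client queue for a non-TDLS station on a station vif
 * when it is free.
 */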
static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;

	/*
	 * Check for inactive queues, so we don't reach a situation where we
	 * can't add a STA due to a shortage in queues that doesn't really exist
	 */
	iwl_mvm_inactivity_check(mvm);

	spin_lock_bh(&mvm->queue_info_lock);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm, "No available queues for new station\n");
		return -ENOSPC;
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta->reserved_queue = queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}

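/*
 * Add a station to the driver's and the firmware's station tables: pick a
 * free station ID (kept across HW restarts), initialize the per-TID state,
 * reserve queues where needed, and finally publish the station in
 * fw_id_to_mac_id so RX can look it up.
 */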
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret, sta_id;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	if (sta_id == IWL_MVM_STATION_COUNT)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;

	/* HW restart, don't assume the memory has been zeroed */
	atomic_set(&mvm->pending_frames[sta_id], 0);
	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
	mvm_sta->tfd_queue_msk = 0;

	/* allocate new queues for a TDLS station */
	if (sta->tdls) {
		ret = iwl_mvm_tdls_sta_init(mvm, sta);
		if (ret)
			return ret;
	} else if (!iwl_mvm_is_dqa_supported(mvm)) {
		for (i = 0; i < IEEE80211_NUM_ACS; i++)
			if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
				mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
	}

	/* for HW restart - reset everything but the sequence number */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_sta->tid_data[i].seq_number;
		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
		mvm_sta->tid_data[i].seq_number = seq;

		if (!iwl_mvm_is_dqa_supported(mvm))
			continue;

		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
		skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
	}
	mvm_sta->deferred_traffic_tid_map = 0;
	mvm_sta->agg_tids = 0;

	if (iwl_mvm_has_new_rx_api(mvm) &&
	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data),
				   GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		mvm_sta->dup_data = dup_data;
	}

	if (iwl_mvm_is_dqa_supported(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			goto err;
	}

	ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	iwl_mvm_tdls_sta_deinit(mvm, sta);
	return ret;
}

int iwl_mvm_update_sta(struct iwl_mvm *mvm,
		       struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta)
{
	return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0);
}

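/*
 * Ask the firmware to start (or stop) draining the frames still queued
 * for this station, using the STA_FLG_DRAIN_FLOW flag of ADD_STA.
 */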
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
			       mvmsta->sta_id);
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->sta_id);
		break;
	}

	return ret;
}

/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station validate that the station is indeed known to the driver (sanity
 * only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}

void iwl_mvm_sta_drained_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk);
	u8 sta_id;

	/*
	 * The mutex is needed because of the SYNC cmd, but not only: if the
	 * work would run concurrently with iwl_mvm_rm_sta, it would run before
	 * iwl_mvm_rm_sta sets the station as busy, and exit. Then
	 * iwl_mvm_rm_sta would set the station as busy, and nobody will clean
	 * that later.
	 */
	mutex_lock(&mvm->mutex);

	for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) {
		int ret;
		struct ieee80211_sta *sta =
			rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						  lockdep_is_held(&mvm->mutex));

		/*
		 * This station is in use or RCU-removed; the latter happens in
		 * managed mode, where mac80211 removes the station before we
		 * can remove it from firmware (we can only do that after the
		 * MAC is marked unassociated), and possibly while the deauth
		 * frame to disconnect from the AP is still queued. Then, the
		 * station pointer is -ENOENT when the last skb is reclaimed.
		 */
		if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT)
			continue;

		if (PTR_ERR(sta) == -EINVAL) {
			IWL_ERR(mvm, "Drained sta %d, but it is internal?\n",
				sta_id);
			continue;
		}

		if (!sta) {
			IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n",
				sta_id);
			continue;
		}

		WARN_ON(PTR_ERR(sta) != -EBUSY);
		/* This station was removed and we waited until it got drained,
		 * we can now proceed and remove it.
		 */
		ret = iwl_mvm_rm_sta_common(mvm, sta_id);
		if (ret) {
			IWL_ERR(mvm,
				"Couldn't remove sta %d after it was drained\n",
				sta_id);
			continue;
		}
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
		clear_bit(sta_id, mvm->sta_drained);

		if (mvm->tfd_drained[sta_id]) {
			unsigned long i, msk = mvm->tfd_drained[sta_id];

			for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
				iwl_mvm_disable_txq(mvm, i, i,
						    IWL_MAX_TID_COUNT, 0);

			mvm->tfd_drained[sta_id] = 0;
			IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
				       sta_id, msk);
		}
	}

	mutex_unlock(&mvm->mutex);
}

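/* Disable and unmap all TX queues still assigned to this station */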
static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct iwl_mvm_sta *mvm_sta)
{
	int ac;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		if (mvm_sta->tid_data[i].txq_id == IEEE80211_INVAL_HW_QUEUE)
			continue;

		ac = iwl_mvm_tid_to_ac_queue(i);
		iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
				    vif->hw_queue[ac], i, 0);
		mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
	}
}

int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_rx_api(mvm))
		kfree(mvm_sta->dup_data);

	if ((vif->type == NL80211_IFTYPE_STATION &&
	     mvmvif->ap_sta_id == mvm_sta->sta_id) ||
	    iwl_mvm_is_dqa_supported(mvm)) {
		ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
		if (ret)
			return ret;
		/* flush its queues here since we are freeing mvm_sta */
		ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
		if (ret)
			return ret;
		ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
						    mvm_sta->tfd_queue_msk);
		if (ret)
			return ret;
		ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

		/* If DQA is supported - the queues can be disabled now */
		if (iwl_mvm_is_dqa_supported(mvm))
			iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);

		/* if we are associated - we can't remove the AP STA now */
		if (vif->bss_conf.assoc)
			return ret;

		/* unassoc - go ahead - remove the AP STA now */
		mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;

		/* clear d0i3_ap_sta_id if no longer relevant */
		if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
			mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
	}

	/*
	 * This shouldn't happen - the TDLS channel switch should be canceled
	 * before the STA is removed.
	 */
	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == mvm_sta->sta_id)) {
		mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
		cancel_delayed_work(&mvm->tdls_cs.dwork);
	}

	/*
	 * Make sure that the tx response code sees the station as -EBUSY and
	 * calls the drain worker.
	 */
	spin_lock_bh(&mvm_sta->lock);
	/*
	 * There are frames pending on the AC queues for this station.
	 * We need to wait until all the frames are drained...
	 */
	if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) {
		rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
				   ERR_PTR(-EBUSY));
		spin_unlock_bh(&mvm_sta->lock);

		/* disable TDLS sta queues on drain complete */
		if (sta->tdls) {
			mvm->tfd_drained[mvm_sta->sta_id] =
							mvm_sta->tfd_queue_msk;
			IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n",
				       mvm_sta->sta_id);
		}

		ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
	} else {
		spin_unlock_bh(&mvm_sta->lock);

		if (sta->tdls)
			iwl_mvm_tdls_sta_deinit(mvm, sta);

		ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
	}

	return ret;
}

int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
		      struct ieee80211_vif *vif,
		      u8 sta_id)
{
	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);

	lockdep_assert_held(&mvm->mutex);

	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
	return ret;
}

int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
			     struct iwl_mvm_int_sta *sta,
			     u32 qmask, enum nl80211_iftype iftype)
{
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT))
			return -ENOSPC;
	}

	sta->tfd_queue_msk = qmask;

	/* put a non-NULL value so iterating over the stations won't stop */
	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
	return 0;
}

static void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm,
				    struct iwl_mvm_int_sta *sta)
{
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
	sta->sta_id = IWL_MVM_STATION_COUNT;
}

static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;
	cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
							     color));

	cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}

int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
{
	unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
					mvm->cfg->base_params->wd_timeout :
					IWL_WATCHDOG_DISABLED;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Map Aux queue to fifo - needs to happen before adding Aux station */
	iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
			      IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);

	/* Allocate aux station and assign to it the aux queue */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
				       NL80211_IFTYPE_UNSPECIFIED);
	if (ret)
		return ret;

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
					 MAC_INDEX_AUX, 0);

	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
	return ret;
}

int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);
	return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
					 mvmvif->id, 0);
}

int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
{
	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}

void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
{
	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
}

/*
 * Send the add station command for the vif's broadcast station.
 * Assumes that the station was already allocated.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 */
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	const u8 *baddr = _baddr;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_is_dqa_supported(mvm)) {
		struct iwl_trans_txq_scd_cfg cfg = {
			.fifo = IWL_MVM_TX_FIFO_VO,
			.sta_id = mvmvif->bcast_sta.sta_id,
			.tid = IWL_MAX_TID_COUNT,
			.aggregate = false,
			.frame_limit = IWL_FRAME_LIMIT,
		};
		unsigned int wdg_timeout =
			iwl_mvm_get_wd_timeout(mvm, vif, false, false);
		int queue;

		if ((vif->type == NL80211_IFTYPE_AP) &&
		    (mvmvif->bcast_sta.tfd_queue_msk &
		     BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)))
			queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
		else if ((vif->type == NL80211_IFTYPE_P2P_DEVICE) &&
			 (mvmvif->bcast_sta.tfd_queue_msk &
			  BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)))
			queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
		else if (WARN(1, "Missed required TXQ for adding bcast STA\n"))
			return -EINVAL;

		iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg,
				   wdg_timeout);
	}

	if (vif->type == NL80211_IFTYPE_ADHOC)
		baddr = vif->bss_conf.bssid;

	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
		return -ENOSPC;

	return iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
					  mvmvif->id, mvmvif->color);
}

/* Send the FW a request to remove the station from its internal data
 * structures, but DO NOT remove the entry from the local data structures. */
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	return ret;
}

int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u32 qmask = 0;

	lockdep_assert_held(&mvm->mutex);

	if (!iwl_mvm_is_dqa_supported(mvm))
		qmask = iwl_mvm_mac_get_queues_mask(vif);

	if (vif->type == NL80211_IFTYPE_AP) {
		/*
		 * The firmware defines the TFD queue mask to only be relevant
		 * for *unicast* queues, so the multicast (CAB) queue shouldn't
		 * be included.
		 */
		qmask &= ~BIT(vif->cab_queue);

		if (iwl_mvm_is_dqa_supported(mvm))
			qmask |= BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE);
	} else if (iwl_mvm_is_dqa_supported(mvm) &&
		   vif->type == NL80211_IFTYPE_P2P_DEVICE) {
		qmask |= BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE);
	}

	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
					ieee80211_vif_type_p2p(vif));
}

/* Allocate a new station entry for the broadcast station to the given vif,
 * and send it to the FW.
 * Note that each P2P mac should have its own broadcast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 */
int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
	if (ret)
		return ret;

	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);

	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, bsta);

	return ret;
}

void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);

	iwl_mvm_dealloc_bcast_sta(mvm, vif);

	return ret;
}

#define IWL_MAX_RX_BA_SESSIONS 16

static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
	struct iwl_mvm_delba_notif notif = {
		.metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
		.metadata.sync = 1,
		.delba.baid = baid,
	};
	iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
}

static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
				 struct iwl_mvm_baid_data *data)
{
	int i;

	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		int j;
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];

		spin_lock_bh(&reorder_buf->lock);
		if (likely(!reorder_buf->num_stored)) {
			spin_unlock_bh(&reorder_buf->lock);
			continue;
		}

		/*
		 * This shouldn't happen in regular DELBA since the internal
		 * delBA notification should trigger a release of all frames in
		 * the reorder buffer.
		 */
		WARN_ON(1);

		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_purge(&reorder_buf->entries[j]);
		/*
		 * Prevent timer re-arm. This prevents a very far fetched case
		 * where we timed out on the notification. There may be prior
		 * RX frames pending in the RX queue before the notification
		 * that might get processed between now and the actual deletion
		 * and we would re-arm the timer although we are deleting the
		 * reorder buffer.
		 */
		reorder_buf->removed = true;
		spin_unlock_bh(&reorder_buf->lock);
		del_timer_sync(&reorder_buf->reorder_timer);
	}
}

static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
					u32 sta_id,
					struct iwl_mvm_baid_data *data,
					u16 ssn, u8 buf_size)
{
	int i;

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		int j;

		reorder_buf->num_stored = 0;
		reorder_buf->head_sn = ssn;
		reorder_buf->buf_size = buf_size;
		/* rx reorder timer */
		reorder_buf->reorder_timer.function =
			iwl_mvm_reorder_timer_expired;
		reorder_buf->reorder_timer.data = (unsigned long)reorder_buf;
		init_timer(&reorder_buf->reorder_timer);
		spin_lock_init(&reorder_buf->lock);
		reorder_buf->mvm = mvm;
		reorder_buf->queue = i;
		reorder_buf->sta_id = sta_id;
		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_head_init(&reorder_buf->entries[j]);
	}
}

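/*
 * Start or stop an RX BA session for (sta, tid) in the firmware. With the
 * new RX API this also allocates (or tears down) the per-queue reorder
 * buffers and the BAID session timer.
 */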
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct iwl_mvm_baid_data *baid_data = NULL;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
		return -ENOSPC;
	}

	if (iwl_mvm_has_new_rx_api(mvm) && start) {
		/*
		 * Allocate here so if allocation fails we can bail out early
		 * before starting the BA session in the firmware
		 */
		baid_data = kzalloc(sizeof(*baid_data) +
				    mvm->trans->num_rx_queues *
				    sizeof(baid_data->reorder_buf[0]),
				    GFP_KERNEL);
		if (!baid_data)
			return -ENOMEM;
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (start) {
		cmd.add_immediate_ba_tid = (u8) tid;
		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
1483
		cmd.rx_ba_window = cpu_to_le16((u16)buf_size);
1484 1485 1486
	} else {
		cmd.remove_immediate_ba_tid = (u8) tid;
	}
J
				  STA_MODIFY_REMOVE_BA_TID;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		goto out_free;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "RX BA Session %sed in fw\n",
			       start ? "start" : "stopp");
		break;
	case ADD_STA_IMMEDIATE_BA_FAILURE:
		IWL_WARN(mvm, "RX BA Session refused by fw\n");
		ret = -ENOSPC;
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	if (ret)
		goto out_free;

	if (start) {
		u8 baid;

		mvm->rx_ba_sessions++;

		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
			ret = -EINVAL;
			goto out_free;
		}
		baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
			    IWL_ADD_STA_BAID_SHIFT);
		baid_data->baid = baid;
		baid_data->timeout = timeout;
		baid_data->last_rx = jiffies;
		init_timer(&baid_data->session_timer);
		baid_data->session_timer.function =
			iwl_mvm_rx_agg_session_expired;
		baid_data->session_timer.data =
			(unsigned long)&mvm->baid_map[baid];
		baid_data->mvm = mvm;
		baid_data->tid = tid;
		baid_data->sta_id = mvm_sta->sta_id;

		mvm_sta->tid_to_baid[tid] = baid;
		if (timeout)
			mod_timer(&baid_data->session_timer,
				  TU_TO_EXP_TIME(timeout * 2));

		iwl_mvm_init_reorder_buffer(mvm, mvm_sta->sta_id,
					    baid_data, ssn, buf_size);
		/*
		 * protect the BA data with RCU to cover a case where our
		 * internal RX sync mechanism will timeout (not that it's
		 * supposed to happen) and we will free the session data while
		 * RX is being processed in parallel
		 */
		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
	} else if (mvm->rx_ba_sessions > 0) {
		u8 baid = mvm_sta->tid_to_baid[tid];

		/* check that restart flow didn't zero the counter */
		mvm->rx_ba_sessions--;
		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
			return -EINVAL;

		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
		if (WARN_ON(!baid_data))
			return -EINVAL;

		/* synchronize all rx queues so we can safely delete */
		iwl_mvm_free_reorder(mvm, baid_data);
		del_timer_sync(&baid_data->session_timer);
		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
		kfree_rcu(baid_data, rcu_head);
	}
	return 0;

out_free:
	kfree(baid_data);
	return ret;
}

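/*
 * Enable or disable TX aggregation on a TID by updating the station's
 * TFD queue mask and tid_disable_tx bitmap in the firmware.
 */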
int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u8 queue, bool start)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start) {
		mvm_sta->tfd_queue_msk |= BIT(queue);
		mvm_sta->tid_disable_agg &= ~BIT(tid);
	} else {
		/* In DQA-mode the queue isn't removed on agg termination */
		if (!iwl_mvm_is_dqa_supported(mvm))
			mvm_sta->tfd_queue_msk &= ~BIT(queue);
		mvm_sta->tid_disable_agg |= BIT(tid);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX;
	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	return ret;
}

const u8 tid_to_mac80211_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
};

static const u8 tid_to_ucode_ac[] = {
	AC_BE,
	AC_BK,
	AC_BK,
	AC_BE,
	AC_VI,
	AC_VI,
	AC_VO,
	AC_VO,
};

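/*
 * Start a TX aggregation session: pick (or, in DQA mode, reuse) a TX
 * queue, record the starting SSN, and either tell mac80211 the session
 * can proceed or wait for the queue to drain first.
 */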
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data;
	int txq_id;
	int ret;

	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
		IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n",
			mvmsta->tid_data[tid].state);
		return -ENXIO;
	}

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvmsta->lock);

	/* possible race condition - we entered D0i3 while starting agg */
	if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
		spin_unlock_bh(&mvmsta->lock);
		IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
		return -EIO;
	}

	spin_lock_bh(&mvm->queue_info_lock);

	/*
	 * Note the possible cases:
	 *  1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed
	 *  2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free
	 *	one and mark it as reserved
	 *  3. In DQA mode, but no traffic yet on this TID: same treatment as in
	 *	non-DQA mode, since the TXQ hasn't yet been allocated
	 */
	txq_id = mvmsta->tid_data[tid].txq_id;
	if (!iwl_mvm_is_dqa_supported(mvm) ||
	    mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
		txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						 mvm->first_agg_queue,
						 mvm->last_agg_queue);
		if (txq_id < 0) {
			ret = txq_id;
			spin_unlock_bh(&mvm->queue_info_lock);
			IWL_ERR(mvm, "Failed to allocate agg queue\n");
			goto release_locks;
		}

		/* TXQ hasn't yet been enabled, so mark it only as reserved */
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
	}
	spin_unlock_bh(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "AGG for tid %d will be on queue #%d\n",
			    tid, txq_id);

	tid_data = &mvmsta->tid_data[tid];
	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->txq_id = txq_id;
	*ssn = tid_data->ssn;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->ssn,
			    tid_data->next_reclaimed);

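	/*
	 * If the queue is already empty for this TID we can complete the
	 * ADDBA handshake right away; otherwise wait until the pending
	 * frames are reclaimed before moving on.
	 */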
	if (tid_data->ssn == tid_data->next_reclaimed) {
		tid_data->state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}

	ret = 0;

release_locks:
	spin_unlock_bh(&mvmsta->lock);

	return ret;
}

int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u8 buf_size,
			    bool amsdu)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
	int queue, ret;
	bool alloc_queue = true;
	u16 ssn;

	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = buf_size,
		.aggregate = true,
	};

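	/* agg_tids is used as a bitmap, so it needs one bit per TID */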
	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
		     != IWL_MAX_TID_COUNT);

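	/* cap the window at the link quality command's frame limit */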
	buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);

	spin_lock_bh(&mvmsta->lock);
	ssn = tid_data->ssn;
	queue = tid_data->txq_id;
	tid_data->state = IWL_AGG_ON;
	mvmsta->agg_tids |= BIT(tid);
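	/* the stored ssn was consumed above; mark it as invalid */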
	tid_data->ssn = 0xffff;
	tid_data->amsdu_in_ampdu_allowed = amsdu;
	spin_unlock_bh(&mvmsta->lock);

	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	/* In DQA mode, the existing queue might need to be reconfigured */
	if (iwl_mvm_is_dqa_supported(mvm)) {
		spin_lock_bh(&mvm->queue_info_lock);
		/* Maybe there is no need to even alloc a queue... */
		if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
			alloc_queue = false;
		spin_unlock_bh(&mvm->queue_info_lock);

		/*
		 * Only reconfig the SCD for the queue if the window size has
		 * changed from current (become smaller)
		 */
		if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
			/*
			 * If reconfiguring an existing queue, it first must be
			 * drained
			 */
			ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
							    BIT(queue));
			if (ret) {
				IWL_ERR(mvm,
					"Error draining queue before reconfig\n");
				return ret;
			}

			ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
						   mvmsta->sta_id, tid,
						   buf_size, ssn);
			if (ret) {
				IWL_ERR(mvm,
					"Error reconfiguring TXQ #%d\n", queue);
				return ret;
			}
		}
	}

	if (alloc_queue)
		iwl_mvm_enable_txq(mvm, queue,
				   vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
				   &cfg, wdg_timeout);

	ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
	if (ret)
		return -EIO;

	/* No need to mark as reserved */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);

	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	mvmsta->max_agg_bufsize =
		min(mvmsta->max_agg_bufsize, buf_size);
	mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;

	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
}

int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	int err;


	/*
	 * If mac80211 is cleaning its state, then say that we finished since
	 * our state has been cleared anyway.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	}

	spin_lock_bh(&mvmsta->lock);

	txq_id = tid_data->txq_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);

	mvmsta->agg_tids &= ~BIT(tid);

	spin_lock_bh(&mvm->queue_info_lock);
	/*
	 * The TXQ is marked as reserved only if no traffic came through yet.
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. Since it hasn't even been
	 * allocated through iwl_mvm_enable_txq, we can just mark it back as
	 * free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
	spin_unlock_bh(&mvm->queue_info_lock);

	switch (tid_data->state) {
	case IWL_AGG_ON:
		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "ssn = %d, next_recl = %d\n",
				    tid_data->ssn, tid_data->next_reclaimed);

		/* There are still packets for this RA / TID in the HW */
		if (tid_data->ssn != tid_data->next_reclaimed) {
			tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
			err = 0;
			break;
		}

		tid_data->ssn = 0xffff;
		tid_data->state = IWL_AGG_OFF;
		spin_unlock_bh(&mvmsta->lock);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);

		if (!iwl_mvm_is_dqa_supported(mvm)) {
			int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];

			iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0);
		}
		return 0;
	case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer times out for example.
		 */

		/* No barriers since we are under mutex */
		lockdep_assert_held(&mvm->mutex);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		tid_data->state = IWL_AGG_OFF;
		err = 0;
		break;
	default:
		IWL_ERR(mvm,
			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			mvmsta->sta_id, tid, tid_data->state);
		IWL_ERR(mvm,
			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
		err = -EINVAL;
	}

	spin_unlock_bh(&mvmsta->lock);

	return err;
}

int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	enum iwl_mvm_agg_state old_state;

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
	 */
	spin_lock_bh(&mvmsta->lock);
	txq_id = tid_data->txq_id;
	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);
	old_state = tid_data->state;
	tid_data->state = IWL_AGG_OFF;
	mvmsta->agg_tids &= ~BIT(tid);
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);
	/*
	 * The TXQ is marked as reserved only if no traffic came through yet.
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. Since it hasn't even been
	 * allocated through iwl_mvm_enable_txq, we can just mark it back as
	 * free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (old_state >= IWL_AGG_ON) {
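		/*
		 * The session was fully operational: stop feeding it new
		 * frames, flush what is already queued and wait for the
		 * flush to land before disabling aggregation in the fw.
		 */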
		iwl_mvm_drain_sta(mvm, mvmsta, true);
		if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
			IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
		iwl_trans_wait_tx_queue_empty(mvm->trans,
					      mvmsta->tfd_queue_msk);
		iwl_mvm_drain_sta(mvm, mvmsta, false);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);

		if (!iwl_mvm_is_dqa_supported(mvm)) {
			int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];

			iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue,
					    tid, 0);
		}
	}

	return 0;
}

static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
	int i, max = -1, max_offs = -1;

	lockdep_assert_held(&mvm->mutex);

	/* Pick the unused key offset with the highest 'deleted'
	 * counter. Every time a key is deleted, all the counters
	 * are incremented and the one that was just deleted is
	 * reset to zero. Thus, the highest counter is the one
	 * that was deleted longest ago. Pick that one.
	 */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (test_bit(i, mvm->fw_key_table))
			continue;
		if (mvm->fw_key_deleted[i] > max) {
			max = mvm->fw_key_deleted[i];
			max_offs = i;
		}
	}

	if (max_offs < 0)
		return STA_KEY_IDX_INVALID;

	return max_offs;
}

static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
					       struct ieee80211_vif *vif,
					       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return iwl_mvm_sta_from_mac80211(sta);

	/*
	 * The device expects GTKs for station interfaces to be
	 * installed as GTKs for the AP station. If we have no
	 * station ID, then use AP's station ID.
	 */
	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
		u8 sta_id = mvmvif->ap_sta_id;

		/*
		 * It is possible that the 'sta' parameter is NULL,
		 * for example when a GTK is removed - the sta_id will then
		 * be the AP ID, and no station was passed by mac80211.
		 */
		return iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	}

	return NULL;
}

static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				struct iwl_mvm_sta *mvm_sta,
				struct ieee80211_key_conf *keyconf, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset)
{
	struct iwl_mvm_add_sta_key_cmd cmd = {};
	__le16 key_flags;
	int ret;
	u32 status;
	u16 keyidx;
	int i;
	u8 sta_id = mvm_sta->sta_id;

	keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
		 STA_KEY_FLG_KEYID_MSK;
	key_flags = cpu_to_le16(keyidx);
	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
		cmd.tkip_rx_tsc_byte2 = tkip_iv32;
		for (i = 0; i < 5; i++)
			cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
		memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
		break;
	default:
		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
	}

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	cmd.key_offset = key_offset;
	cmd.key_flags = key_flags;
	cmd.sta_id = sta_id;

	status = ADD_STA_SUCCESS;
	if (cmd_flags & CMD_ASYNC)
		ret =  iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC,
					    sizeof(cmd), &cmd);
	else
		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
						  &cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
		break;
	}

	return ret;
}

static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
				 struct ieee80211_key_conf *keyconf,
				 u8 sta_id, bool remove_key)
{
	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};

	/* verify the key details match the required command's expectations */
	if (WARN_ON((keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC) ||
		    (keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
		    (keyconf->keyidx != 4 && keyconf->keyidx != 5)))
		return -EINVAL;

	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
	igtk_cmd.sta_id = cpu_to_le32(sta_id);

	if (remove_key) {
		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
	} else {
		struct ieee80211_key_seq seq;
		const u8 *pn;

		memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		pn = seq.aes_cmac.pn;
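		/* the PN arrives big-endian; repack it into the LE counter */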
		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
						       ((u64) pn[4] << 8) |
						       ((u64) pn[3] << 16) |
						       ((u64) pn[2] << 24) |
						       ((u64) pn[1] << 32) |
						       ((u64) pn[0] << 40));
	}

	IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
		       remove_key ? "removing" : "installing",
		       igtk_cmd.sta_id);

	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(igtk_cmd), &igtk_cmd);
}


static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return sta->addr;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
		u8 sta_id = mvmvif->ap_sta_id;
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		return sta->addr;
	}


	return NULL;
}

static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *keyconf,
				 u8 key_offset,
				 bool mcast)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int ret;
	const u8 *addr;
	struct ieee80211_key_seq seq;
	u16 p1k[5];

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   seq.tkip.iv32, p1k, 0, key_offset);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   0, NULL, 0, key_offset);
		break;
	default:
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   0, NULL, 0, key_offset);
	}

	return ret;
}

static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
				    struct ieee80211_key_conf *keyconf,
				    bool mcast)
{
	struct iwl_mvm_add_sta_key_cmd cmd = {};
	__le16 key_flags;
	int ret;
	u32 status;

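	/* STA_KEY_NOT_VALID below tells the fw to delete this key offset */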
	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				 STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	cmd.key_flags = key_flags;
	cmd.key_offset = keyconf->hw_key_idx;
	cmd.sta_id = sta_id;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
					  &cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}

int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	/* Get the station id from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (!mvm_sta) {
		IWL_ERR(mvm, "Failed to find station\n");
		return -EINVAL;
	}
	sta_id = mvm_sta->sta_id;

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/*
	 * It is possible that the 'sta' parameter is NULL, and thus
	 * there is a need to retrieve the sta from the local station table.
	 */
	if (!sta) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta)) {
			IWL_ERR(mvm, "Invalid station id\n");
			return -EINVAL;
		}
	}

	if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
		return -EINVAL;

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use.  In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcode the indices (because the
	 * firmware hardcodes the PTK offset to 0).  In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset  = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}

int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_STATION_COUNT;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

	if (!mvm_sta) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	sta_id = mvm_sta->sta_id;

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
J

void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);

 unlock:
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
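	/* clearing STA_FLG_PS (mask set, flag zero) marks the station awake */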
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool agg)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation queues then check if the
	 * all queues combined that we're releasing frames from have
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (agg) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];
			if (WARN(tid_data->state != IWL_AGG_ON &&
				 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
				 "TID %d state is %d\n",
				 tid, tid_data->state)) {
				spin_unlock_bh(&mvmsta->lock);
				ieee80211_sta_eosp(sta);
				return;
			}

			n_queued = iwl_mvm_tid_queued(tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(cnt - remaining == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA);

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL);
	} else {
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
	}

	/* block the Tx queues until the FW updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
2523
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
J
Johannes Berg 已提交
2524 2525 2526
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
2527

2528 2529
void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
2530 2531 2532 2533 2534 2535 2536
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return;

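	/* the fw signals the end of the service period; relay to mac80211 */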
	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/*
	 * Tell mac80211 to start/stop queuing tx for this station,
	 * but don't stop queuing if there are still pending frames
	 * for this station.
	 */
	if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id]))
		ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}

void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}
}

void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

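	/* Tx to the AP is blocked for the duration of the channel switch */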
	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}