/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

/*
 * The new version of the ADD_STA command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	return iwl_mvm_has_new_rx_api(mvm) ?
		sizeof(struct iwl_mvm_add_sta_cmd) :
		sizeof(struct iwl_mvm_add_sta_cmd_v7);
}

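/*
 * Find a free station ID in the firmware's station table. Returns
 * IWL_MVM_STATION_COUNT if the table is full. For non-station interfaces,
 * station ID 0 is skipped so it stays available for the AP station of a
 * station vif, since d0i3/d3 assume that station uses ID 0.
 */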
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_STATION_COUNT;
}

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (flags & STA_MODIFY_QUEUES)
			add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
	}

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}

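/*
 * Timer callback for an RX BA session: if no frame was received for more
 * than twice the negotiated session timeout, tear the session down through
 * mac80211; otherwise re-arm the timer for the time remaining.
 */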
static void iwl_mvm_rx_agg_session_expired(unsigned long data)
{
	struct iwl_mvm_baid_data __rcu **rcu_ptr = (void *)data;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
					  sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}

static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
				 struct ieee80211_sta *sta)
{
	unsigned long used_hw_queues;
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, NULL, true, false);
	u32 ac;

	lockdep_assert_held(&mvm->mutex);

	used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL);

	/* Find available queues, and allocate them to the ACs */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		u8 queue = find_first_zero_bit(&used_hw_queues,
					       mvm->first_agg_queue);

		if (queue >= mvm->first_agg_queue) {
			IWL_ERR(mvm, "Failed to allocate STA queue\n");
			return -EBUSY;
		}

		__set_bit(queue, &used_hw_queues);
		mvmsta->hw_queue[ac] = queue;
	}

	/* Found a place for all queues - enable them */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
				      mvmsta->hw_queue[ac],
				      iwl_mvm_ac_to_tx_fifo[ac], 0,
				      wdg_timeout);
		mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
	}

	return 0;
}

static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
				    struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned long sta_msk;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* disable the TDLS STA-specific queues */
	sta_msk = mvmsta->tfd_queue_msk;
	for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
		iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;
	int ret;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);

	return ret;
}

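/*
 * Returns the bitmap of TIDs mapped to this queue that currently have an
 * active TX aggregation session.
 */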
static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	s8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks the queue as free. It DOESN'T delete a BA
 * agreement, and doesn't disable the queue.
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE;
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	spin_lock_bh(&mvm->queue_info_lock);
	/* Unmap MAC queues and TIDs from this queue */
	mvm->queue_info[queue].hw_queue_to_mac80211 = 0;
	mvm->queue_info[queue].hw_queue_refcount = 0;
	mvm->queue_info[queue].tid_bitmap = 0;
	spin_unlock_bh(&mvm->queue_info_lock);

	return disable_agg_tids;
}

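/*
 * Pick an already-allocated DATA queue of this station that a new TID can
 * share, according to the priority order documented below. Returns -ENOSPC
 * if no suitable queue is found.
 */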
static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	lockdep_assert_held(&mvm->queue_info_lock);

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE so only AC less than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure queue found (or not) is legal */
	if (!((queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE &&
	       queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE) ||
	      (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE &&
	       queue <= IWL_MVM_DQA_MAX_DATA_QUEUE) ||
	      (queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE))) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		queue = -ENOSPC;
	}

	return queue;
}

/*
 * If a given queue has a higher AC than the TID stream that is being added to
 * it, the queue needs to be redirected to the lower AC. This function does that
 * in such a case; otherwise - if no redirection is required - it does nothing,
 * unless the %force param is true.
 */
static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
				      int ac, int ssn, unsigned int wdg_timeout,
				      bool force)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.enable = 0,
	};
	bool shared_queue;
	unsigned long mq;
	int ret;

	/*
	 * If the AC is lower than current one - FIFO needs to be redirected to
	 * the lowest one of the streams in the queue. Check if this is needed
	 * here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
	 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
	 * we need to check if the numerical value of X is LARGER than of Y.
	 */
	spin_lock_bh(&mvm->queue_info_lock);
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		spin_unlock_bh(&mvm->queue_info_lock);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	mq = mvm->queue_info[queue].hw_queue_to_mac80211;
	shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
	spin_unlock_bh(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting shared TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop MAC queues and wait for this queue to empty */
	iwl_mvm_stop_mac_queues(mvm, mq);
	ret = iwl_trans_wait_tx_queue_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable(mvm->trans, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
			     ssn, wdg_timeout);

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
			     ssn);

	/* Update AC marking of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].mac80211_ac = ac;
	spin_unlock_bh(&mvm->queue_info_lock);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the MAC queues */
	iwl_mvm_start_mac_queues(mvm, mq);

	return ret;
}

static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid,
				   struct ieee80211_hdr *hdr)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_ac_to_tx_fifo[ac],
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;
	bool using_inactive_queue = false;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);

	/*
	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
	 * exists
	 */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_RESERVED ||
	     mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_INACTIVE)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);

	/*
	 * Check if this queue is already allocated but inactive.
	 * In such a case, we'll need to first free this queue before enabling
	 * it again, so we'll mark it as reserved to make sure no new traffic
	 * arrives on it
	 */
	if (queue > 0 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
		using_inactive_queue = true;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
				    queue, mvmsta->sta_id, tid);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if ((queue > 0) && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	spin_unlock_bh(&mvm->queue_info_lock);

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return -ENOSPC;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as allowed to be aggregated at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	/*
	 * If this queue was previously inactive (idle) - we need to free it
	 * first
	 */
	if (using_inactive_queue) {
		struct iwl_scd_txq_cfg_cmd cmd = {
			.scd_queue = queue,
			.enable = 0,
		};
		u8 ac;

		disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);

		spin_lock_bh(&mvm->queue_info_lock);
		ac = mvm->queue_info[queue].mac80211_ac;
		cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
		cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[ac];
		spin_unlock_bh(&mvm->queue_info_lock);

		/* Disable the queue */
		iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids,
					     true);
		iwl_trans_txq_disable(mvm->trans, queue, false);
		ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
					   &cmd);
		if (ret) {
			IWL_ERR(mvm,
				"Failed to free inactive queue %d (ret=%d)\n",
				queue, ret);

			/* Re-mark the inactive queue as inactive */
			spin_lock_bh(&mvm->queue_info_lock);
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
			spin_unlock_bh(&mvm->queue_info_lock);

			return ret;
		}
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg,
			   wdg_timeout);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tid_data[tid].is_tid_active = true;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
						 wdg_timeout, false);
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);

	return ret;
}

static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
	if (tid == IWL_MAX_TID_COUNT)
		return IEEE80211_AC_VO; /* MGMT */

	return tid_to_mac80211_ac[tid];
}

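/*
 * Flush the TX frames that were deferred for this station/TID: allocate a
 * TXQ for the TID if it doesn't have one yet, then push the deferred frames
 * down the TX path (or free them if no queue could be allocated) and
 * restart the corresponding MAC queue.
 */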
static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head deferred_tx;
	u8 mac_queue;
	bool no_queue = false; /* Marks if there is a problem with the queue */
	u8 ac;

	lockdep_assert_held(&mvm->mutex);

	skb = skb_peek(&tid_data->deferred_tx_frames);
	if (!skb)
		return;
	hdr = (void *)skb->data;

	ac = iwl_mvm_tid_to_ac_queue(tid);
	mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;

	if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE &&
	    iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
		IWL_ERR(mvm,
			"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
			mvmsta->sta_id, tid);

		/*
		 * Mark queue as problematic so later the deferred traffic is
		 * freed, as we can do nothing with it
		 */
		no_queue = true;
	}

	__skb_queue_head_init(&deferred_tx);

	/* Disable bottom-halves when entering TX path */
	local_bh_disable();
	spin_lock(&mvmsta->lock);
	skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
	spin_unlock(&mvmsta->lock);

	while ((skb = __skb_dequeue(&deferred_tx)))
		if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
			ieee80211_free_txskb(mvm->hw, skb);
	local_bh_enable();

	/* Wake queue */
	iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
}

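/*
 * Worker that goes over all stations with deferred traffic and flushes the
 * deferred frames of each pending TID, allocating queues as needed.
 */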
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long deferred_tid_traffic;
	int sta_id, tid;

	/* Check inactivity of queues */
	iwl_mvm_inactivity_check(mvm);

	mutex_lock(&mvm->mutex);

	/* Go over all stations with deferred traffic */
	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
			 IWL_MVM_STATION_COUNT) {
		clear_bit(sta_id, mvm->sta_deferred_frames);
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;

		for_each_set_bit(tid, &deferred_tid_traffic,
				 IWL_MAX_TID_COUNT + 1)
			iwl_mvm_tx_deferred_stream(mvm, sta, tid);
	}

	mutex_unlock(&mvm->mutex);
}

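/*
 * Reserve a data queue for a station that is being added, without enabling
 * it yet, so that adding the station cannot fail later for lack of queues.
 * A non-TDLS client station gets the dedicated BSS client queue if it is
 * still free.
 */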
static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;

	/*
	 * Check for inactive queues, so we don't reach a situation where we
	 * can't add a STA due to a shortage in queues that doesn't really exist
	 */
	iwl_mvm_inactivity_check(mvm);

	spin_lock_bh(&mvm->queue_info_lock);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm, "No available queues for new station\n");
		return -ENOSPC;
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta->reserved_queue = queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}

int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret, sta_id;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	if (sta_id == IWL_MVM_STATION_COUNT)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;

	/* HW restart, don't assume the memory has been zeroed */
	atomic_set(&mvm->pending_frames[sta_id], 0);
	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
	mvm_sta->tfd_queue_msk = 0;

	/*
	 * Allocate new queues for a TDLS station, unless we're in DQA mode,
	 * and then they'll be allocated dynamically
	 */
	if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) {
		ret = iwl_mvm_tdls_sta_init(mvm, sta);
		if (ret)
			return ret;
	} else if (!iwl_mvm_is_dqa_supported(mvm)) {
		for (i = 0; i < IEEE80211_NUM_ACS; i++)
			if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
				mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
	}

	/* for HW restart - reset everything but the sequence number */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_sta->tid_data[i].seq_number;
		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
		mvm_sta->tid_data[i].seq_number = seq;

		if (!iwl_mvm_is_dqa_supported(mvm))
			continue;

		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
		skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
	}
	mvm_sta->deferred_traffic_tid_map = 0;
	mvm_sta->agg_tids = 0;

	if (iwl_mvm_has_new_rx_api(mvm) &&
	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data),
				   GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		mvm_sta->dup_data = dup_data;
	}

	if (iwl_mvm_is_dqa_supported(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			goto err;
	}

	ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
		iwl_mvm_tdls_sta_deinit(mvm, sta);
	return ret;
}

int iwl_mvm_update_sta(struct iwl_mvm *mvm,
		       struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta)
{
	return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0);
}

int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will drained in fw\n",
			       mvmsta->sta_id);
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->sta_id);
		break;
	}

	return ret;
}

/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station validate that the station is indeed known to the driver (sanity
 * only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}

void iwl_mvm_sta_drained_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk);
	u8 sta_id;

	/*
	 * The mutex is needed because of the SYNC cmd, but not only: if the
	 * work would run concurrently with iwl_mvm_rm_sta, it would run before
	 * iwl_mvm_rm_sta sets the station as busy, and exit. Then
	 * iwl_mvm_rm_sta would set the station as busy, and nobody will clean
	 * that later.
	 */
	mutex_lock(&mvm->mutex);

	for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) {
		int ret;
		struct ieee80211_sta *sta =
			rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						  lockdep_is_held(&mvm->mutex));

		/*
		 * This station is in use or RCU-removed; the latter happens in
		 * managed mode, where mac80211 removes the station before we
		 * can remove it from firmware (we can only do that after the
		 * MAC is marked unassociated), and possibly while the deauth
		 * frame to disconnect from the AP is still queued. Then, the
		 * station pointer is -ENOENT when the last skb is reclaimed.
		 */
		if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT)
			continue;

		if (PTR_ERR(sta) == -EINVAL) {
			IWL_ERR(mvm, "Drained sta %d, but it is internal?\n",
				sta_id);
			continue;
		}

		if (!sta) {
			IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n",
				sta_id);
			continue;
		}

		WARN_ON(PTR_ERR(sta) != -EBUSY);
		/* This station was removed and we waited until it got drained,
		 * we can now proceed and remove it.
		 */
		ret = iwl_mvm_rm_sta_common(mvm, sta_id);
		if (ret) {
			IWL_ERR(mvm,
				"Couldn't remove sta %d after it was drained\n",
				sta_id);
			continue;
		}
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
		clear_bit(sta_id, mvm->sta_drained);

		if (mvm->tfd_drained[sta_id]) {
			unsigned long i, msk = mvm->tfd_drained[sta_id];

			for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
				iwl_mvm_disable_txq(mvm, i, i,
						    IWL_MAX_TID_COUNT, 0);

			mvm->tfd_drained[sta_id] = 0;
			IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
				       sta_id, msk);
		}
	}

	mutex_unlock(&mvm->mutex);
}

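/*
 * Disable and unmap all the TXQs that were allocated to this station's
 * TIDs (used in DQA mode, where queues are allocated per TID).
 */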
static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct iwl_mvm_sta *mvm_sta)
{
	int ac;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		if (mvm_sta->tid_data[i].txq_id == IEEE80211_INVAL_HW_QUEUE)
			continue;

		ac = iwl_mvm_tid_to_ac_queue(i);
		iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
				    vif->hw_queue[ac], i, 0);
		mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
	}
}

int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_rx_api(mvm))
		kfree(mvm_sta->dup_data);

	if ((vif->type == NL80211_IFTYPE_STATION &&
	     mvmvif->ap_sta_id == mvm_sta->sta_id) ||
	    iwl_mvm_is_dqa_supported(mvm)) {
		ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
		if (ret)
			return ret;
		/* flush its queues here since we are freeing mvm_sta */
		ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
		if (ret)
			return ret;
		ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
						    mvm_sta->tfd_queue_msk);
		if (ret)
			return ret;
		ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

		/* If DQA is supported - the queues can be disabled now */
		if (iwl_mvm_is_dqa_supported(mvm))
			iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);

		if (vif->type == NL80211_IFTYPE_STATION &&
		    mvmvif->ap_sta_id == mvm_sta->sta_id) {
			/* if associated - we can't remove the AP STA now */
			if (vif->bss_conf.assoc)
				return ret;

			/* unassoc - go ahead - remove the AP STA now */
			mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;

			/* clear d0i3_ap_sta_id if no longer relevant */
			if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
				mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
		}
	}

	/*
	 * This shouldn't happen - the TDLS channel switch should be canceled
	 * before the STA is removed.
	 */
	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == mvm_sta->sta_id)) {
		mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
		cancel_delayed_work(&mvm->tdls_cs.dwork);
	}

	/*
	 * Make sure that the tx response code sees the station as -EBUSY and
	 * calls the drain worker.
	 */
	spin_lock_bh(&mvm_sta->lock);
	/*
	 * There are frames pending on the AC queues for this station.
	 * We need to wait until all the frames are drained...
	 */
	if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) {
		rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
				   ERR_PTR(-EBUSY));
		spin_unlock_bh(&mvm_sta->lock);

		/* disable TDLS sta queues on drain complete */
		if (sta->tdls) {
			mvm->tfd_drained[mvm_sta->sta_id] =
							mvm_sta->tfd_queue_msk;
			IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n",
				       mvm_sta->sta_id);
		}

		ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
	} else {
		spin_unlock_bh(&mvm_sta->lock);

		if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
			iwl_mvm_tdls_sta_deinit(mvm, sta);

		ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
	}

	return ret;
}

int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
		      struct ieee80211_vif *vif,
		      u8 sta_id)
{
	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);

	lockdep_assert_held(&mvm->mutex);

	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
	return ret;
}

int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
			     struct iwl_mvm_int_sta *sta,
			     u32 qmask, enum nl80211_iftype iftype)
{
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT))
			return -ENOSPC;
	}

	sta->tfd_queue_msk = qmask;

	/* put a non-NULL value so iterating over the stations won't stop */
	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
	return 0;
}

static void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm,
				    struct iwl_mvm_int_sta *sta)
{
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
	sta->sta_id = IWL_MVM_STATION_COUNT;
}

static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;
	cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
							     color));

	cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}

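/*
 * Allocate and add the auxiliary (internal) station and set up the aux
 * queue it uses.
 */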
int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
{
	unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
					mvm->cfg->base_params->wd_timeout :
					IWL_WATCHDOG_DISABLED;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Map Aux queue to fifo - needs to happen before adding Aux station */
	if (!iwl_mvm_is_dqa_supported(mvm))
		iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
				      IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);

	/* Allocate aux station and assign to it the aux queue */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
				       NL80211_IFTYPE_UNSPECIFIED);
	if (ret)
		return ret;

	if (iwl_mvm_is_dqa_supported(mvm)) {
		struct iwl_trans_txq_scd_cfg cfg = {
			.fifo = IWL_MVM_TX_FIFO_MCAST,
			.sta_id = mvm->aux_sta.sta_id,
			.tid = IWL_MAX_TID_COUNT,
			.aggregate = false,
			.frame_limit = IWL_FRAME_LIMIT,
		};

		iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
				   wdg_timeout);
	}

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
					 MAC_INDEX_AUX, 0);

	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
	return ret;
}

int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);
	return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
					 mvmvif->id, 0);
}

int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
{
	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}

void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
{
	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
}

/*
 * Send the add station command for the vif's broadcast station.
 * Assumes that the station was already allocated.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 */
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	const u8 *baddr = _baddr;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_is_dqa_supported(mvm)) {
		struct iwl_trans_txq_scd_cfg cfg = {
			.fifo = IWL_MVM_TX_FIFO_VO,
			.sta_id = mvmvif->bcast_sta.sta_id,
			.tid = IWL_MAX_TID_COUNT,
			.aggregate = false,
			.frame_limit = IWL_FRAME_LIMIT,
		};
		unsigned int wdg_timeout =
			iwl_mvm_get_wd_timeout(mvm, vif, false, false);
		int queue;

		if ((vif->type == NL80211_IFTYPE_AP) &&
		    (mvmvif->bcast_sta.tfd_queue_msk &
		     BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)))
			queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
		else if ((vif->type == NL80211_IFTYPE_P2P_DEVICE) &&
			 (mvmvif->bcast_sta.tfd_queue_msk &
			  BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)))
			queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
		else if (WARN(1, "Missed required TXQ for adding bcast STA\n"))
			return -EINVAL;

		iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg,
				   wdg_timeout);
	}

	if (vif->type == NL80211_IFTYPE_ADHOC)
		baddr = vif->bss_conf.bssid;

	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
		return -ENOSPC;

	return iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
					  mvmvif->id, mvmvif->color);
}

/* Send the FW a request to remove the station from its internal data
 * structures, but DO NOT remove the entry from the local data structures. */
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	return ret;
}

int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u32 qmask = 0;

	lockdep_assert_held(&mvm->mutex);

	if (!iwl_mvm_is_dqa_supported(mvm))
		qmask = iwl_mvm_mac_get_queues_mask(vif);

	if (vif->type == NL80211_IFTYPE_AP) {
		/*
		 * The firmware defines the TFD queue mask to only be relevant
		 * for *unicast* queues, so the multicast (CAB) queue shouldn't
		 * be included.
		 */
		qmask &= ~BIT(vif->cab_queue);

		if (iwl_mvm_is_dqa_supported(mvm))
			qmask |= BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE);
	} else if (iwl_mvm_is_dqa_supported(mvm) &&
		   vif->type == NL80211_IFTYPE_P2P_DEVICE) {
		qmask |= BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE);
	}

	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
					ieee80211_vif_type_p2p(vif));
}

/* Allocate a new station entry for the broadcast station to the given vif,
 * and send it to the FW.
 * Note that each P2P mac should have its own broadcast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added */
int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
	if (ret)
		return ret;

	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);

	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, bsta);

	return ret;
}

void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);

	iwl_mvm_dealloc_bcast_sta(mvm, vif);

	return ret;
}

#define IWL_MAX_RX_BA_SESSIONS 16

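/*
 * Synchronously notify all RX queues that a BA session was deleted, so each
 * queue can drop its per-session reorder state.
 */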
static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
	struct iwl_mvm_delba_notif notif = {
		.metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
		.metadata.sync = 1,
		.delba.baid = baid,
	};
	iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
};

static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
				 struct iwl_mvm_baid_data *data)
{
	int i;

	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		int j;
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];

		spin_lock_bh(&reorder_buf->lock);
		if (likely(!reorder_buf->num_stored)) {
			spin_unlock_bh(&reorder_buf->lock);
			continue;
		}

		/*
		 * This shouldn't happen in regular DELBA since the internal
		 * delBA notification should trigger a release of all frames in
		 * the reorder buffer.
		 */
		WARN_ON(1);

		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_purge(&reorder_buf->entries[j]);
1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687
		/*
		 * Prevent timer re-arm. This prevents a very far fetched case
		 * where we timed out on the notification. There may be prior
		 * RX frames pending in the RX queue before the notification
		 * that might get processed between now and the actual deletion
		 * and we would re-arm the timer although we are deleting the
		 * reorder buffer.
		 */
		reorder_buf->removed = true;
		spin_unlock_bh(&reorder_buf->lock);
		del_timer_sync(&reorder_buf->reorder_timer);
1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705
	}
}

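/* Initialize one reorder buffer per RX queue for a new RX BA session */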
static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
					u32 sta_id,
					struct iwl_mvm_baid_data *data,
					u16 ssn, u8 buf_size)
{
	int i;

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		int j;

		reorder_buf->num_stored = 0;
		reorder_buf->head_sn = ssn;
		reorder_buf->buf_size = buf_size;
		/* rx reorder timer */
		reorder_buf->reorder_timer.function =
			iwl_mvm_reorder_timer_expired;
		reorder_buf->reorder_timer.data = (unsigned long)reorder_buf;
		init_timer(&reorder_buf->reorder_timer);
		spin_lock_init(&reorder_buf->lock);
		reorder_buf->mvm = mvm;
		reorder_buf->queue = i;
		reorder_buf->sta_id = sta_id;
		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_head_init(&reorder_buf->entries[j]);
	}
}

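/*
 * Start or stop an RX BA session for @sta on @tid in the firmware; with the
 * new RX API this also allocates and publishes the reorder (BAID) data.
 */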
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct iwl_mvm_baid_data *baid_data = NULL;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
		return -ENOSPC;
	}

	if (iwl_mvm_has_new_rx_api(mvm) && start) {
		/*
		 * Allocate here so if allocation fails we can bail out early
		 * before starting the BA session in the firmware
		 */
		baid_data = kzalloc(sizeof(*baid_data) +
				    mvm->trans->num_rx_queues *
				    sizeof(baid_data->reorder_buf[0]),
				    GFP_KERNEL);
		if (!baid_data)
			return -ENOMEM;
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (start) {
		cmd.add_immediate_ba_tid = (u8) tid;
		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
		cmd.rx_ba_window = cpu_to_le16((u16)buf_size);
	} else {
		cmd.remove_immediate_ba_tid = (u8) tid;
	}
	cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
				  STA_MODIFY_REMOVE_BA_TID;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		goto out_free;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "RX BA Session %sed in fw\n",
			       start ? "start" : "stopp");
		break;
	case ADD_STA_IMMEDIATE_BA_FAILURE:
		IWL_WARN(mvm, "RX BA Session refused by fw\n");
		ret = -ENOSPC;
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	if (ret)
		goto out_free;

	if (start) {
		u8 baid;

		mvm->rx_ba_sessions++;

		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
			ret = -EINVAL;
			goto out_free;
		}
		baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
			    IWL_ADD_STA_BAID_SHIFT);
		baid_data->baid = baid;
		baid_data->timeout = timeout;
		baid_data->last_rx = jiffies;
		init_timer(&baid_data->session_timer);
		baid_data->session_timer.function =
			iwl_mvm_rx_agg_session_expired;
		baid_data->session_timer.data =
			(unsigned long)&mvm->baid_map[baid];
		baid_data->mvm = mvm;
		baid_data->tid = tid;
		baid_data->sta_id = mvm_sta->sta_id;

		mvm_sta->tid_to_baid[tid] = baid;
		if (timeout)
			mod_timer(&baid_data->session_timer,
				  TU_TO_EXP_TIME(timeout * 2));

		iwl_mvm_init_reorder_buffer(mvm, mvm_sta->sta_id,
					    baid_data, ssn, buf_size);
		/*
		 * protect the BA data with RCU to cover a case where our
		 * internal RX sync mechanism will time out (not that it's
		 * supposed to happen) and we will free the session data while
		 * RX is being processed in parallel
		 */
		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
	} else if (mvm->rx_ba_sessions > 0) {
		u8 baid = mvm_sta->tid_to_baid[tid];

		/* check that restart flow didn't zero the counter */
		mvm->rx_ba_sessions--;
		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
			return -EINVAL;

		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
		if (WARN_ON(!baid_data))
			return -EINVAL;

		/* synchronize all rx queues so we can safely delete */
		iwl_mvm_free_reorder(mvm, baid_data);
		del_timer_sync(&baid_data->session_timer);
		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
		kfree_rcu(baid_data, rcu_head);
	}
	return 0;

out_free:
	kfree(baid_data);
	return ret;
}

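/* Update the firmware's queue mask and TID aggregation state for a station */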
int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u8 queue, bool start)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start) {
		mvm_sta->tfd_queue_msk |= BIT(queue);
		mvm_sta->tid_disable_agg &= ~BIT(tid);
	} else {
		/* In DQA-mode the queue isn't removed on agg termination */
		if (!iwl_mvm_is_dqa_supported(mvm))
			mvm_sta->tfd_queue_msk &= ~BIT(queue);
		mvm_sta->tid_disable_agg |= BIT(tid);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX;
	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	return ret;
}

const u8 tid_to_mac80211_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
};

static const u8 tid_to_ucode_ac[] = {
	AC_BE,
	AC_BK,
	AC_BK,
	AC_BE,
	AC_VI,
	AC_VI,
	AC_VO,
	AC_VO,
};

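/*
 * Prepare a TX aggregation session on @tid: reserve (or reuse) a TX queue
 * and report the starting SSN back to mac80211.
 */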
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data;
	int txq_id;
	int ret;

	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
		IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n",
			mvmsta->tid_data[tid].state);
		return -ENXIO;
	}

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvmsta->lock);

	/* possible race condition - we entered D0i3 while starting agg */
	if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
		spin_unlock_bh(&mvmsta->lock);
		IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
		return -EIO;
	}

	spin_lock_bh(&mvm->queue_info_lock);

	/*
	 * Note the possible cases:
	 *  1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed
	 *  2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free
	 *	one and mark it as reserved
	 *  3. In DQA mode, but no traffic yet on this TID: same treatment as in
	 *	non-DQA mode, since the TXQ hasn't yet been allocated
	 */
	txq_id = mvmsta->tid_data[tid].txq_id;
	if (!iwl_mvm_is_dqa_supported(mvm) ||
	    mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
		txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						 mvm->first_agg_queue,
						 mvm->last_agg_queue);
		if (txq_id < 0) {
			ret = txq_id;
			spin_unlock_bh(&mvm->queue_info_lock);
			IWL_ERR(mvm, "Failed to allocate agg queue\n");
			goto release_locks;
		}

		/* TXQ hasn't yet been enabled, so mark it only as reserved */
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
	}
	spin_unlock_bh(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "AGG for tid %d will be on queue #%d\n",
			    tid, txq_id);

	tid_data = &mvmsta->tid_data[tid];
	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->txq_id = txq_id;
	*ssn = tid_data->ssn;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->ssn,
			    tid_data->next_reclaimed);

	if (tid_data->ssn == tid_data->next_reclaimed) {
		tid_data->state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}

	ret = 0;

release_locks:
	spin_unlock_bh(&mvmsta->lock);

	return ret;
}

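/*
 * The TX aggregation session was accepted: (re)configure the TX queue for
 * aggregation, update the firmware and adjust the rate-scaling frame limit.
 */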
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u8 buf_size,
			    bool amsdu)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
	int queue, ret;
	bool alloc_queue = true;
	u16 ssn;

	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = buf_size,
		.aggregate = true,
	};

	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
		     != IWL_MAX_TID_COUNT);

	buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);

	spin_lock_bh(&mvmsta->lock);
	ssn = tid_data->ssn;
	queue = tid_data->txq_id;
	tid_data->state = IWL_AGG_ON;
	mvmsta->agg_tids |= BIT(tid);
	tid_data->ssn = 0xffff;
	tid_data->amsdu_in_ampdu_allowed = amsdu;
	spin_unlock_bh(&mvmsta->lock);

	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	/* In DQA mode, the existing queue might need to be reconfigured */
	if (iwl_mvm_is_dqa_supported(mvm)) {
		spin_lock_bh(&mvm->queue_info_lock);
		/* Maybe there is no need to even alloc a queue... */
		if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
			alloc_queue = false;
		spin_unlock_bh(&mvm->queue_info_lock);

		/*
		 * Only reconfig the SCD for the queue if the window size has
		 * changed from current (become smaller)
		 */
		if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
			/*
			 * If reconfiguring an existing queue, it first must be
			 * drained
			 */
			ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
							    BIT(queue));
			if (ret) {
				IWL_ERR(mvm,
					"Error draining queue before reconfig\n");
				return ret;
			}

			ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
						   mvmsta->sta_id, tid,
						   buf_size, ssn);
			if (ret) {
				IWL_ERR(mvm,
					"Error reconfiguring TXQ #%d\n", queue);
				return ret;
			}
		}
	}

	if (alloc_queue)
		iwl_mvm_enable_txq(mvm, queue,
				   vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
				   &cfg, wdg_timeout);

	ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
	if (ret)
		return -EIO;

	/* No need to mark as reserved */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);

	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	mvmsta->max_agg_bufsize =
		min(mvmsta->max_agg_bufsize, buf_size);
	mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;

	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
}

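/*
 * Stop a TX aggregation session on @tid; if frames are still queued in the
 * hardware the teardown is completed later, once they have been reclaimed.
 */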
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	int err;

	/*
	 * If mac80211 is cleaning its state, then say that we finished since
	 * our state has been cleared anyway.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	}

	spin_lock_bh(&mvmsta->lock);

	txq_id = tid_data->txq_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);

	mvmsta->agg_tids &= ~BIT(tid);

	spin_lock_bh(&mvm->queue_info_lock);
	/*
	 * The TXQ is marked as reserved only if no traffic came through yet.
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. Since it hasn't even been
	 * allocated through iwl_mvm_enable_txq, we can just mark it back as
	 * free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
	spin_unlock_bh(&mvm->queue_info_lock);

	switch (tid_data->state) {
	case IWL_AGG_ON:
		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "ssn = %d, next_recl = %d\n",
				    tid_data->ssn, tid_data->next_reclaimed);

		/* There are still packets for this RA / TID in the HW */
		if (tid_data->ssn != tid_data->next_reclaimed) {
			tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
			err = 0;
			break;
		}

		tid_data->ssn = 0xffff;
		tid_data->state = IWL_AGG_OFF;
		spin_unlock_bh(&mvmsta->lock);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);

		if (!iwl_mvm_is_dqa_supported(mvm)) {
			int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];

			iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0);
		}
		return 0;
	case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer times out for example.
		 */

		/* No barriers since we are under mutex */
		lockdep_assert_held(&mvm->mutex);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		tid_data->state = IWL_AGG_OFF;
		err = 0;
		break;
	default:
		IWL_ERR(mvm,
			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			mvmsta->sta_id, tid, tid_data->state);
		IWL_ERR(mvm,
			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
		err = -EINVAL;
	}

	spin_unlock_bh(&mvmsta->lock);

	return err;
}

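/*
 * Forcibly flush a TX aggregation session: clear the aggregation state,
 * drain and flush the queue, then tell the firmware the session is gone.
 */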
int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	enum iwl_mvm_agg_state old_state;

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
	 */
	spin_lock_bh(&mvmsta->lock);
	txq_id = tid_data->txq_id;
	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);
	old_state = tid_data->state;
	tid_data->state = IWL_AGG_OFF;
	mvmsta->agg_tids &= ~BIT(tid);
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);
	/*
	 * The TXQ is marked as reserved only if no traffic came through yet.
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. Since it hasn't even been
	 * allocated through iwl_mvm_enable_txq, we can just mark it back as
	 * free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (old_state >= IWL_AGG_ON) {
		iwl_mvm_drain_sta(mvm, mvmsta, true);
		if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
			IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
		iwl_trans_wait_tx_queue_empty(mvm->trans,
					      mvmsta->tfd_queue_msk);
		iwl_mvm_drain_sta(mvm, mvmsta, false);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);

		if (!iwl_mvm_is_dqa_supported(mvm)) {
			int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];

			iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue,
					    tid, 0);
		}
	}

	return 0;
}

static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
	int i, max = -1, max_offs = -1;

	lockdep_assert_held(&mvm->mutex);

	/* Pick the unused key offset with the highest 'deleted'
	 * counter. Every time a key is deleted, all the counters
	 * are incremented and the one that was just deleted is
	 * reset to zero. Thus, the highest counter is the one
	 * that was deleted longest ago. Pick that one.
	 */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (test_bit(i, mvm->fw_key_table))
			continue;
		if (mvm->fw_key_deleted[i] > max) {
			max = mvm->fw_key_deleted[i];
			max_offs = i;
		}
	}

	if (max_offs < 0)
		return STA_KEY_IDX_INVALID;

	return max_offs;
}

static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
					       struct ieee80211_vif *vif,
					       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return iwl_mvm_sta_from_mac80211(sta);

	/*
	 * The device expects GTKs for station interfaces to be
	 * installed as GTKs for the AP station. If we have no
	 * station ID, then use AP's station ID.
	 */
	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
		u8 sta_id = mvmvif->ap_sta_id;

		/*
		 * It is possible that the 'sta' parameter is NULL,
		 * for example when a GTK is removed - the sta_id will then
		 * be the AP ID, and no station was passed by mac80211.
		 */
		return iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	}

	return NULL;
}

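/* Build and send an ADD_STA_KEY command installing one key for a station */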
static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				struct iwl_mvm_sta *mvm_sta,
				struct ieee80211_key_conf *keyconf, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset)
{
	struct iwl_mvm_add_sta_key_cmd cmd = {};
	__le16 key_flags;
	int ret;
	u32 status;
	u16 keyidx;
	int i;
	u8 sta_id = mvm_sta->sta_id;

	keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
		 STA_KEY_FLG_KEYID_MSK;
	key_flags = cpu_to_le16(keyidx);
	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
		cmd.tkip_rx_tsc_byte2 = tkip_iv32;
		for (i = 0; i < 5; i++)
			cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
		memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP_256:
		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_GCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
		break;
	default:
		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
	}

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	cmd.key_offset = key_offset;
	cmd.key_flags = key_flags;
	cmd.sta_id = sta_id;

	status = ADD_STA_SUCCESS;
	if (cmd_flags & CMD_ASYNC)
		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC,
					   sizeof(cmd), &cmd);
	else
		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
						  &cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
		break;
	}

	return ret;
}

static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
				 struct ieee80211_key_conf *keyconf,
				 u8 sta_id, bool remove_key)
{
	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};

	/* verify the key details match the required command's expectations */
	if (WARN_ON((keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC) ||
		    (keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
		    (keyconf->keyidx != 4 && keyconf->keyidx != 5)))
		return -EINVAL;

	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
	igtk_cmd.sta_id = cpu_to_le32(sta_id);

	if (remove_key) {
		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
	} else {
		struct ieee80211_key_seq seq;
		const u8 *pn;

		memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		pn = seq.aes_cmac.pn;
		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
						       ((u64) pn[4] << 8) |
						       ((u64) pn[3] << 16) |
						       ((u64) pn[2] << 24) |
						       ((u64) pn[1] << 32) |
						       ((u64) pn[0] << 40));
	}

	IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
		       remove_key ? "removing" : "installing",
		       igtk_cmd.sta_id);

	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(igtk_cmd), &igtk_cmd);
}


static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return sta->addr;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
		u8 sta_id = mvmvif->ap_sta_id;
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		return sta->addr;
	}


	return NULL;
}

static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *keyconf,
				 u8 key_offset,
				 bool mcast)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int ret;
	const u8 *addr;
	struct ieee80211_key_seq seq;
	u16 p1k[5];

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   seq.tkip.iv32, p1k, 0, key_offset);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   0, NULL, 0, key_offset);
		break;
	default:
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   0, NULL, 0, key_offset);
	}

	return ret;
}

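/* Tell the firmware to invalidate a previously installed key offset */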
static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
				    struct ieee80211_key_conf *keyconf,
				    bool mcast)
{
	struct iwl_mvm_add_sta_key_cmd cmd = {};
	__le16 key_flags;
	int ret;
	u32 status;

	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				 STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	cmd.key_flags = key_flags;
	cmd.key_offset = keyconf->hw_key_idx;
	cmd.sta_id = sta_id;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
					  &cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}

int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	/* Get the station id from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (!mvm_sta) {
		IWL_ERR(mvm, "Failed to find station\n");
		return -EINVAL;
	}
	sta_id = mvm_sta->sta_id;

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/*
	 * It is possible that the 'sta' parameter is NULL, and thus
	 * there is a need to retrieve  the sta from the local station table.
	 */
	if (!sta) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta)) {
			IWL_ERR(mvm, "Invalid station id\n");
			return -EINVAL;
		}
	}

	if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
		return -EINVAL;

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use.  In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcoded the indices (because the
	 * firmware hardcodes the PTK offset to 0).  In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset  = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}

int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_STATION_COUNT;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

	if (!mvm_sta) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	sta_id = mvm_sta->sta_id;

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}

void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);

 unlock:
	rcu_read_unlock();
}

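/* Tell the firmware that a station has woken up from power save */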
void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

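/*
 * Allow the firmware to release @cnt frames to a sleeping station during a
 * PS-Poll or uAPSD service period, adjusting the count for agg queues.
 */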
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool agg)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation queues then check if the
	 * all queues combined that we're releasing frames from have
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (agg) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];
			if (WARN(tid_data->state != IWL_AGG_ON &&
				 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
				 "TID %d state is %d\n",
				 tid, tid_data->state)) {
				spin_unlock_bh(&mvmsta->lock);
				ieee80211_sta_eosp(sta);
				return;
			}

			n_queued = iwl_mvm_tid_queued(tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(cnt - remaining == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA);

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL);
	} else {
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
	}

	/* block the Tx queues until the FW updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

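/* Firmware notification of end-of-service-period: forward it to mac80211 */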
void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}

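/* Set or clear the firmware's TX-disable flag for a single station */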
void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/*
	 * Tell mac80211 to start/stop queuing tx for this station,
	 * but don't stop queuing if there are still pending frames
	 * for this station.
	 */
	if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id]))
		ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}

void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}
}

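/* During a channel switch, block TX to the AP station of this interface */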
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}