/******************************************************************************
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>

#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-agn-hw.h"
#include "iwl-agn.h"
#include "iwl-sta.h"

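/* Extract the scheduler SSN that follows the per-frame status entries */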
static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
{
	return le32_to_cpup((__le32 *)&tx_resp->status +
			    tx_resp->frame_count) & MAX_SN;
}

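/* Update the per-reason Tx failure counters for a non-aggregation frame */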
static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
{
	status &= TX_STATUS_MSK;

	switch (status) {
	case TX_STATUS_POSTPONE_DELAY:
		priv->_agn.reply_tx_stats.pp_delay++;
		break;
	case TX_STATUS_POSTPONE_FEW_BYTES:
		priv->_agn.reply_tx_stats.pp_few_bytes++;
		break;
	case TX_STATUS_POSTPONE_BT_PRIO:
		priv->_agn.reply_tx_stats.pp_bt_prio++;
		break;
	case TX_STATUS_POSTPONE_QUIET_PERIOD:
		priv->_agn.reply_tx_stats.pp_quiet_period++;
		break;
	case TX_STATUS_POSTPONE_CALC_TTAK:
		priv->_agn.reply_tx_stats.pp_calc_ttak++;
		break;
	case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
		priv->_agn.reply_tx_stats.int_crossed_retry++;
		break;
	case TX_STATUS_FAIL_SHORT_LIMIT:
		priv->_agn.reply_tx_stats.short_limit++;
		break;
	case TX_STATUS_FAIL_LONG_LIMIT:
		priv->_agn.reply_tx_stats.long_limit++;
		break;
	case TX_STATUS_FAIL_FIFO_UNDERRUN:
		priv->_agn.reply_tx_stats.fifo_underrun++;
		break;
	case TX_STATUS_FAIL_DRAIN_FLOW:
		priv->_agn.reply_tx_stats.drain_flow++;
		break;
	case TX_STATUS_FAIL_RFKILL_FLUSH:
		priv->_agn.reply_tx_stats.rfkill_flush++;
		break;
	case TX_STATUS_FAIL_LIFE_EXPIRE:
		priv->_agn.reply_tx_stats.life_expire++;
		break;
	case TX_STATUS_FAIL_DEST_PS:
		priv->_agn.reply_tx_stats.dest_ps++;
		break;
	case TX_STATUS_FAIL_HOST_ABORTED:
		priv->_agn.reply_tx_stats.host_abort++;
		break;
	case TX_STATUS_FAIL_BT_RETRY:
		priv->_agn.reply_tx_stats.bt_retry++;
		break;
	case TX_STATUS_FAIL_STA_INVALID:
		priv->_agn.reply_tx_stats.sta_invalid++;
		break;
	case TX_STATUS_FAIL_FRAG_DROPPED:
		priv->_agn.reply_tx_stats.frag_drop++;
		break;
	case TX_STATUS_FAIL_TID_DISABLE:
		priv->_agn.reply_tx_stats.tid_disable++;
		break;
	case TX_STATUS_FAIL_FIFO_FLUSHED:
		priv->_agn.reply_tx_stats.fifo_flush++;
		break;
	case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
		priv->_agn.reply_tx_stats.insuff_cf_poll++;
		break;
	case TX_STATUS_FAIL_PASSIVE_NO_RX:
		priv->_agn.reply_tx_stats.fail_hw_drop++;
		break;
	case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
		priv->_agn.reply_tx_stats.sta_color_mismatch++;
		break;
	default:
		priv->_agn.reply_tx_stats.unknown++;
		break;
	}
}

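/* Update the per-reason Tx failure counters for an aggregation frame */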
static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
{
	status &= AGG_TX_STATUS_MSK;

	switch (status) {
	case AGG_TX_STATE_UNDERRUN_MSK:
		priv->_agn.reply_agg_tx_stats.underrun++;
		break;
	case AGG_TX_STATE_BT_PRIO_MSK:
		priv->_agn.reply_agg_tx_stats.bt_prio++;
		break;
	case AGG_TX_STATE_FEW_BYTES_MSK:
		priv->_agn.reply_agg_tx_stats.few_bytes++;
		break;
	case AGG_TX_STATE_ABORT_MSK:
		priv->_agn.reply_agg_tx_stats.abort++;
		break;
	case AGG_TX_STATE_LAST_SENT_TTL_MSK:
		priv->_agn.reply_agg_tx_stats.last_sent_ttl++;
		break;
	case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
		priv->_agn.reply_agg_tx_stats.last_sent_try++;
		break;
	case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
		priv->_agn.reply_agg_tx_stats.last_sent_bt_kill++;
		break;
	case AGG_TX_STATE_SCD_QUERY_MSK:
		priv->_agn.reply_agg_tx_stats.scd_query++;
		break;
	case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
		priv->_agn.reply_agg_tx_stats.bad_crc32++;
		break;
	case AGG_TX_STATE_RESPONSE_MSK:
		priv->_agn.reply_agg_tx_stats.response++;
		break;
	case AGG_TX_STATE_DUMP_TX_MSK:
		priv->_agn.reply_agg_tx_stats.dump_tx++;
		break;
	case AGG_TX_STATE_DELAY_TX_MSK:
		priv->_agn.reply_agg_tx_stats.delay_tx++;
		break;
	default:
		priv->_agn.reply_agg_tx_stats.unknown++;
		break;
	}
}

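/*
 * Fill in the mac80211 tx_info status for a completed frame. If a frame
 * on a station interface failed with PASSIVE_NO_RX, remember that and
 * stop the queue until traffic is seen on the passive channel.
 */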
static void iwlagn_set_tx_status(struct iwl_priv *priv,
				 struct ieee80211_tx_info *info,
				 struct iwl_rxon_context *ctx,
				 struct iwlagn_tx_resp *tx_resp,
				 int txq_id, bool is_agg)
{
	u16  status = le16_to_cpu(tx_resp->status.status);

	info->status.rates[0].count = tx_resp->failure_frame + 1;
	if (is_agg)
		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
	info->flags |= iwl_tx_status_to_mac80211(status);
	iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
				    info);
	if (!iwl_is_tx_success(status))
		iwlagn_count_tx_err_status(priv, status);

	if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
	    iwl_is_associated_ctx(ctx) && ctx->vif &&
	    ctx->vif->type == NL80211_IFTYPE_STATION) {
		ctx->last_tx_rejected = true;
		iwl_stop_queue(priv, &priv->txq[txq_id]);
	}

	IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
			   "0x%x retries %d\n",
			   txq_id,
			   iwl_get_tx_fail_reason(status), status,
			   le32_to_cpu(tx_resp->rate_n_flags),
			   tx_resp->failure_frame);
}

#ifdef CONFIG_IWLWIFI_DEBUG
#define AGG_TX_STATE_FAIL(x) case AGG_TX_STATE_ ## x: return #x

const char *iwl_get_agg_tx_fail_reason(u16 status)
{
	status &= AGG_TX_STATUS_MSK;
	switch (status) {
	case AGG_TX_STATE_TRANSMITTED:
		return "SUCCESS";
		AGG_TX_STATE_FAIL(UNDERRUN_MSK);
		AGG_TX_STATE_FAIL(BT_PRIO_MSK);
		AGG_TX_STATE_FAIL(FEW_BYTES_MSK);
		AGG_TX_STATE_FAIL(ABORT_MSK);
		AGG_TX_STATE_FAIL(LAST_SENT_TTL_MSK);
		AGG_TX_STATE_FAIL(LAST_SENT_TRY_CNT_MSK);
		AGG_TX_STATE_FAIL(LAST_SENT_BT_KILL_MSK);
		AGG_TX_STATE_FAIL(SCD_QUERY_MSK);
		AGG_TX_STATE_FAIL(TEST_BAD_CRC32_MSK);
		AGG_TX_STATE_FAIL(RESPONSE_MSK);
		AGG_TX_STATE_FAIL(DUMP_TX_MSK);
		AGG_TX_STATE_FAIL(DELAY_TX_MSK);
	}

	return "UNKNOWN";
}
#endif /* CONFIG_IWLWIFI_DEBUG */

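/*
 * Process the Tx response for frames sent from an aggregation queue:
 * build the bitmap of frames (within the 64-frame window) that still
 * await the block-ack, tracking the lowest "start" index as it is found.
 */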
static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
				      struct iwl_ht_agg *agg,
				      struct iwlagn_tx_resp *tx_resp,
				      int txq_id, u16 start_idx)
{
	u16 status;
	struct agg_tx_status *frame_status = &tx_resp->status;
	struct ieee80211_hdr *hdr = NULL;
	int i, sh, idx;
	u16 seq;

	if (agg->wait_for_ba)
		IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");

	agg->frame_count = tx_resp->frame_count;
	agg->start_idx = start_idx;
	agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
	agg->bitmap = 0;

	/* # frames attempted by Tx command */
	if (agg->frame_count == 1) {
		struct iwl_tx_info *txb;

		/* Only one frame was attempted; no block-ack will arrive */
		idx = start_idx;

		IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
				   agg->frame_count, agg->start_idx, idx);
		txb = &priv->txq[txq_id].txb[idx];
		iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(txb->skb),
				     txb->ctx, tx_resp, txq_id, true);
		agg->wait_for_ba = 0;
	} else {
		/* Two or more frames were attempted; expect block-ack */
		u64 bitmap = 0;

		/*
		 * Start is the lowest frame sent. It may not be the first
		 * frame in the batch; we figure this out dynamically during
		 * the following loop.
		 */
		int start = agg->start_idx;

		/* Construct bit-map of pending frames within Tx window */
		for (i = 0; i < agg->frame_count; i++) {
			u16 sc;
			status = le16_to_cpu(frame_status[i].status);
			seq  = le16_to_cpu(frame_status[i].sequence);
			idx = SEQ_TO_INDEX(seq);
			txq_id = SEQ_TO_QUEUE(seq);

			if (status & AGG_TX_STATUS_MSK)
				iwlagn_count_agg_tx_err_status(priv, status);

			if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
				      AGG_TX_STATE_ABORT_MSK))
				continue;

			IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
					   agg->frame_count, txq_id, idx);
			IWL_DEBUG_TX_REPLY(priv, "status %s (0x%08x), "
					   "try-count (0x%08x)\n",
					   iwl_get_agg_tx_fail_reason(status),
					   status & AGG_TX_STATUS_MSK,
					   status & AGG_TX_TRY_MSK);

			hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
			if (!hdr) {
				IWL_ERR(priv,
					"BUG_ON idx doesn't point to valid skb"
					" idx=%d, txq_id=%d\n", idx, txq_id);
				return -1;
			}

			sc = le16_to_cpu(hdr->seq_ctrl);
			if (idx != (SEQ_TO_SN(sc) & 0xff)) {
				IWL_ERR(priv,
					"BUG_ON idx doesn't match seq control"
					" idx=%d, seq_idx=%d, seq=%d\n",
					  idx, SEQ_TO_SN(sc),
					  hdr->seq_ctrl);
				return -1;
			}

			IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
					   i, idx, SEQ_TO_SN(sc));

			/*
			 * sh -> how many frames ahead of the starting frame is
			 * the current one?
			 *
			 * Note that all frames sent in the batch must be in a
			 * 64-frame window, so this number should be in [0,63].
			 * If outside of this window, then we've found a new
			 * "first" frame in the batch and need to change start.
			 */
			sh = idx - start;

			/*
			 * If >= 64, out of window. start must be at the front
			 * of the circular buffer, idx must be near the end of
			 * the buffer, and idx is the new "first" frame. Shift
			 * the indices around.
			 */
			if (sh >= 64) {
				/* Shift bitmap by start - idx, wrapped */
				sh = 0x100 - idx + start;
				bitmap = bitmap << sh;
				/* Now idx is the new start so sh = 0 */
				sh = 0;
				start = idx;
			/*
			 * If <= -64 then wraps the 256-pkt circular buffer
			 * (e.g., start = 255 and idx = 0, sh should be 1)
			 */
			} else if (sh <= -64) {
				sh  = 0x100 - start + idx;
			/*
			 * If < 0 but > -64, out of window. idx is before start
			 * but not wrapped. Shift the indices around.
			 */
			} else if (sh < 0) {
				/* Shift by how far start is ahead of idx */
				sh = start - idx;
				bitmap = bitmap << sh;
				/* Now idx is the new start so sh = 0 */
				start = idx;
				sh = 0;
			}
			/* Sequence number start + sh was sent in this batch */
			bitmap |= 1ULL << sh;
			IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
					   start, (unsigned long long)bitmap);
		}

		/*
		 * Store the bitmap and possibly the new start, if we wrapped
		 * the buffer above
		 */
		agg->bitmap = bitmap;
		agg->start_idx = start;
		IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
				   agg->frame_count, agg->start_idx,
				   (unsigned long long)agg->bitmap);

		if (bitmap)
			agg->wait_for_ba = 1;
	}
	return 0;
}

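/*
 * If a frame was dropped because of an RF-kill flush, schedule the Tx
 * flush work (unless the driver is already shutting down).
 */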
void iwl_check_abort_status(struct iwl_priv *priv,
			    u8 frame_count, u32 status)
{
	if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
		IWL_ERR(priv, "Tx flush command to flush out all frames\n");
		if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
			queue_work(priv->workqueue, &priv->tx_flush);
	}
}

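/*
 * Handle the REPLY_TX notification: reclaim completed frames from the Tx
 * queue and report their status to mac80211.
 */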
static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct ieee80211_tx_info *info;
	struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
	struct iwl_tx_info *txb;
	u32 status = le16_to_cpu(tx_resp->status.status);
	int tid;
	int sta_id;
	int freed;
	unsigned long flags;

	if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
		IWL_ERR(priv, "%s: Read index for DMA queue txq_id (%d) "
			  "index %d is out of range [0-%d] %d %d\n", __func__,
			  txq_id, index, txq->q.n_bd, txq->q.write_ptr,
			  txq->q.read_ptr);
		return;
	}

	txq->time_stamp = jiffies;
	txb = &txq->txb[txq->q.read_ptr];
	info = IEEE80211_SKB_CB(txb->skb);
	memset(&info->status, 0, sizeof(info->status));

	tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
		IWLAGN_TX_RES_TID_POS;
	sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
		IWLAGN_TX_RES_RA_POS;

	spin_lock_irqsave(&priv->sta_lock, flags);
	if (txq->sched_retry) {
		const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp);
		struct iwl_ht_agg *agg;

		agg = &priv->stations[sta_id].tid[tid].agg;
		/*
		 * If the BT kill count is non-zero, we'll get this
		 * notification again.
		 */
		if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
		    priv->cfg->bt_params &&
		    priv->cfg->bt_params->advanced_bt_coexist) {
			IWL_DEBUG_COEX(priv, "receive reply tx with bt_kill\n");
		}
		iwlagn_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);

		/* check if BAR is needed */
		if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

		if (txq->q.read_ptr != (scd_ssn & 0xff)) {
			index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
			IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim "
					"scd_ssn=%d idx=%d txq=%d swq=%d\n",
					scd_ssn , index, txq_id, txq->swq_id);

			freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
			iwl_free_tfds_in_queue(priv, sta_id, tid, freed);

			if (priv->mac80211_registered &&
			    (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
			    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
				iwl_wake_queue(priv, txq);
		}
	} else {
		iwlagn_set_tx_status(priv, info, txb->ctx, tx_resp,
				     txq_id, false);
		freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);

		if (priv->mac80211_registered &&
		    iwl_queue_space(&txq->q) > txq->q.low_mark &&
		    status != TX_STATUS_FAIL_PASSIVE_NO_RX)
			iwl_wake_queue(priv, txq);
	}

	iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);

	iwl_check_abort_status(priv, tx_resp->frame_count, status);
	spin_unlock_irqrestore(&priv->sta_lock, flags);
}

void iwlagn_rx_handler_setup(struct iwl_priv *priv)
{
	/* init calibration handlers */
	priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
					iwlagn_rx_calib_result;
	priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;

	/* set up notification wait support */
	spin_lock_init(&priv->_agn.notif_wait_lock);
	INIT_LIST_HEAD(&priv->_agn.notif_waits);
	init_waitqueue_head(&priv->_agn.notif_waitq);
}

void iwlagn_setup_deferred_work(struct iwl_priv *priv)
{
	/*
	 * Nothing needs to be done here anymore;
	 * keep this hook around for future use if needed.
	 */
}

int iwlagn_hw_valid_rtc_data_addr(u32 addr)
{
	return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) &&
		(addr < IWLAGN_RTC_DATA_UPPER_BOUND);
}

int iwlagn_send_tx_power(struct iwl_priv *priv)
{
	struct iwlagn_tx_power_dbm_cmd tx_power_cmd;
	u8 tx_ant_cfg_cmd;

	if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
		      "TX Power requested while scanning!\n"))
		return -EAGAIN;

	/* The command takes the limit in half-dBm units, so multiply by 2 */
	tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);

	if (priv->tx_power_lmt_in_half_dbm &&
	    priv->tx_power_lmt_in_half_dbm < tx_power_cmd.global_lmt) {
		/*
		 * For newer devices that use the enhanced/extended tx power
		 * table in EEPROM, the format is in half dBm. The driver must
		 * convert it to dBm before reporting to mac80211, which can
		 * lose 1/2 dBm of resolution. The driver rounds up before
		 * reporting, but that could push the tx power 1/2 dBm over
		 * the regulatory limit. Check here: if "tx_power_user_lmt"
		 * is higher than the EEPROM value (in half-dBm format),
		 * lower the tx power to the EEPROM limit.
		 */
		tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm;
	}
	tx_power_cmd.flags = IWLAGN_TX_POWER_NO_CLOSED;
	tx_power_cmd.srv_chan_lmt = IWLAGN_TX_POWER_AUTO;

	if (IWL_UCODE_API(priv->ucode_ver) == 1)
		tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1;
	else
		tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;

	return iwl_send_cmd_pdu(priv, tx_ant_cfg_cmd, sizeof(tx_power_cmd),
				&tx_power_cmd);
}

void iwlagn_temperature(struct iwl_priv *priv)
{
	/* store temperature from correct statistics (in Celsius) */
	priv->temperature = le32_to_cpu(priv->statistics.common.temperature);
	iwl_tt_handler(priv);
}

u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv)
{
	struct iwl_eeprom_calib_hdr {
		u8 version;
		u8 pa_type;
		u16 voltage;
	} *hdr;

	hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
							EEPROM_CALIB_ALL);
	return hdr->version;

}

/*
 * EEPROM
 */
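/*
 * Resolve an indirect EEPROM address: the link type encoded in the
 * address selects a section whose word offset is stored in the EEPROM.
 */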
static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
{
	u16 offset = 0;

	if ((address & INDIRECT_ADDRESS) == 0)
		return address;

	switch (address & INDIRECT_TYPE_MSK) {
	case INDIRECT_HOST:
		offset = iwl_eeprom_query16(priv, EEPROM_LINK_HOST);
		break;
	case INDIRECT_GENERAL:
		offset = iwl_eeprom_query16(priv, EEPROM_LINK_GENERAL);
		break;
	case INDIRECT_REGULATORY:
		offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);
		break;
	case INDIRECT_TXP_LIMIT:
		offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT);
		break;
	case INDIRECT_TXP_LIMIT_SIZE:
		offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT_SIZE);
		break;
	case INDIRECT_CALIBRATION:
		offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION);
		break;
	case INDIRECT_PROCESS_ADJST:
		offset = iwl_eeprom_query16(priv, EEPROM_LINK_PROCESS_ADJST);
		break;
	case INDIRECT_OTHERS:
		offset = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS);
		break;
	default:
		IWL_ERR(priv, "illegal indirect type: 0x%X\n",
		address & INDIRECT_TYPE_MSK);
		break;
	}

	/* translate the offset from words to bytes */
	return (address & ADDRESS_MSK) + (offset << 1);
}

const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv,
					   size_t offset)
{
	u32 address = eeprom_indirect_address(priv, offset);
	BUG_ON(address >= priv->cfg->base_params->eeprom_size);
	return &priv->eeprom[address];
}

struct iwl_mod_params iwlagn_mod_params = {
	.amsdu_size_8K = 1,
	.restart_fw = 1,
	.plcp_check = true,
	.bt_coex_active = true,
	.no_sleep_autoadjust = true,
	.power_level = IWL_POWER_INDEX_1,
	/* the rest are 0 by default */
};

int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */

	rb_timeout = RX_RB_TIMEOUT;

	if (iwlagn_mod_params.amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
			   rb_size|
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	return 0;
}

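
/* Select VMAIN as the device power source (see the note below for VAUX) */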
static void iwlagn_set_pwr_vmain(struct iwl_priv *priv)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

		if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
			iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					       ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
			       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

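/*
 * Basic NIC bring-up: APM init, power source selection, Rx/Tx queue
 * setup and, where supported, shadow register enablement.
 */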
int iwlagn_hw_nic_init(struct iwl_priv *priv)
{
	unsigned long flags;
	struct iwl_rx_queue *rxq = &priv->rxq;

	/* nic_init */
	spin_lock_irqsave(&priv->lock, flags);
	priv->cfg->ops->lib->apm_ops.init(priv);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&priv->lock, flags);

	iwlagn_set_pwr_vmain(priv);

	priv->cfg->ops->lib->apm_ops.config(priv);

	/* Allocate the RX queue, or reset if it is already allocated */
	priv->trans.ops->rx_init(priv);

	iwlagn_rx_replenish(priv);

	iwlagn_rx_init(priv, rxq);

	spin_lock_irqsave(&priv->lock, flags);

	rxq->need_update = 1;
	iwl_rx_queue_update_write_ptr(priv, rxq);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Allocate or reset and init all Tx and Command queues */
	if (priv->trans.ops->tx_init(priv))
		return -ENOMEM;

	if (priv->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL,
			0x800FFFFF);
	}

	set_bit(STATUS_INIT, &priv->status);

	return 0;
}

/**
 * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv,
					  dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}

/**
 * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
void iwlagn_rx_queue_restock(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;

	spin_lock_irqsave(&rxq->lock, flags);
	while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv,
							      rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		queue_work(priv->workqueue, &priv->rx_replenish);


	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		iwl_rx_queue_update_write_ptr(priv, rxq);
	}
}

/**
 * iwlagn_rx_allocate - Move all used buffers from rx_used to rx_free
 *
 * A new receive page is allocated for each slot moved to rx_free.
 *
 * The Rx queue itself is then restocked via iwlagn_rx_queue_restock by the
 * callers (iwlagn_rx_replenish / iwlagn_rx_replenish_now), possibly from a
 * scheduled work item (except during initialization).
 */
void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (priv->hw_params.rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
		if (!page) {
			if (net_ratelimit())
				IWL_DEBUG_INFO(priv, "alloc_pages failed, "
					       "order: %d\n",
					       priv->hw_params.rx_page_order);

			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
				IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ?  "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, priv->hw_params.rx_page_order);
			return;
		}
		element = rxq->rx_used.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		spin_unlock_irqrestore(&rxq->lock, flags);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma = dma_map_page(priv->bus.dev, page, 0,
				PAGE_SIZE << priv->hw_params.rx_page_order,
				DMA_FROM_DEVICE);
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}

void iwlagn_rx_replenish(struct iwl_priv *priv)
{
	unsigned long flags;

	iwlagn_rx_allocate(priv, GFP_KERNEL);

	spin_lock_irqsave(&priv->lock, flags);
	iwlagn_rx_queue_restock(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}

void iwlagn_rx_replenish_now(struct iwl_priv *priv)
{
	iwlagn_rx_allocate(priv, GFP_ATOMIC);

	iwlagn_rx_queue_restock(priv);
}

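/*
 * Convert a uCode rate_n_flags value to a mac80211 rate index: the MCS
 * number for HT rates, the legacy table index otherwise, or -1 on no match.
 */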
int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
{
	int idx = 0;
	int band_offset = 0;

	/* HT rate format: mac80211 wants an MCS number, which is just LSB */
	if (rate_n_flags & RATE_MCS_HT_MSK) {
		idx = (rate_n_flags & 0xff);
		return idx;
	/* Legacy rate format, search for match in table */
	} else {
		if (band == IEEE80211_BAND_5GHZ)
			band_offset = IWL_FIRST_OFDM_RATE;
		for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
			if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
				return idx - band_offset;
	}

	return -1;
}

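/* Fill a single passive channel entry for an internal (radio reset) scan */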
static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
					   struct ieee80211_vif *vif,
					   enum ieee80211_band band,
					   struct iwl_scan_channel *scan_ch)
{
	const struct ieee80211_supported_band *sband;
	u16 passive_dwell = 0;
	u16 active_dwell = 0;
	int added = 0;
	u16 channel = 0;

	sband = iwl_get_hw_mode(priv, band);
	if (!sband) {
		IWL_ERR(priv, "invalid band\n");
		return added;
	}

	active_dwell = iwl_get_active_dwell_time(priv, band, 0);
	passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);

	if (passive_dwell <= active_dwell)
		passive_dwell = active_dwell + 1;

	channel = iwl_get_single_channel_number(priv, band);
	if (channel) {
		scan_ch->channel = cpu_to_le16(channel);
		scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
		scan_ch->active_dwell = cpu_to_le16(active_dwell);
		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
		/* Set txpower levels to defaults */
		scan_ch->dsp_atten = 110;
		if (band == IEEE80211_BAND_5GHZ)
			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else
			scan_ch->tx_gain = ((1 << 5) | (5 << 3));
		added++;
	} else
		IWL_ERR(priv, "no valid channel found\n");
	return added;
}

static int iwl_get_channels_for_scan(struct iwl_priv *priv,
				     struct ieee80211_vif *vif,
				     enum ieee80211_band band,
				     u8 is_active, u8 n_probes,
				     struct iwl_scan_channel *scan_ch)
{
	struct ieee80211_channel *chan;
	const struct ieee80211_supported_band *sband;
	const struct iwl_channel_info *ch_info;
	u16 passive_dwell = 0;
	u16 active_dwell = 0;
	int added, i;
	u16 channel;

	sband = iwl_get_hw_mode(priv, band);
	if (!sband)
		return 0;

	active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
	passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);

	if (passive_dwell <= active_dwell)
		passive_dwell = active_dwell + 1;

	for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
		chan = priv->scan_request->channels[i];

		if (chan->band != band)
			continue;

		channel = chan->hw_value;
		scan_ch->channel = cpu_to_le16(channel);

		ch_info = iwl_get_channel_info(priv, band, channel);
		if (!is_channel_valid(ch_info)) {
			IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n",
					channel);
			continue;
		}

		if (!is_active || is_channel_passive(ch_info) ||
		    (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
			scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
		else
			scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;

		if (n_probes)
			scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);

		scan_ch->active_dwell = cpu_to_le16(active_dwell);
		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);

		/* Set txpower levels to defaults */
		scan_ch->dsp_atten = 110;

		/* NOTE: if we were doing 6Mb OFDM for scans we'd use
		 * power level:
		 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
		 */
		if (band == IEEE80211_BAND_5GHZ)
			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else
			scan_ch->tx_gain = ((1 << 5) | (5 << 3));

		IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
			       channel, le32_to_cpu(scan_ch->type),
			       (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
				"ACTIVE" : "PASSIVE",
			       (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
			       active_dwell : passive_dwell);

		scan_ch++;
		added++;
	}

	IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
	return added;
}

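/* Copy the pending off-channel TX frame into the scan command payload */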
static int iwl_fill_offch_tx(struct iwl_priv *priv, void *data, size_t maxlen)
{
	struct sk_buff *skb = priv->_agn.offchan_tx_skb;

	if (skb->len < maxlen)
		maxlen = skb->len;

	memcpy(data, skb->data, maxlen);

	return maxlen;
}

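/*
 * Build and send REPLY_SCAN_CMD for a normal, internal (radio reset) or
 * off-channel TX scan.
 */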
int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
	struct iwl_host_cmd cmd = {
		.id = REPLY_SCAN_CMD,
		.len = { sizeof(struct iwl_scan_cmd), },
	};
	struct iwl_scan_cmd *scan;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	u32 rate_flags = 0;
	u16 cmd_len;
	u16 rx_chain = 0;
	enum ieee80211_band band;
	u8 n_probes = 0;
	u8 rx_ant = priv->hw_params.valid_rx_ant;
	u8 rate;
	bool is_active = false;
	int  chan_mod;
	u8 active_chains;
	u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
	int ret;

	lockdep_assert_held(&priv->mutex);

	if (vif)
		ctx = iwl_rxon_ctx_from_vif(vif);

	if (!priv->scan_cmd) {
		priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
					 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
		if (!priv->scan_cmd) {
			IWL_DEBUG_SCAN(priv,
				       "fail to allocate memory for scan\n");
			return -ENOMEM;
		}
	}
	scan = priv->scan_cmd;
	memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);

	scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
	scan->quiet_time = IWL_ACTIVE_QUIET_TIME;

	if (priv->scan_type != IWL_SCAN_OFFCH_TX &&
	    iwl_is_any_associated(priv)) {
		u16 interval = 0;
		u32 extra;
		u32 suspend_time = 100;
		u32 scan_suspend_time = 100;

		IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
		switch (priv->scan_type) {
		case IWL_SCAN_OFFCH_TX:
			WARN_ON(1);
			break;
		case IWL_SCAN_RADIO_RESET:
			interval = 0;
			break;
		case IWL_SCAN_NORMAL:
			interval = vif->bss_conf.beacon_int;
			break;
		}

		scan->suspend_time = 0;
		scan->max_out_time = cpu_to_le32(200 * 1024);
		if (!interval)
			interval = suspend_time;

		extra = (suspend_time / interval) << 22;
		scan_suspend_time = (extra |
		    ((suspend_time % interval) * 1024));
		scan->suspend_time = cpu_to_le32(scan_suspend_time);
		IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
			       scan_suspend_time, interval);
	} else if (priv->scan_type == IWL_SCAN_OFFCH_TX) {
		scan->suspend_time = 0;
		scan->max_out_time =
			cpu_to_le32(1024 * priv->_agn.offchan_tx_timeout);
	}

	switch (priv->scan_type) {
	case IWL_SCAN_RADIO_RESET:
		IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
		break;
	case IWL_SCAN_NORMAL:
		if (priv->scan_request->n_ssids) {
			int i, p = 0;
			IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
			for (i = 0; i < priv->scan_request->n_ssids; i++) {
				/* always does wildcard anyway */
				if (!priv->scan_request->ssids[i].ssid_len)
					continue;
				scan->direct_scan[p].id = WLAN_EID_SSID;
				scan->direct_scan[p].len =
					priv->scan_request->ssids[i].ssid_len;
				memcpy(scan->direct_scan[p].ssid,
				       priv->scan_request->ssids[i].ssid,
				       priv->scan_request->ssids[i].ssid_len);
				n_probes++;
				p++;
			}
			is_active = true;
		} else
			IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
		break;
	case IWL_SCAN_OFFCH_TX:
		IWL_DEBUG_SCAN(priv, "Start offchannel TX scan.\n");
		break;
	}

	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
	scan->tx_cmd.sta_id = ctx->bcast_sta_id;
	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	switch (priv->scan_band) {
	case IEEE80211_BAND_2GHZ:
		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
		chan_mod = le32_to_cpu(
			priv->contexts[IWL_RXON_CTX_BSS].active.flags &
						RXON_FLG_CHANNEL_MODE_MSK)
				       >> RXON_FLG_CHANNEL_MODE_POS;
		if (chan_mod == CHANNEL_MODE_PURE_40) {
			rate = IWL_RATE_6M_PLCP;
		} else {
			rate = IWL_RATE_1M_PLCP;
			rate_flags = RATE_MCS_CCK_MSK;
		}
		/*
		 * Internal scans are passive, so we can indiscriminately set
		 * the BT ignore flag on 2.4 GHz since it applies to TX only.
		 */
		if (priv->cfg->bt_params &&
		    priv->cfg->bt_params->advanced_bt_coexist)
			scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT;
		break;
	case IEEE80211_BAND_5GHZ:
		rate = IWL_RATE_6M_PLCP;
		break;
	default:
		IWL_WARN(priv, "Invalid scan band\n");
		return -EIO;
	}

	/*
	 * If active scanning is requested but a certain channel is
	 * marked passive, we can do active scanning if we detect
	 * transmissions.
	 *
	 * There is an issue with some firmware versions that triggers
	 * a sysassert on a "good CRC threshold" of zero (== disabled),
	 * on a radar channel even though this means that we should NOT
	 * send probes.
	 *
	 * The "good CRC threshold" is the number of frames that we
	 * need to receive during our dwell time on a channel before
	 * sending out probes -- setting this to a huge value will
	 * mean we never reach it, but at the same time work around
	 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
	 * here instead of IWL_GOOD_CRC_TH_DISABLED.
	 *
	 * This was fixed in later versions along with some other
	 * scan changes, and the threshold behaves as a flag in those
	 * versions.
	 */
	if (priv->new_scan_threshold_behaviour)
		scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
						IWL_GOOD_CRC_TH_DISABLED;
	else
		scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
						IWL_GOOD_CRC_TH_NEVER;

	band = priv->scan_band;

	if (priv->cfg->scan_rx_antennas[band])
		rx_ant = priv->cfg->scan_rx_antennas[band];

	if (band == IEEE80211_BAND_2GHZ &&
	    priv->cfg->bt_params &&
	    priv->cfg->bt_params->advanced_bt_coexist) {
		/* transmit 2.4 GHz probes only on first antenna */
		scan_tx_antennas = first_antenna(scan_tx_antennas);
	}

	priv->scan_tx_ant[band] = iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band],
						    scan_tx_antennas);
	rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
	scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);

	/* In power save mode use one chain, otherwise use all chains */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		/* rx_ant has been set to all valid chains previously */
		active_chains = rx_ant &
				((u8)(priv->chain_noise_data.active_chains));
		if (!active_chains)
			active_chains = rx_ant;

		IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
				priv->chain_noise_data.active_chains);

		rx_ant = first_antenna(active_chains);
	}
	if (priv->cfg->bt_params &&
	    priv->cfg->bt_params->advanced_bt_coexist &&
	    priv->bt_full_concurrent) {
		/* operated as 1x1 in full concurrency mode */
		rx_ant = first_antenna(rx_ant);
	}

	/* MIMO is not used here, but value is required */
	rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
	rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
	scan->rx_chain = cpu_to_le16(rx_chain);
	switch (priv->scan_type) {
	case IWL_SCAN_NORMAL:
		cmd_len = iwl_fill_probe_req(priv,
					(struct ieee80211_mgmt *)scan->data,
					vif->addr,
					priv->scan_request->ie,
					priv->scan_request->ie_len,
					IWL_MAX_SCAN_SIZE - sizeof(*scan));
		break;
	case IWL_SCAN_RADIO_RESET:
		/* use bcast addr, will not be transmitted but must be valid */
		cmd_len = iwl_fill_probe_req(priv,
					(struct ieee80211_mgmt *)scan->data,
					iwl_bcast_addr, NULL, 0,
					IWL_MAX_SCAN_SIZE - sizeof(*scan));
		break;
	case IWL_SCAN_OFFCH_TX:
		cmd_len = iwl_fill_offch_tx(priv, scan->data,
					    IWL_MAX_SCAN_SIZE
					     - sizeof(*scan)
					     - sizeof(struct iwl_scan_channel));
		scan->scan_flags |= IWL_SCAN_FLAGS_ACTION_FRAME_TX;
		break;
	default:
		BUG();
	}
	scan->tx_cmd.len = cpu_to_le16(cmd_len);

	scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
			       RXON_FILTER_BCON_AWARE_MSK);

	switch (priv->scan_type) {
	case IWL_SCAN_RADIO_RESET:
		scan->channel_count =
			iwl_get_single_channel_for_scan(priv, vif, band,
				(void *)&scan->data[cmd_len]);
		break;
	case IWL_SCAN_NORMAL:
		scan->channel_count =
			iwl_get_channels_for_scan(priv, vif, band,
				is_active, n_probes,
				(void *)&scan->data[cmd_len]);
		break;
	case IWL_SCAN_OFFCH_TX: {
		struct iwl_scan_channel *scan_ch;

		scan->channel_count = 1;

		scan_ch = (void *)&scan->data[cmd_len];
		scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
		scan_ch->channel =
			cpu_to_le16(priv->_agn.offchan_tx_chan->hw_value);
		scan_ch->active_dwell =
			cpu_to_le16(priv->_agn.offchan_tx_timeout);
		scan_ch->passive_dwell = 0;

		/* Set txpower levels to defaults */
		scan_ch->dsp_atten = 110;

		/* NOTE: if we were doing 6Mb OFDM for scans we'd use
		 * power level:
		 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
		 */
		if (priv->_agn.offchan_tx_chan->band == IEEE80211_BAND_5GHZ)
			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else
			scan_ch->tx_gain = ((1 << 5) | (5 << 3));
		}
		break;
	}

	if (scan->channel_count == 0) {
		IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
		return -EIO;
	}

	cmd.len[0] += le16_to_cpu(scan->tx_cmd.len) +
	    scan->channel_count * sizeof(struct iwl_scan_channel);
	cmd.data[0] = scan;
	cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
	scan->len = cpu_to_le16(cmd.len[0]);

	/* set scan bit here for PAN params */
	set_bit(STATUS_SCAN_HW, &priv->status);

	ret = iwlagn_set_pan_params(priv);
	if (ret)
		return ret;

	ret = iwl_send_cmd_sync(priv, &cmd);
	if (ret) {
		clear_bit(STATUS_SCAN_HW, &priv->status);
		iwlagn_set_pan_params(priv);
	}

	return ret;
}

int iwlagn_manage_ibss_station(struct iwl_priv *priv,
			       struct ieee80211_vif *vif, bool add)
{
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;

	if (add)
		return iwlagn_add_bssid_station(priv, vif_priv->ctx,
						vif->bss_conf.bssid,
						&vif_priv->ibss_bssid_sta_id);
	return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
				  vif->bss_conf.bssid);
}

void iwl_free_tfds_in_queue(struct iwl_priv *priv,
			    int sta_id, int tid, int freed)
{
	lockdep_assert_held(&priv->sta_lock);

	if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
		priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
	else {
		IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
			priv->stations[sta_id].tid[tid].tfds_in_queue,
			freed);
		priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
	}
}

#define IWL_FLUSH_WAIT_MS	2000

int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv)
{
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	int cnt;
	unsigned long now = jiffies;
	int ret = 0;

	/* waiting for all the tx frames complete might take a while */
	for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
		if (cnt == priv->cmd_queue)
			continue;
		txq = &priv->txq[cnt];
		q = &txq->q;
		while (q->read_ptr != q->write_ptr && !time_after(jiffies,
		       now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
				msleep(1);

		if (q->read_ptr != q->write_ptr) {
			IWL_ERR(priv, "fail to flush all tx fifo queues\n");
			ret = -ETIMEDOUT;
			break;
		}
	}
	return ret;
}

#define IWL_TX_QUEUE_MSK	0xfffff

/**
 * iwlagn_txfifo_flush: send REPLY_TXFIFO_FLUSH command to uCode
 *
 * Prerequisites:
 *  1. acquire the mutex before calling
 *  2. make sure the RF is on and not in an exit state
 */
int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
{
	struct iwl_txfifo_flush_cmd flush_cmd;
	struct iwl_host_cmd cmd = {
		.id = REPLY_TXFIFO_FLUSH,
		.len = { sizeof(struct iwl_txfifo_flush_cmd), },
		.flags = CMD_SYNC,
		.data = { &flush_cmd, },
	};

	might_sleep();

	memset(&flush_cmd, 0, sizeof(flush_cmd));
	if (flush_control & BIT(IWL_RXON_CTX_BSS))
		flush_cmd.fifo_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK |
				 IWL_SCD_BE_MSK | IWL_SCD_BK_MSK |
				 IWL_SCD_MGMT_MSK;
	if ((flush_control & BIT(IWL_RXON_CTX_PAN)) &&
	    (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)))
		flush_cmd.fifo_control |= IWL_PAN_SCD_VO_MSK |
				IWL_PAN_SCD_VI_MSK | IWL_PAN_SCD_BE_MSK |
				IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK |
				IWL_PAN_SCD_MULTICAST_MSK;

	if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
		flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK;

	IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n",
		       flush_cmd.fifo_control);
	flush_cmd.flush_control = cpu_to_le16(flush_control);

	return iwl_send_cmd(priv, &cmd);
}

void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
{
	mutex_lock(&priv->mutex);
	ieee80211_stop_queues(priv->hw);
	if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
		IWL_ERR(priv, "flush request fail\n");
		goto done;
	}
	IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
	iwlagn_wait_tx_queue_empty(priv);
done:
	ieee80211_wake_queues(priv->hw);
	mutex_unlock(&priv->mutex);
}

/*
 * BT coex
 */
/*
 * Macros to access the lookup table.
 *
 * The lookup table has 7 inputs: bt3_prio, bt3_txrx, bt_rf_act, wifi_req,
 * wifi_prio, wifi_txrx and wifi_sh_ant_req.
 *
 * It has three outputs: WLAN_ACTIVE, WLAN_KILL and ANT_SWITCH
 *
 * The format is that "registers" 8 through 11 contain the WLAN_ACTIVE bits
 * one after another in 32-bit registers, and "registers" 0 through 7 contain
 * the WLAN_KILL and ANT_SWITCH bits interleaved (in that order).
 *
 * These macros encode that format.
 */
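/*
 * For example, LUT_VALUE(1, 0, 1, 0, 0, 1, 0) == 0x25, so the matching
 * WLAN_ACTIVE bit is bit (0x25 & 0x1f) == 5 of lut[8 + (0x25 >> 5)] == lut[9].
 */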
#define LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, wifi_req, wifi_prio, \
		  wifi_txrx, wifi_sh_ant_req) \
	(bt3_prio | (bt3_txrx << 1) | (bt_rf_act << 2) | (wifi_req << 3) | \
	(wifi_prio << 4) | (wifi_txrx << 5) | (wifi_sh_ant_req << 6))

#define LUT_PTA_WLAN_ACTIVE_OP(lut, op, val) \
	lut[8 + ((val) >> 5)] op (cpu_to_le32(BIT((val) & 0x1f)))
#define LUT_TEST_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
				 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	(!!(LUT_PTA_WLAN_ACTIVE_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, \
				   bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
				   wifi_sh_ant_req))))
#define LUT_SET_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
				wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	LUT_PTA_WLAN_ACTIVE_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, \
			       bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
			       wifi_sh_ant_req))
#define LUT_CLEAR_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, \
				  wifi_req, wifi_prio, wifi_txrx, \
				  wifi_sh_ant_req) \
	LUT_PTA_WLAN_ACTIVE_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, \
			       bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
			       wifi_sh_ant_req))

#define LUT_WLAN_KILL_OP(lut, op, val) \
	lut[(val) >> 4] op (cpu_to_le32(BIT(((val) << 1) & 0x1e)))
#define LUT_TEST_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
			   wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	(!!(LUT_WLAN_KILL_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
			     wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))))
#define LUT_SET_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
			  wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	LUT_WLAN_KILL_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
			 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
#define LUT_CLEAR_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
			    wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	LUT_WLAN_KILL_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
			 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))

#define LUT_ANT_SWITCH_OP(lut, op, val) \
	lut[(val) >> 4] op (cpu_to_le32(BIT((((val) << 1) & 0x1e) + 1)))
#define LUT_TEST_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
			    wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	(!!(LUT_ANT_SWITCH_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
			      wifi_req, wifi_prio, wifi_txrx, \
			      wifi_sh_ant_req))))
#define LUT_SET_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
			   wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	LUT_ANT_SWITCH_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
			  wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
#define LUT_CLEAR_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
			     wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	LUT_ANT_SWITCH_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
			  wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))

static const __le32 iwlagn_def_3w_lookup[12] = {
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaeaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xcc00ff28),
	cpu_to_le32(0x0000aaaa),
	cpu_to_le32(0xcc00aaaa),
	cpu_to_le32(0x0000aaaa),
	cpu_to_le32(0xc0004000),
	cpu_to_le32(0x00004000),
	cpu_to_le32(0xf0005000),
	cpu_to_le32(0xf0005000),
};

static const __le32 iwlagn_concurrent_lookup[12] = {
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0x00000000),
	cpu_to_le32(0x00000000),
	cpu_to_le32(0x00000000),
	cpu_to_le32(0x00000000),
};

void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
{
	struct iwl_basic_bt_cmd basic = {
		.max_kill = IWLAGN_BT_MAX_KILL_DEFAULT,
		.bt3_timer_t7_value = IWLAGN_BT3_T7_DEFAULT,
		.bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT,
		.bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT,
	};
	struct iwl6000_bt_cmd bt_cmd_6000;
	struct iwl2000_bt_cmd bt_cmd_2000;
	int ret;

	BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) !=
			sizeof(basic.bt3_lookup_table));

	if (priv->cfg->bt_params) {
		if (priv->cfg->bt_params->bt_session_2) {
			bt_cmd_2000.prio_boost = cpu_to_le32(
				priv->cfg->bt_params->bt_prio_boost);
			bt_cmd_2000.tx_prio_boost = 0;
			bt_cmd_2000.rx_prio_boost = 0;
		} else {
			bt_cmd_6000.prio_boost =
				priv->cfg->bt_params->bt_prio_boost;
			bt_cmd_6000.tx_prio_boost = 0;
			bt_cmd_6000.rx_prio_boost = 0;
		}
	} else {
		IWL_ERR(priv, "failed to construct BT Coex Config\n");
		return;
	}

	basic.kill_ack_mask = priv->kill_ack_mask;
	basic.kill_cts_mask = priv->kill_cts_mask;
	basic.valid = priv->bt_valid;

	/*
	 * Configure BT coex mode to "no coexistence" when the
	 * user disabled BT coexistence, we have no interface
	 * (might be in monitor mode), or the interface is in
	 * IBSS mode (no proper uCode support for coex then).
	 */
	if (!iwlagn_mod_params.bt_coex_active ||
	    priv->iw_mode == NL80211_IFTYPE_ADHOC) {
		basic.flags = IWLAGN_BT_FLAG_COEX_MODE_DISABLED;
	} else {
		basic.flags = IWLAGN_BT_FLAG_COEX_MODE_3W <<
					IWLAGN_BT_FLAG_COEX_MODE_SHIFT;
		if (priv->cfg->bt_params &&
		    priv->cfg->bt_params->bt_sco_disable)
			basic.flags |= IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;

		if (priv->bt_ch_announce)
			basic.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION;
		IWL_DEBUG_COEX(priv, "BT coex flag: 0X%x\n", basic.flags);
	}
	priv->bt_enable_flag = basic.flags;
	if (priv->bt_full_concurrent)
		memcpy(basic.bt3_lookup_table, iwlagn_concurrent_lookup,
			sizeof(iwlagn_concurrent_lookup));
	else
		memcpy(basic.bt3_lookup_table, iwlagn_def_3w_lookup,
			sizeof(iwlagn_def_3w_lookup));

	IWL_DEBUG_COEX(priv, "BT coex %s in %s mode\n",
		       basic.flags ? "active" : "disabled",
		       priv->bt_full_concurrent ?
		       "full concurrency" : "3-wire");

	if (priv->cfg->bt_params->bt_session_2) {
		memcpy(&bt_cmd_2000.basic, &basic,
			sizeof(basic));
		ret = iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
			sizeof(bt_cmd_2000), &bt_cmd_2000);
	} else {
		memcpy(&bt_cmd_6000.basic, &basic,
			sizeof(basic));
		ret = iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
			sizeof(bt_cmd_6000), &bt_cmd_6000);
	}
	if (ret)
		IWL_ERR(priv, "failed to send BT Coex Config\n");

}

static void iwlagn_bt_traffic_change_work(struct work_struct *work)
{
	struct iwl_priv *priv =
		container_of(work, struct iwl_priv, bt_traffic_change_work);
	struct iwl_rxon_context *ctx;
	int smps_request = -1;

	if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
		/* bt coex disabled */
		return;
	}

	/*
	 * Note: bt_traffic_load can be overridden by scan complete and
	 * coex profile notifications. Ignore that here, since the only bad
	 * consequence is that the debug print may not match the actual
	 * state.
	 */
	IWL_DEBUG_COEX(priv, "BT traffic load changes: %d\n",
		       priv->bt_traffic_load);

	switch (priv->bt_traffic_load) {
	case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
		if (priv->bt_status)
			smps_request = IEEE80211_SMPS_DYNAMIC;
		else
			smps_request = IEEE80211_SMPS_AUTOMATIC;
		break;
	case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
		smps_request = IEEE80211_SMPS_DYNAMIC;
		break;
	case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
	case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
		smps_request = IEEE80211_SMPS_STATIC;
		break;
	default:
		IWL_ERR(priv, "Invalid BT traffic load: %d\n",
			priv->bt_traffic_load);
		break;
	}

	mutex_lock(&priv->mutex);

	/*
	 * We cannot send commands to the firmware while scanning. When the
	 * scan completes, this work will be scheduled again. The check is
	 * done with the mutex held so that no new scan request can arrive
	 * in the meantime. We deliberately do not check STATUS_SCANNING, to
	 * avoid a race where queue_work() runs twice from different
	 * notifications and we would quit without performing any work at
	 * all.
	 */
	if (test_bit(STATUS_SCAN_HW, &priv->status))
		goto out;

	if (priv->cfg->ops->lib->update_chain_flags)
		priv->cfg->ops->lib->update_chain_flags(priv);

	if (smps_request != -1) {
		priv->current_ht_config.smps = smps_request;
		for_each_context(priv, ctx) {
			if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION)
				ieee80211_request_smps(ctx->vif, smps_request);
		}
	}
out:
	mutex_unlock(&priv->mutex);
}

static void iwlagn_print_uartmsg(struct iwl_priv *priv,
				struct iwl_bt_uart_msg *uart_msg)
{
	IWL_DEBUG_COEX(priv, "Message Type = 0x%X, SSN = 0x%X, "
			"Update Req = 0x%X",
		(BT_UART_MSG_FRAME1MSGTYPE_MSK & uart_msg->frame1) >>
			BT_UART_MSG_FRAME1MSGTYPE_POS,
		(BT_UART_MSG_FRAME1SSN_MSK & uart_msg->frame1) >>
			BT_UART_MSG_FRAME1SSN_POS,
		(BT_UART_MSG_FRAME1UPDATEREQ_MSK & uart_msg->frame1) >>
			BT_UART_MSG_FRAME1UPDATEREQ_POS);

	IWL_DEBUG_COEX(priv, "Open connections = 0x%X, Traffic load = 0x%X, "
			"Chl_SeqN = 0x%X, In band = 0x%X",
		(BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2OPENCONNECTIONS_POS,
		(BT_UART_MSG_FRAME2TRAFFICLOAD_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2TRAFFICLOAD_POS,
		(BT_UART_MSG_FRAME2CHLSEQN_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2CHLSEQN_POS,
		(BT_UART_MSG_FRAME2INBAND_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2INBAND_POS);

	IWL_DEBUG_COEX(priv, "SCO/eSCO = 0x%X, Sniff = 0x%X, A2DP = 0x%X, "
			"ACL = 0x%X, Master = 0x%X, OBEX = 0x%X",
		(BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3SCOESCO_POS,
		(BT_UART_MSG_FRAME3SNIFF_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3SNIFF_POS,
		(BT_UART_MSG_FRAME3A2DP_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3A2DP_POS,
		(BT_UART_MSG_FRAME3ACL_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3ACL_POS,
		(BT_UART_MSG_FRAME3MASTER_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3MASTER_POS,
		(BT_UART_MSG_FRAME3OBEX_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3OBEX_POS);

	IWL_DEBUG_COEX(priv, "Idle duration = 0x%X",
		(BT_UART_MSG_FRAME4IDLEDURATION_MSK & uart_msg->frame4) >>
			BT_UART_MSG_FRAME4IDLEDURATION_POS);

	IWL_DEBUG_COEX(priv, "Tx Activity = 0x%X, Rx Activity = 0x%X, "
			"eSCO Retransmissions = 0x%X",
		(BT_UART_MSG_FRAME5TXACTIVITY_MSK & uart_msg->frame5) >>
			BT_UART_MSG_FRAME5TXACTIVITY_POS,
		(BT_UART_MSG_FRAME5RXACTIVITY_MSK & uart_msg->frame5) >>
			BT_UART_MSG_FRAME5RXACTIVITY_POS,
		(BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK & uart_msg->frame5) >>
			BT_UART_MSG_FRAME5ESCORETRANSMIT_POS);

	IWL_DEBUG_COEX(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X",
		(BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK & uart_msg->frame6) >>
			BT_UART_MSG_FRAME6SNIFFINTERVAL_POS,
		(BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >>
			BT_UART_MSG_FRAME6DISCOVERABLE_POS);

	IWL_DEBUG_COEX(priv, "Sniff Activity = 0x%X, Page = "
			"0x%X, Inquiry = 0x%X, Connectable = 0x%X",
		(BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >>
			BT_UART_MSG_FRAME7SNIFFACTIVITY_POS,
		(BT_UART_MSG_FRAME7PAGE_MSK & uart_msg->frame7) >>
			BT_UART_MSG_FRAME7PAGE_POS,
		(BT_UART_MSG_FRAME7INQUIRY_MSK & uart_msg->frame7) >>
			BT_UART_MSG_FRAME7INQUIRY_POS,
		(BT_UART_MSG_FRAME7CONNECTABLE_MSK & uart_msg->frame7) >>
			BT_UART_MSG_FRAME7CONNECTABLE_POS);
}

static void iwlagn_set_kill_msk(struct iwl_priv *priv,
				struct iwl_bt_uart_msg *uart_msg)
{
	u8 kill_msk;
	static const __le32 bt_kill_ack_msg[2] = {
		IWLAGN_BT_KILL_ACK_MASK_DEFAULT,
		IWLAGN_BT_KILL_ACK_CTS_MASK_SCO };
	static const __le32 bt_kill_cts_msg[2] = {
		IWLAGN_BT_KILL_CTS_MASK_DEFAULT,
		IWLAGN_BT_KILL_ACK_CTS_MASK_SCO };

	kill_msk = (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3)
		? 1 : 0;
	if (priv->kill_ack_mask != bt_kill_ack_msg[kill_msk] ||
	    priv->kill_cts_mask != bt_kill_cts_msg[kill_msk]) {
		priv->bt_valid |= IWLAGN_BT_VALID_KILL_ACK_MASK;
		priv->kill_ack_mask = bt_kill_ack_msg[kill_msk];
		priv->bt_valid |= IWLAGN_BT_VALID_KILL_CTS_MASK;
		priv->kill_cts_mask = bt_kill_cts_msg[kill_msk];

		/* schedule to send runtime bt_config */
		queue_work(priv->workqueue, &priv->bt_runtime_config);
	}
}

void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
					     struct iwl_rx_mem_buffer *rxb)
{
	unsigned long flags;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_bt_coex_profile_notif *coex = &pkt->u.bt_coex_profile_notif;
	struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg;

	if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
		/* bt coex disabled */
		return;
	}

	IWL_DEBUG_COEX(priv, "BT Coex notification:\n");
	IWL_DEBUG_COEX(priv, "    status: %d\n", coex->bt_status);
	IWL_DEBUG_COEX(priv, "    traffic load: %d\n", coex->bt_traffic_load);
	IWL_DEBUG_COEX(priv, "    CI compliance: %d\n",
			coex->bt_ci_compliance);
	iwlagn_print_uartmsg(priv, uart_msg);

	priv->last_bt_traffic_load = priv->bt_traffic_load;
	if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
		if (priv->bt_status != coex->bt_status ||
		    priv->last_bt_traffic_load != coex->bt_traffic_load) {
			if (coex->bt_status) {
				/* BT on */
				if (!priv->bt_ch_announce)
					priv->bt_traffic_load =
						IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
				else
					priv->bt_traffic_load =
						coex->bt_traffic_load;
			} else {
				/* BT off */
				priv->bt_traffic_load =
					IWL_BT_COEX_TRAFFIC_LOAD_NONE;
			}
			priv->bt_status = coex->bt_status;
			queue_work(priv->workqueue,
				   &priv->bt_traffic_change_work);
		}
	}

	iwlagn_set_kill_msk(priv, uart_msg);

	/* FIXME: based on notification, adjust the prio_boost */

	spin_lock_irqsave(&priv->lock, flags);
	priv->bt_ci_compliance = coex->bt_ci_compliance;
	spin_unlock_irqrestore(&priv->lock, flags);
}

void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)
{
	iwlagn_rx_handler_setup(priv);
	priv->rx_handlers[REPLY_BT_COEX_PROFILE_NOTIF] =
		iwlagn_bt_coex_profile_notif;
}

void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv)
{
	iwlagn_setup_deferred_work(priv);

	INIT_WORK(&priv->bt_traffic_change_work,
		  iwlagn_bt_traffic_change_work);
}

void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv)
{
	cancel_work_sync(&priv->bt_traffic_change_work);
}

static bool is_single_rx_stream(struct iwl_priv *priv)
{
	return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
	       priv->current_ht_config.single_chain_sufficient;
}

#define IWL_NUM_RX_CHAINS_MULTIPLE	3
#define IWL_NUM_RX_CHAINS_SINGLE	2
#define IWL_NUM_IDLE_CHAINS_DUAL	2
#define IWL_NUM_IDLE_CHAINS_SINGLE	1

/*
 * Determine how many receiver/antenna chains to use.
 *
 * More provides better reception via diversity.  Fewer saves power
 * at the expense of throughput, but only when not in powersave to
 * start with.
 *
 * MIMO (dual stream) requires at least 2, but works better with 3.
 * This does not determine *which* chains to use, just how many.
 */
static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
{
	if (priv->cfg->bt_params &&
	    priv->cfg->bt_params->advanced_bt_coexist &&
	    (priv->bt_full_concurrent ||
	     priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
		/*
		 * only use chain 'A' in bt high traffic load or
		 * full concurrency mode
		 */
		return IWL_NUM_RX_CHAINS_SINGLE;
	}
	/* # of Rx chains to use when expecting MIMO. */
	if (is_single_rx_stream(priv))
		return IWL_NUM_RX_CHAINS_SINGLE;
	else
		return IWL_NUM_RX_CHAINS_MULTIPLE;
}

/*
 * When we are in power-saving mode, unless the device supports spatial
 * multiplexing power save, use the active count as the rx chain count.
 */
static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
{
	/* # Rx chains when idling, depending on SMPS mode */
	switch (priv->current_ht_config.smps) {
	case IEEE80211_SMPS_STATIC:
	case IEEE80211_SMPS_DYNAMIC:
		return IWL_NUM_IDLE_CHAINS_SINGLE;
	case IEEE80211_SMPS_OFF:
		return active_cnt;
	default:
		WARN(1, "invalid SMPS mode %d",
		     priv->current_ht_config.smps);
		return active_cnt;
	}
}

/* up to 4 chains */
static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
{
	u8 res;
	res = (chain_bitmap & BIT(0)) >> 0;
	res += (chain_bitmap & BIT(1)) >> 1;
	res += (chain_bitmap & BIT(2)) >> 2;
	res += (chain_bitmap & BIT(3)) >> 3;
	return res;
}
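
/*
 * For illustration only: the open-coded sum above is simply a population
 * count of the low four bits, e.g.
 *
 *	iwl_count_chain_bitmap(0x5) == 2	(chains A and C)
 *	iwl_count_chain_bitmap(0x7) == 3	(chains A, B and C)
 *
 * and could equally be written as hweight8(chain_bitmap & 0xf).
 */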

/**
 * iwlagn_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
 *
 * Selects how many and which Rx receivers/antennas/chains to use.
 * This should not be used for the scan command ... it puts data in the wrong place.
 */
void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	bool is_single = is_single_rx_stream(priv);
	bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
	u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
	u32 active_chains;
	u16 rx_chain;

	/* Tell uCode which antennas are actually connected.
	 * Before first association, we assume all antennas are connected.
	 * Just after first association, iwl_chain_noise_calibration()
	 *    checks which antennas actually *are* connected. */
	if (priv->chain_noise_data.active_chains)
		active_chains = priv->chain_noise_data.active_chains;
	else
		active_chains = priv->hw_params.valid_rx_ant;

	if (priv->cfg->bt_params &&
	    priv->cfg->bt_params->advanced_bt_coexist &&
	    (priv->bt_full_concurrent ||
	     priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
		/*
		 * only use chain 'A' in bt high traffic load or
		 * full concurrency mode
		 */
		active_chains = first_antenna(active_chains);
	}

	rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;

	/* How many receivers should we use? */
	active_rx_cnt = iwl_get_active_rx_chain_count(priv);
	idle_rx_cnt = iwl_get_idle_rx_chain_count(priv, active_rx_cnt);


	/* correct rx chain count according to hw settings
	 * and chain noise calibration
	 */
	valid_rx_cnt = iwl_count_chain_bitmap(active_chains);
	if (valid_rx_cnt < active_rx_cnt)
		active_rx_cnt = valid_rx_cnt;

	if (valid_rx_cnt < idle_rx_cnt)
		idle_rx_cnt = valid_rx_cnt;

	rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
	rx_chain |= idle_rx_cnt  << RXON_RX_CHAIN_CNT_POS;

	ctx->staging.rx_chain = cpu_to_le16(rx_chain);

	if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
		ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
	else
		ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;

	IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
			ctx->staging.rx_chain,
			active_rx_cnt, idle_rx_cnt);

	WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
		active_rx_cnt < idle_rx_cnt);
}
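
/*
 * Worked example (illustrative, assuming a three-antenna SKU, a MIMO-capable
 * peer, no chain noise data yet, SMPS off and no BT constraint):
 * active_chains = valid_rx_ant = 0x7, so active_rx_cnt = 3 and
 * idle_rx_cnt = 3 as well.  The staging rx_chain then carries
 * "valid = 0x7, MIMO count = 3, idle count = 3", and
 * RXON_RX_CHAIN_MIMO_FORCE_MSK is additionally set when not in power-save.
 */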

u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
{
	int i;
	u8 ind = ant;

	if (priv->band == IEEE80211_BAND_2GHZ &&
	    priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
		return 0;

	for (i = 0; i < RATE_ANT_NUM - 1; i++) {
		ind = (ind + 1) < RATE_ANT_NUM ?  ind + 1 : 0;
		if (valid & BIT(ind))
			return ind;
	}
	return ant;
}
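
/*
 * Usage sketch (illustrative): with valid = BIT(0) | BIT(1) (two valid
 * antennas), iwl_toggle_tx_ant(priv, 0, valid) returns 1 and a following
 * call with ant = 1 returns 0, so transmissions alternate between the two
 * chains.  On 2.4 GHz under high BT traffic load the function always
 * returns antenna index 0 instead.
 */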

static const char *get_csr_string(int cmd)
{
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
}

void iwl_dump_csr(struct iwl_priv *priv)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};
	IWL_ERR(priv, "CSR values:\n");
	IWL_ERR(priv, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i <  ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(priv, "  %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(priv, csr_tbl[i]));
	}
}

static const char *get_fh_string(int cmd)
{
	switch (cmd) {
	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
	IWL_CMD(FH_RSCSR_CHNL0_WPTR);
	IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
	IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
	IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
	IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
	IWL_CMD(FH_TSSR_TX_STATUS_REG);
	IWL_CMD(FH_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
}

int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
{
	int i;
#ifdef CONFIG_IWLWIFI_DEBUG
	int pos = 0;
	size_t bufsz = 0;
#endif
	static const u32 fh_tbl[] = {
		FH_RSCSR_CHNL0_STTS_WPTR_REG,
		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH_RSCSR_CHNL0_WPTR,
		FH_MEM_RCSR_CHNL0_CONFIG_REG,
		FH_MEM_RSSR_SHARED_CTRL_REG,
		FH_MEM_RSSR_RX_STATUS_REG,
		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH_TSSR_TX_STATUS_REG,
		FH_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLWIFI_DEBUG
	if (display) {
		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
		pos += scnprintf(*buf + pos, bufsz - pos,
				"FH register values:\n");
		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
			pos += scnprintf(*buf + pos, bufsz - pos,
				"  %34s: 0X%08x\n",
				get_fh_string(fh_tbl[i]),
				iwl_read_direct32(priv, fh_tbl[i]));
		}
		return pos;
	}
#endif
	IWL_ERR(priv, "FH register values:\n");
	for (i = 0; i <  ARRAY_SIZE(fh_tbl); i++) {
		IWL_ERR(priv, "  %34s: 0X%08x\n",
			get_fh_string(fh_tbl[i]),
			iwl_read_direct32(priv, fh_tbl[i]));
	}
	return 0;
}

/* notification wait support */
void iwlagn_init_notification_wait(struct iwl_priv *priv,
				   struct iwl_notification_wait *wait_entry,
				   u8 cmd,
				   void (*fn)(struct iwl_priv *priv,
					      struct iwl_rx_packet *pkt,
					      void *data),
				   void *fn_data)
{
	wait_entry->fn = fn;
	wait_entry->fn_data = fn_data;
	wait_entry->cmd = cmd;
	wait_entry->triggered = false;
	wait_entry->aborted = false;

	spin_lock_bh(&priv->_agn.notif_wait_lock);
	list_add(&wait_entry->list, &priv->_agn.notif_waits);
	spin_unlock_bh(&priv->_agn.notif_wait_lock);
}

int iwlagn_wait_notification(struct iwl_priv *priv,
			     struct iwl_notification_wait *wait_entry,
			     unsigned long timeout)
{
	int ret;

	ret = wait_event_timeout(priv->_agn.notif_waitq,
				 wait_entry->triggered || wait_entry->aborted,
				 timeout);

	spin_lock_bh(&priv->_agn.notif_wait_lock);
	list_del(&wait_entry->list);
	spin_unlock_bh(&priv->_agn.notif_wait_lock);

	if (wait_entry->aborted)
		return -EIO;

	/* return value is always >= 0 */
	if (ret <= 0)
		return -ETIMEDOUT;
	return 0;
}

void iwlagn_remove_notification(struct iwl_priv *priv,
				struct iwl_notification_wait *wait_entry)
{
	spin_lock_bh(&priv->_agn.notif_wait_lock);
	list_del(&wait_entry->list);
	spin_unlock_bh(&priv->_agn.notif_wait_lock);
}
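
/*
 * Usage sketch (illustrative only; the notification ID, command and helper
 * names below are placeholders, not part of this file): register the wait
 * entry before sending the command, then block until the matching
 * notification arrives or the timeout expires, and remove the entry on the
 * error path.
 */
#if 0
static void example_notif_fn(struct iwl_priv *priv,
			     struct iwl_rx_packet *pkt, void *data)
{
	/* parse pkt here and report results back through *data if needed */
}

static int example_send_and_wait(struct iwl_priv *priv)
{
	struct iwl_notification_wait wait;
	int ret;

	iwlagn_init_notification_wait(priv, &wait,
				      CALIBRATION_COMPLETE_NOTIFICATION,
				      example_notif_fn, NULL);

	ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, 0, NULL);
	if (ret) {
		iwlagn_remove_notification(priv, &wait);
		return ret;
	}

	/* wait up to two seconds for the notification */
	return iwlagn_wait_notification(priv, &wait, 2 * HZ);
}
#endif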

int iwlagn_start_device(struct iwl_priv *priv)
{
	int ret;

	if ((priv->cfg->sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
	     iwl_prepare_card_hw(priv)) {
		IWL_WARN(priv, "Exit HW not ready\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		set_bit(STATUS_RF_KILL_HW, &priv->status);

	if (iwl_is_rfkill(priv)) {
		wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
		iwl_enable_interrupts(priv);
		return -ERFKILL;
	}

	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);

	ret = iwlagn_hw_nic_init(priv);
	if (ret) {
		IWL_ERR(priv, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(priv);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	return 0;
}

void iwlagn_stop_device(struct iwl_priv *priv)
{
	unsigned long flags;

	/* stop and reset the on-board processor */
	iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&priv->lock, flags);
	iwl_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
	iwl_synchronize_irq(priv);

	/* device going down, Stop using ICT table */
	iwl_disable_ict(priv);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &priv->status)) {
		priv->trans.ops->tx_stop(priv);
		priv->trans.ops->rx_stop(priv);

		/* Power-down device's busmaster DMA clocks */
		iwl_write_prph(priv, APMG_CLK_DIS_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_apm_stop(priv);
}