/******************************************************************************
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"
#include "iwl-context-info-gen3.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC.  These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC.  The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped)
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt.  The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
 *   When the interrupt handler is called, the request is processed.
 *   The page is either stolen - transferred to the upper layer -
 *   or reused - added immediately to the iwl->rxq->rx_free list.
 * + When the page is stolen - the driver updates the matching queue's used
 *   count, detaches the RBD and transfers it to the queue's used list.
 *   When there are two used RBDs - they are transferred to the allocator's
 *   empty list. Work is then scheduled for the allocator to start allocating
 *   eight buffers.
 *   When there are another six used RBDs - they are transferred to the
 *   allocator's empty list and the driver tries to claim the pre-allocated
 *   buffers and add them to iwl->rxq->rx_free. If that fails - it keeps
 *   trying to claim them until it succeeds.
 *   When there are 8+ buffers in the free list - either from allocation or
 *   from eight reused unstolen pages - restock is called to update the FW
 *   and indexes.
 * + In order to make sure the allocator always has RBDs to use for
 *   allocation, the allocator has an initial pool of size
 *   num_queues * (8 - 2) - the maximum number of missing RBDs per allocation
 *   request (a request is posted with 2 empty RBDs; there is no guarantee
 *   when the other 6 RBDs are supplied).
 *   The queues supply the recycling of the remaining RBDs.
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq.  The driver 'processed' index is updated.
 * + If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set, it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock.
 *                            Used only during initialization.
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.
 * iwl_pcie_rx_allocator()     Background work for allocating pages.
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Posts and claims requests to the allocator.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 *
 * RBD life-cycle:
 *
 * Init:
 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
 *
 * Regular Receive interrupt:
 * Page Stolen:
 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
 * Page not Stolen:
 * rxq.queue -> rxq.rx_free -> rxq.queue
 * ...
 *
 */
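
/*
 * Illustrative sketch (not part of the driver flow): the empty/full rules
 * described above for the READ/WRITE indexes, assuming a power-of-two queue
 * size as used by this driver. The helper names below are made up for the
 * example and are not called anywhere.
 */
static inline bool iwl_rx_example_queue_empty(u32 read, u32 write,
					      u32 queue_size)
{
	/* "empty (no good data) if WRITE = READ - 1" (modulo queue size) */
	return write == ((read - 1) & (queue_size - 1));
}

static inline bool iwl_rx_example_queue_full(u32 read, u32 write)
{
	/* "full if WRITE = READ" */
	return write == read;
}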

/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
	/* Make sure rx queue size is a power of 2 */
	WARN_ON(rxq->queue_size & (rxq->queue_size - 1));

	/*
	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
	 * between empty and completely full queues.
	 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
	 * defined for negative dividends.
	 */
	return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
}
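
/*
 * Worked example (illustrative numbers only): with queue_size = 256,
 * read = 10 and write = 250, the expression above yields
 * (10 - 250 - 1) & 255 = 15 free slots, i.e. the mask keeps the result
 * correct across the wrap-around.
 */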

/*
 * iwl_pcie_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}

/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
		/* TODO: remove this for 22560 once fw does it */
		iwl_write_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
		return iwl_poll_prph_bit(trans, RFH_GEN_STATUS_GEN3,
					 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
	} else if (trans->cfg->mq_rx_supported) {
		iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
		return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
					   RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
	} else {
		iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
					   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
					   1000);
	}
}

/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_rxq *rxq)
{
	u32 reg;

	lockdep_assert_held(&rxq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. there is a chance that the NIC is asleep
	 */
	if (!trans->cfg->base_params->shadow_reg_enable &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
				       reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    BIT(trans->cfg->csr->flag_mac_access_req));
			rxq->need_update = true;
			return;
		}
	}

	rxq->write_actual = round_down(rxq->write, 8);
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    (rxq->write_actual |
			     ((FIRST_RX_QUEUE + rxq->id) << 16)));
	else if (trans->cfg->mq_rx_supported)
		iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
			    rxq->write_actual);
	else
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}

static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (!rxq->need_update)
			continue;
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		rxq->need_update = false;
		spin_unlock(&rxq->lock);
	}
}

static void iwl_pcie_restock_bd(struct iwl_trans *trans,
				struct iwl_rxq *rxq,
				struct iwl_rx_mem_buffer *rxb)
{
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
		struct iwl_rx_transfer_desc *bd = rxq->bd;

		bd[rxq->write].type_n_size =
			cpu_to_le32((IWL_RX_TD_TYPE & IWL_RX_TD_TYPE_MSK) |
			((IWL_RX_TD_SIZE_2K >> 8) & IWL_RX_TD_SIZE_MSK));
		bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
		bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
	} else {
		__le64 *bd = rxq->bd;

		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
	}
}
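
/*
 * Note on the non-22560 packing above: the page DMA address is expected to
 * have its low 12 bits clear (see the WARN_ON in iwl_pcie_rxmq_restock()),
 * which is what leaves room to OR the 12-bit vid into the descriptor;
 * iwl_pcie_get_rxb() later recovers the vid from the used descriptor with
 * the matching 0x0FFF mask.
 */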

/*
 * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
 */
static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while (rxq->free_count) {
		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;
		/* first 12 bits are expected to be empty */
		WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
		/* Point to Rx buffer via next RBD in circular buffer */
		iwl_pcie_restock_bd(trans, rxq, rxb);
		rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/*
	 * If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8.
	 */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock(&rxq->lock);
	}
}

/*
 * iwl_pcie_rxsq_restock - restock implementation for single queue rx
 */
static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		__le32 *bd = (__le32 *)rxq->bd;
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;

		/* Point to Rx buffer via next RBD in circular buffer */
		bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock(&rxq->lock);
	}
}

/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static
void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	if (trans->cfg->mq_rx_supported)
		iwl_pcie_rxmq_restock(trans, rxq);
	else
		iwl_pcie_rxsq_restock(trans, rxq);
}

/*
 * iwl_pcie_rx_alloc_page - allocates and returns a page.
 *
 */
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
					   gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct page *page;
	gfp_t gfp_mask = priority;

	if (trans_pcie->rx_page_order > 0)
		gfp_mask |= __GFP_COMP;

	/* Alloc a new receive buffer */
	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
	if (!page) {
		if (net_ratelimit())
			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
				       trans_pcie->rx_page_order);
		/*
		 * Issue an error if we don't have enough pre-allocated
		 * buffers.
		 */
		if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
			IWL_CRIT(trans,
				 "Failed to alloc_pages\n");
		return NULL;
	}
	return page;
}

/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 */
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;

	while (1) {
		spin_lock(&rxq->lock);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			return;
		}
		spin_unlock(&rxq->lock);

		/* Alloc a new receive buffer */
		page = iwl_pcie_rx_alloc_page(trans, priority);
		if (!page)
			return;

		spin_lock(&rxq->lock);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock(&rxq->lock);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock(&rxq->lock);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}

		spin_lock(&rxq->lock);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock(&rxq->lock);
	}
}

void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < RX_POOL_SIZE; i++) {
		if (!trans_pcie->rx_pool[i].page)
			continue;
		dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
			       PAGE_SIZE << trans_pcie->rx_page_order,
			       DMA_FROM_DEVICE);
		__free_pages(trans_pcie->rx_pool[i].page,
			     trans_pcie->rx_page_order);
		trans_pcie->rx_pool[i].page = NULL;
	}
}

/*
 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
 *
 * Allocates 8 pages for each received request
 * Called as a scheduled work item.
 */
static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct list_head local_empty;
	int pending = atomic_xchg(&rba->req_pending, 0);

	IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);

	/* If we were scheduled - there is at least one request */
	spin_lock(&rba->lock);
	/* swap out the rba->rbd_empty to a local list */
	list_replace_init(&rba->rbd_empty, &local_empty);
	spin_unlock(&rba->lock);

	while (pending) {
		int i;
		LIST_HEAD(local_allocated);
		gfp_t gfp_mask = GFP_KERNEL;

		/* Do not post a warning if there are only a few requests */
		if (pending < RX_PENDING_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
			struct iwl_rx_mem_buffer *rxb;
			struct page *page;

			/* List should never be empty - each reused RBD is
			 * returned to the list, and the initial pool covers
			 * any possible gap between the time the page is
			 * allocated and the time the RBD is added.
			 */
			BUG_ON(list_empty(&local_empty));
			/* Get the first rxb from the rbd list */
			rxb = list_first_entry(&local_empty,
					       struct iwl_rx_mem_buffer, list);
			BUG_ON(rxb->page);

			/* Alloc a new receive buffer */
			page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
			if (!page)
				continue;
			rxb->page = page;

			/* Get physical address of the RB */
			rxb->page_dma = dma_map_page(trans->dev, page, 0,
					PAGE_SIZE << trans_pcie->rx_page_order,
					DMA_FROM_DEVICE);
			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
				rxb->page = NULL;
				__free_pages(page, trans_pcie->rx_page_order);
				continue;
			}

			/* move the allocated entry to the out list */
			list_move(&rxb->list, &local_allocated);
			i++;
		}

		pending--;
		if (!pending) {
			pending = atomic_xchg(&rba->req_pending, 0);
			IWL_DEBUG_RX(trans,
				     "Pending allocation requests = %d\n",
				     pending);
		}

		spin_lock(&rba->lock);
		/* add the allocated rbds to the allocator allocated list */
		list_splice_tail(&local_allocated, &rba->rbd_allocated);
		/* get more empty RBDs for current pending requests */
		list_splice_tail_init(&rba->rbd_empty, &local_empty);
		spin_unlock(&rba->lock);

		atomic_inc(&rba->req_ready);
	}

	spin_lock(&rba->lock);
	/* return unused rbds to the allocator empty list */
	list_splice_tail(&local_empty, &rba->rbd_empty);
	spin_unlock(&rba->lock);
}

/*
 * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
 *
 * Called by the queue when it has posted an allocation request and
 * has freed 8 RBDs in order to restock itself.
 * This function directly moves the allocated RBs to the queue's ownership
 * and updates the relevant counters.
 */
static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
				      struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	lockdep_assert_held(&rxq->lock);

	/*
	 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
	 * If req_ready is 0, atomic_dec_if_positive will return -1 and this
	 * function will return early, as there are no ready requests.
	 * atomic_dec_if_positive will perform the *actual* decrement only if
	 * req_ready > 0, i.e. there are ready requests and the function
	 * hands one request to the caller.
	 */
	if (atomic_dec_if_positive(&rba->req_ready) < 0)
		return;

	spin_lock(&rba->lock);
	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
		/* Get next free Rx buffer, remove it from free list */
		struct iwl_rx_mem_buffer *rxb =
			list_first_entry(&rba->rbd_allocated,
					 struct iwl_rx_mem_buffer, list);

		list_move(&rxb->list, &rxq->rx_free);
	}
	spin_unlock(&rba->lock);

	rxq->used_count -= RX_CLAIM_REQ_ALLOC;
	rxq->free_count += RX_CLAIM_REQ_ALLOC;
}

void iwl_pcie_rx_allocator_work(struct work_struct *data)
{
	struct iwl_rb_allocator *rba_p =
		container_of(data, struct iwl_rb_allocator, rx_alloc);
	struct iwl_trans_pcie *trans_pcie =
		container_of(rba_p, struct iwl_trans_pcie, rba);

	iwl_pcie_rx_allocator(trans_pcie->trans);
}

static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
{
	struct iwl_rx_transfer_desc *rx_td;

	if (use_rx_td)
		return sizeof(*rx_td);
	else
		return trans->cfg->mq_rx_supported ? sizeof(__le64) :
			sizeof(__le32);
}

static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct device *dev = trans->dev;
	bool use_rx_td = (trans->cfg->device_family >=
			  IWL_DEVICE_FAMILY_22560);
	int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);

	if (rxq->bd)
		dma_free_coherent(trans->dev,
				  free_size * rxq->queue_size,
				  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(trans->dev,
				  use_rx_td ? sizeof(__le16) :
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	rxq->rb_stts_dma = 0;
	rxq->rb_stts = NULL;

	if (rxq->used_bd)
		dma_free_coherent(trans->dev,
				  (use_rx_td ? sizeof(*rxq->cd) :
				   sizeof(__le32)) * rxq->queue_size,
				  rxq->used_bd, rxq->used_bd_dma);
	rxq->used_bd_dma = 0;
	rxq->used_bd = NULL;

	if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
		return;

	if (rxq->tr_tail)
		dma_free_coherent(dev, sizeof(__le16),
				  rxq->tr_tail, rxq->tr_tail_dma);
	rxq->tr_tail_dma = 0;
	rxq->tr_tail = NULL;

	if (rxq->cr_tail)
		dma_free_coherent(dev, sizeof(__le16),
				  rxq->cr_tail, rxq->cr_tail_dma);
	rxq->cr_tail_dma = 0;
	rxq->cr_tail = NULL;
}

static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct device *dev = trans->dev;
	int i;
	int free_size;
	bool use_rx_td = (trans->cfg->device_family >=
			  IWL_DEVICE_FAMILY_22560);

	spin_lock_init(&rxq->lock);
	if (trans->cfg->mq_rx_supported)
		rxq->queue_size = MQ_RX_TABLE_SIZE;
	else
		rxq->queue_size = RX_QUEUE_SIZE;

	free_size = iwl_pcie_free_bd_size(trans, use_rx_td);

	/*
	 * Allocate the circular buffer of Read Buffer Descriptors
	 * (RBDs)
	 */
	rxq->bd = dma_zalloc_coherent(dev,
				      free_size * rxq->queue_size,
				      &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err;

	if (trans->cfg->mq_rx_supported) {
		rxq->used_bd = dma_zalloc_coherent(dev,
						   (use_rx_td ?
						   sizeof(*rxq->cd) :
						   sizeof(__le32)) *
						   rxq->queue_size,
						   &rxq->used_bd_dma,
						   GFP_KERNEL);
		if (!rxq->used_bd)
			goto err;
	}

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_zalloc_coherent(dev, use_rx_td ?
					   sizeof(__le16) :
					   sizeof(struct iwl_rb_status),
					   &rxq->rb_stts_dma,
					   GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err;

	if (!use_rx_td)
		return 0;

	/* Allocate the driver's pointer to TR tail */
	rxq->tr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
					   &rxq->tr_tail_dma,
					   GFP_KERNEL);
	if (!rxq->tr_tail)
		goto err;

	/* Allocate the driver's pointer to CR tail */
	rxq->cr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
					   &rxq->cr_tail_dma,
					   GFP_KERNEL);
	if (!rxq->cr_tail)
		goto err;
	/*
	 * W/A for the 22560 device step Z0 "must be non-zero" bug
	 * TODO: remove this when we stop supporting Z0
	 */
	*rxq->cr_tail = cpu_to_le16(500);

	return 0;

err:
	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		iwl_pcie_free_rxq_dma(trans, rxq);
	}
	kfree(trans_pcie->rxq);

	return -ENOMEM;
}

static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i, ret;

	if (WARN_ON(trans_pcie->rxq))
		return -EINVAL;

	trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
				  GFP_KERNEL);
	if (!trans_pcie->rxq)
		return -EINVAL;

	spin_lock_init(&rba->lock);

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
		if (ret)
			return ret;
	}
	return 0;
}

static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	unsigned long flags;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_4K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
	}

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;

	/* Stop Rx DMA */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		    (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
		    rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
		    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
		    FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
		    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
		    rb_size |
		    (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
		    (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	iwl_trans_release_nic_access(trans, &flags);

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (trans->cfg->host_interrupt_operation_mode)
		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}

void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable)
{
	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_9000)
		return;

	if (CSR_HW_REV_STEP(trans->hw_rev) != SILICON_A_STEP)
		return;

	if (!trans->cfg->integrated)
		return;

	/*
	 * Turn on the chicken-bits that cause MAC wakeup for RX-related
	 * values.
	 * This costs some power, but needed for W/A 9000 integrated A-step
	 * bug where shadow registers are not in the retention list and their
	 * value is lost when NIC powers down
	 */
	iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
		    CSR_MAC_SHADOW_REG_CTRL_RX_WAKE);
	iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTL2,
		    CSR_MAC_SHADOW_REG_CTL2_RX_WAKE);
}

static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size, enabled = 0;
	unsigned long flags;
	int i;

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_2K:
		rb_size = RFH_RXF_DMA_RB_SIZE_2K;
		break;
	case IWL_AMSDU_4K:
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = RFH_RXF_DMA_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = RFH_RXF_DMA_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
	}

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;

	/* Stop Rx DMA */
	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
	/* disable free and used rx queue operation */
	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);

	for (i = 0; i < trans->num_rx_queues; i++) {
		/* Tell device where to find RBD free table in DRAM */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_FRBDCB_BA_LSB(i),
					 trans_pcie->rxq[i].bd_dma);
		/* Tell device where to find RBD used table in DRAM */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_URBDCB_BA_LSB(i),
					 trans_pcie->rxq[i].used_bd_dma);
		/* Tell device where in DRAM to update its Rx status */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_URBD_STTS_WPTR_LSB(i),
					 trans_pcie->rxq[i].rb_stts_dma);
		/* Reset device index tables */
		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
		iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);

		enabled |= BIT(i) | BIT(i + 16);
	}

	/*
	 * Enable Rx DMA
	 * Rx buffer size 4 or 8k or 12k
	 * Min RB size 4 or 8
	 * Drop frames that exceed RB size
	 * 512 RBDs
	 */
	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
			       RFH_DMA_EN_ENABLE_VAL | rb_size |
			       RFH_RXF_DMA_MIN_RB_4_8 |
			       RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
			       RFH_RXF_DMA_RBDCB_SIZE_512);

	/*
	 * Activate DMA snooping.
	 * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
	 * Default queue is 0
	 */
	iwl_write_prph_no_grab(trans, RFH_GEN_CFG,
			       RFH_GEN_CFG_RFH_DMA_SNOOP |
			       RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
			       RFH_GEN_CFG_SERVICE_DMA_SNOOP |
			       RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
					       trans->cfg->integrated ?
					       RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
					       RFH_GEN_CFG_RB_CHUNK_SIZE_128));
	/* Enable the relevant rx queues */
	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);

	iwl_trans_release_nic_access(trans, &flags);

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	iwl_pcie_enable_rx_wake(trans, true);
}

void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
	lockdep_assert_held(&rxq->lock);

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	rxq->free_count = 0;
	rxq->used_count = 0;
}

int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
{
	WARN_ON(1);
	return 0;
}

static int _iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *def_rxq;
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i, err, queue_size, allocator_pool_size, num_alloc;

	if (!trans_pcie->rxq) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}
	def_rxq = trans_pcie->rxq;

	cancel_work_sync(&rba->rx_alloc);

	spin_lock(&rba->lock);
	atomic_set(&rba->req_pending, 0);
	atomic_set(&rba->req_ready, 0);
	INIT_LIST_HEAD(&rba->rbd_allocated);
	INIT_LIST_HEAD(&rba->rbd_empty);
	spin_unlock(&rba->lock);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_free_rbs_pool(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		def_rxq->queue[i] = NULL;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		rxq->id = i;

		spin_lock(&rxq->lock);
		/*
		 * Set read write pointer to reflect that we have processed
		 * and used all buffers, but have not restocked the Rx queue
		 * with fresh buffers
		 */
		rxq->read = 0;
		rxq->write = 0;
		rxq->write_actual = 0;
		memset(rxq->rb_stts, 0,
		       (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
		       sizeof(__le16) : sizeof(struct iwl_rb_status));

		iwl_pcie_rx_init_rxb_lists(rxq);

		if (!rxq->napi.poll)
			netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
				       iwl_pcie_dummy_napi_poll, 64);

		spin_unlock(&rxq->lock);
	}

	/* move the pool to the default queue and allocator ownerships */
	queue_size = trans->cfg->mq_rx_supported ?
		     MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
	allocator_pool_size = trans->num_rx_queues *
		(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
	num_alloc = queue_size + allocator_pool_size;
	BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
		     ARRAY_SIZE(trans_pcie->rx_pool));
	for (i = 0; i < num_alloc; i++) {
		struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];

		if (i < allocator_pool_size)
			list_add(&rxb->list, &rba->rbd_empty);
		else
			list_add(&rxb->list, &def_rxq->rx_used);
		trans_pcie->global_table[i] = rxb;
		rxb->vid = (u16)(i + 1);
		rxb->invalid = true;
	}

	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);

	return 0;
}

int iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = _iwl_pcie_rx_init(trans);

	if (ret)
		return ret;

	if (trans->cfg->mq_rx_supported)
		iwl_pcie_rx_mq_hw_init(trans);
	else
		iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);

	iwl_pcie_rxq_restock(trans, trans_pcie->rxq);

	spin_lock(&trans_pcie->rxq->lock);
	iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
	spin_unlock(&trans_pcie->rxq->lock);

	return 0;
}

int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
{
	/*
	 * We don't configure the RFH.
	 * Restock will be done at alive, after firmware configured the RFH.
	 */
	return _iwl_pcie_rx_init(trans);
}

void iwl_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	/*
	 * if rxq is NULL, it means that nothing has been allocated,
	 * exit now
	 */
	if (!trans_pcie->rxq) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	cancel_work_sync(&rba->rx_alloc);

	iwl_pcie_free_rbs_pool(trans);

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		iwl_pcie_free_rxq_dma(trans, rxq);

		if (rxq->napi.poll)
			netif_napi_del(&rxq->napi);
	}
	kfree(trans_pcie->rxq);
}

/*
 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
 *
 * Called when an RBD can be reused. The RBD is transferred to the allocator.
 * When there are 2 empty RBDs - a request for allocation is posted
 */
static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
				  struct iwl_rx_mem_buffer *rxb,
				  struct iwl_rxq *rxq, bool emergency)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;

	/* Move the RBD to the used list, will be moved to allocator in batches
	 * before claiming or posting a request */
	list_add_tail(&rxb->list, &rxq->rx_used);

	if (unlikely(emergency))
		return;

	/* Count the allocator owned RBDs */
	rxq->used_count++;

	/* If we have RX_POST_REQ_ALLOC new released rx buffers -
	 * issue a request to the allocator. Modulo RX_CLAIM_REQ_ALLOC is
	 * used for the case where we failed to claim RX_CLAIM_REQ_ALLOC
	 * buffers earlier but still need to post another request.
	 */
	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
		/* Move the 2 RBDs to the allocator ownership.
		 * Allocator has another 6 from pool for the request completion.
		 */
		spin_lock(&rba->lock);
		list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
		spin_unlock(&rba->lock);

		atomic_inc(&rba->req_pending);
		queue_work(rba->alloc_wq, &rba->rx_alloc);
	}
}
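
/*
 * Illustrative sketch (not called anywhere in the driver): assuming the
 * RX_POST_REQ_ALLOC == 2 / RX_CLAIM_REQ_ALLOC == 8 split described in the
 * theory-of-operation comment, the check above posts a new allocation
 * request at used_count 2, 10, 18, ... - two used RBDs trigger the request
 * and the rest of each batch is handed to the allocator when the batch is
 * claimed in iwl_pcie_rx_handle().
 */
static inline bool iwl_rx_example_posts_request(int used_count)
{
	return (used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC;
}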

static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
				struct iwl_rxq *rxq,
				struct iwl_rx_mem_buffer *rxb,
				bool emergency)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		u16 sequence;
		bool reclaim;
		int index, cmd_index, len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = offset,
			._rx_page_order = trans_pcie->rx_page_order,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		pkt = rxb_addr(&rxcb);

		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {
			IWL_DEBUG_RX(trans,
				     "Q %d: RB end marker at offset %d\n",
				     rxq->id, offset);
			break;
		}

		WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
			FH_RSCSR_RXQ_POS != rxq->id,
		     "frame on invalid queue - is on %d and indicates %d\n",
		     rxq->id,
		     (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
			FH_RSCSR_RXQ_POS);

		IWL_DEBUG_RX(trans,
			     "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
			     rxq->id, offset,
			     iwl_get_cmd_string(trans,
						iwl_cmd_id(pkt->hdr.cmd,
							   pkt->hdr.group_id,
							   0)),
			     pkt->hdr.group_id, pkt->hdr.cmd,
			     le16_to_cpu(pkt->hdr.sequence));

		len = iwl_rx_packet_len(pkt);
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim && !pkt->hdr.group_id) {
			int i;

			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
							pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = iwl_pcie_get_cmd_index(txq, index);

		if (rxq->id == 0)
			iwl_op_mode_rx(trans->op_mode, &rxq->napi,
				       &rxcb);
		else
			iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
					   &rxcb, rxq->id);

		if (reclaim) {
			kzfree(txq->entries[cmd_index].free_buf);
			txq->entries[cmd_index].free_buf = NULL;
		}

		/*
		 * After here, we should always check rxcb._page_stolen,
		 * if it is true then one of the handlers took the page.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (!rxcb._page_stolen)
				iwl_pcie_hcmd_complete(trans, &rxcb);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
			break;
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

	/* page was stolen from us -- free our reference */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/* Reuse the page if possible. For notification packets and
	 * SKBs that fail to Rx correctly, add them back into the
	 * rx_free list for reuse later. */
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			/*
			 * free the page(s) as well to not break
			 * the invariant that the items on the used
			 * list have no page(s)
			 */
			__free_pages(rxb->page, trans_pcie->rx_page_order);
			rxb->page = NULL;
			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
		} else {
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		}
	} else
		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
}

static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
						  struct iwl_rxq *rxq, int i)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;
	u16 vid;

	if (!trans->cfg->mq_rx_supported) {
		rxb = rxq->queue[i];
		rxq->queue[i] = NULL;
		return rxb;
	}

	/* used_bd is a 32/16 bit value but only 12 bits are used for the vid */
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
		vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF;
	else
		vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF;

	if (!vid || vid > ARRAY_SIZE(trans_pcie->global_table))
		goto out_err;

	rxb = trans_pcie->global_table[vid - 1];
	if (rxb->invalid)
		goto out_err;

	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
		rxb->size = le32_to_cpu(rxq->cd[i].size) & IWL_RX_CD_SIZE;

	rxb->invalid = true;

	return rxb;

out_err:
	WARN(1, "Invalid rxb from HW %u\n", (u32)vid);
	iwl_force_nmi(trans);
	return NULL;
}

/*
 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 */
static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq[queue];
	u32 r, i, count = 0;
	bool emergency = false;

restart:
	spin_lock(&rxq->lock);
	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
	i = rxq->read;

	/* W/A 9000 device step A0 wrap-around bug */
	r &= (rxq->queue_size - 1);

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);

	while (i != r) {
		struct iwl_rx_mem_buffer *rxb;

		if (unlikely(rxq->used_count == rxq->queue_size / 2))
			emergency = true;

		rxb = iwl_pcie_get_rxb(trans, rxq, i);
		if (!rxb)
			goto out;

		IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
		iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency);

		i = (i + 1) & (rxq->queue_size - 1);

		/*
		 * If we have RX_CLAIM_REQ_ALLOC released rx buffers -
		 * try to claim the pre-allocated buffers from the allocator.
		 * If not ready - will try to reclaim next time.
		 * There is no need to reschedule work - allocator exits only
		 * on success
		 */
		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
			iwl_pcie_rx_allocator_get(trans, rxq);

		if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
			struct iwl_rb_allocator *rba = &trans_pcie->rba;

			/* Add the remaining empty RBDs for allocator use */
			spin_lock(&rba->lock);
			list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
			spin_unlock(&rba->lock);
		} else if (emergency) {
			count++;
			if (count == 8) {
				count = 0;
				if (rxq->used_count < rxq->queue_size / 3)
					emergency = false;

				rxq->read = i;
				spin_unlock(&rxq->lock);
				iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
				iwl_pcie_rxq_restock(trans, rxq);
				goto restart;
			}
		}
	}
out:
	/* Backtrack one entry */
	rxq->read = i;
	/* update cr tail with the rxq read pointer */
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
		*rxq->cr_tail = cpu_to_le16(r);
	spin_unlock(&rxq->lock);

	/*
	 * Handle the case where in emergency there are some unallocated RBDs.
	 * Those RBDs are in the used list, but are not tracked by the queue's
	 * used_count, which only counts allocator-owned RBDs.
	 * Unallocated emergency RBDs must be allocated on exit, otherwise
	 * when called again the function may not be in emergency mode and
	 * they will be handed to the allocator with no tracking in the RBD
	 * allocator counters, which will lead to them never being claimed back
	 * by the queue.
	 * By allocating them here, they are now in the queue free list, and
	 * will be restocked by the next call of iwl_pcie_rxq_restock.
	 */
	if (unlikely(emergency && count))
		iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);

	if (rxq->napi.poll)
		napi_gro_flush(&rxq->napi, false);

	iwl_pcie_rxq_restock(trans, rxq);
}

static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
{
	u8 queue = entry->entry;
	struct msix_entry *entries = entry - queue;

	return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
}

/*
 * iwl_pcie_rx_msix_handle - Main entry function for receiving responses from fw
 * This interrupt handler should be used with RSS queue only.
 */
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
{
	struct msix_entry *entry = dev_id;
	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
	struct iwl_trans *trans = trans_pcie->trans;

	trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);

	if (WARN_ON(entry->entry >= trans->num_rx_queues))
		return IRQ_NONE;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	local_bh_disable();
	iwl_pcie_rx_handle(trans, entry->entry);
	local_bh_enable();

	iwl_pcie_clear_irq(trans, entry);

	lock_map_release(&trans->sync_cmd_lockdep_map);

	return IRQ_HANDLED;
}

/*
 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
	if (trans->cfg->internal_wimax_coex &&
	    !trans->cfg->apmg_not_supported &&
	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
			    APMG_PS_CTRL_VAL_RESET_REQ))) {
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		iwl_op_mode_wimax_active(trans->op_mode);
		wake_up(&trans_pcie->wait_command_queue);
		return;
	}

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		if (!trans_pcie->txq[i])
			continue;
		del_timer(&trans_pcie->txq[i]->stuck_timer);
	}

	/* The STATUS_FW_ERROR bit is set in this function. This must happen
	 * before we wake up the command caller, to ensure a proper cleanup. */
	iwl_trans_fw_error(trans);

	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	wake_up(&trans_pcie->wait_command_queue);
}

static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
{
	u32 inta;

	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);

	trace_iwlwifi_dev_irq(trans->dev);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(trans, CSR_INT);

	/* the thread will service interrupts and re-enable them */
	return inta;
}

/* a device (PCI-E) page is 4096 bytes long */
#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))

/* Interrupt handler using the ICT table. With this mechanism the driver
 * stops using the INTA register to read the device's interrupts, since
 * reading that register is expensive. Instead, the device writes interrupt
 * causes into the ICT DRAM table and increments its index, then fires an
 * interrupt to the driver. The driver ORs all ICT table entries from the
 * current index up to the first entry with a 0 value; the result is the
 * interrupt(s) we need to service. The driver then sets the entries back
 * to 0 and updates the index.
 */
static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 inta;
	u32 val = 0;
	u32 read;

	trace_iwlwifi_dev_irq(trans->dev);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
	if (!read)
		return 0;

	/*
	 * Collect all entries up to the first 0, starting from ict_index;
	 * note we already read at ict_index.
	 */
	do {
		val |= read;
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
				trans_pcie->ict_index, read);
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));

		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
					   read);
	} while (read);

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/*
	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
	 * so we use them to decide on the real state of the Rx bit.
	 * In order words, bit 15 is set if bit 18 or bit 19 are set.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

	inta = (0xff & val) | ((0xff00 & val) << 16);
	return inta;
}
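
/*
 * Worked example (illustrative value only): an ICT entry value of 0x8023
 * expands in the last step above to
 * (0xff & 0x8023) | ((0xff00 & 0x8023) << 16) = 0x80000023, i.e. the low
 * byte supplies bits 0-7 of the legacy INTA value and the second byte is
 * shifted up to bits 24-31.
 */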

void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	bool hw_rfkill, prev, report;

	mutex_lock(&trans_pcie->mutex);
	prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
		set_bit(STATUS_RFKILL_HW, &trans->status);
	}
	if (trans_pcie->opmode_down)
		report = hw_rfkill;
	else
		report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);

	IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
		 hw_rfkill ? "disable radio" : "enable radio");

	isr_stats->rfkill++;

	if (prev != report)
		iwl_trans_pcie_rf_kill(trans, report);
	mutex_unlock(&trans_pcie->mutex);

	if (hw_rfkill) {
		if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
				       &trans->status))
			IWL_DEBUG_RF_KILL(trans,
					  "Rfkill while SYNC HCMD in flight\n");
		wake_up(&trans_pcie->wait_command_queue);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		if (trans_pcie->opmode_down)
			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}
}

irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
	struct iwl_trans *trans = dev_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta = 0;
	u32 handled = 0;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock(&trans_pcie->irq_lock);

	/* If the DRAM interrupt (ICT) table isn't set up yet,
	 * fall back to the legacy INTA register.
	 */
	if (likely(trans_pcie->use_ict))
		inta = iwl_pcie_int_cause_ict(trans);
	else
		inta = iwl_pcie_int_cause_non_ict(trans);

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		IWL_DEBUG_ISR(trans,
			      "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
			      inta, trans_pcie->inta_mask,
			      iwl_read32(trans, CSR_INT_MASK),
			      iwl_read32(trans, CSR_FH_INT_STATUS));
		if (inta & (~trans_pcie->inta_mask))
			IWL_DEBUG_ISR(trans,
				      "We got a masked interrupt (0x%08x)\n",
				      inta & (~trans_pcie->inta_mask));
	}

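	/* Only keep the interrupt causes that are currently enabled */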
	inta &= trans_pcie->inta_mask;

	/*
	 * Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC.
	 */
	if (unlikely(!inta)) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		/*
		 * Re-enable interrupts here since we don't
		 * have anything to service
		 */
		if (test_bit(STATUS_INT_ENABLED, &trans->status))
			_iwl_enable_interrupts(trans);
		spin_unlock(&trans_pcie->irq_lock);
		lock_map_release(&trans->sync_cmd_lockdep_map);
		return IRQ_NONE;
	}

	if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/*
		 * Hardware disappeared. It might have
		 * already raised an interrupt.
		 */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		spin_unlock(&trans_pcie->irq_lock);
		goto out;
	}

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS.
	 *
	 * There is a hardware bug in the interrupt mask function: some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore, the
	 * ICT interrupt handling mechanism has another bug that may cause
	 * these unmasked interrupts to go undetected. We work around both
	 * hardware bugs here by ACKing all possible interrupts so that
	 * interrupt coalescing can still be achieved.
	 */
	iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);

	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
			      inta, iwl_read32(trans, CSR_INT_MASK));

	spin_unlock(&trans_pcie->irq_lock);

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected.  Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_pcie_irq_handle_error(trans);

		handled |= CSR_INT_BIT_HW_ERR;

		goto out;
	}

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(trans,
				      "Scheduler finished to transmit the frame/frames.\n");
			isr_stats->sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
			isr_stats->alive++;
			if (trans->cfg->gen2) {
				/*
				 * We can restock, since firmware configured
				 * the RFH
				 */
				iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
			}
		}
	}

	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		iwl_pcie_handle_rfkill_irq(trans);
		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans, "Microcode SW error detected. "
			" Restarting 0x%X.\n", inta);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_check_wrptr(trans);
		iwl_pcie_txq_check_wrptrs(trans);

		isr_stats->wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here. */
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
		    CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(trans, CSR_FH_INT_STATUS,
					CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(trans,
				CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}
		/* Sending an RX interrupt requires many steps to be done in
		 * the device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to an RX race: the driver could receive an
		 * RX interrupt before the shared data changes reflect it;
		 * the periodic interrupt will detect any dangling Rx activity.
		 */

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(trans, CSR_INT_PERIODIC_REG,
			    CSR_INT_PERIODIC_DIS);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt.  If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(trans, CSR_INT_PERIODIC_REG,
				   CSR_INT_PERIODIC_ENA);

		isr_stats->rx++;

		local_bh_disable();
		iwl_pcie_rx_handle(trans, 0);
		local_bh_enable();
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}

	spin_lock(&trans_pcie->irq_lock);
	/* Re-enable all interrupts only if they were disabled by the irq */
	if (test_bit(STATUS_INT_ENABLED, &trans->status))
		_iwl_enable_interrupts(trans);
	/* we are loading the firmware, enable FH_TX interrupt only */
	else if (handled & CSR_INT_BIT_FH_TX)
		iwl_enable_fw_load_int(trans);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(trans);
	spin_unlock(&trans_pcie->irq_lock);

out:
	lock_map_release(&trans->sync_cmd_lockdep_map);
	return IRQ_HANDLED;
}

/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/

/* Free dram table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->ict_tbl) {
		dma_free_coherent(trans->dev, ICT_SIZE,
				  trans_pcie->ict_tbl,
				  trans_pcie->ict_tbl_dma);
		trans_pcie->ict_tbl = NULL;
		trans_pcie->ict_tbl_dma = 0;
	}
}

/*
 * Allocate the DRAM-shared ICT table as an aligned memory block of ICT_SIZE,
 * and also reset all data related to the ICT table interrupt.
 */
int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->ict_tbl =
		dma_zalloc_coherent(trans->dev, ICT_SIZE,
				   &trans_pcie->ict_tbl_dma,
				   GFP_KERNEL);
	if (!trans_pcie->ict_tbl)
		return -ENOMEM;

	/* just an API sanity check ... it is guaranteed to be aligned */
	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
		iwl_pcie_free_ict(trans);
		return -EINVAL;
	}

	return 0;
}

/* Device is going up: inform it that it should use the ICT interrupt table,
 * and tell the driver to start using ICT interrupts.
 */
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;

	if (!trans_pcie->ict_tbl)
		return;

	spin_lock(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE |
	       CSR_DRAM_INIT_TBL_WRAP_CHECK |
	       CSR_DRAM_INIT_TBL_WRITE_POINTER;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	_iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

/* Device is going down: disable ICT interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	trans_pcie->use_ict = false;
	spin_unlock(&trans_pcie->irq_lock);
}

irqreturn_t iwl_pcie_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;

	if (!trans)
		return IRQ_NONE;

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	return IRQ_WAKE_THREAD;
}

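/* In MSI-X mode the hard IRQ does no work of its own; it only wakes the
 * threaded handler below.
 */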
irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
{
	return IRQ_WAKE_THREAD;
}

irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
{
	struct msix_entry *entry = dev_id;
	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
	struct iwl_trans *trans = trans_pcie->trans;
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta_fh, inta_hw;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock(&trans_pcie->irq_lock);
	inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
	inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
	/*
	 * Clear the causes registers to avoid handling the same cause twice.
	 */
	iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
	iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
	spin_unlock(&trans_pcie->irq_lock);

	trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw);

	if (unlikely(!(inta_fh | inta_hw))) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		lock_map_release(&trans->sync_cmd_lockdep_map);
		return IRQ_NONE;
	}

	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "ISR inta_fh 0x%08x, enabled 0x%08x\n",
			      inta_fh,
			      iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));

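	/* Service RX on the default queue (0) and/or the first RSS queue (1),
	 * according to how the shared MSI-X vector was configured.
	 */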
	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
	    inta_fh & MSIX_FH_INT_CAUSES_Q0) {
		local_bh_disable();
		iwl_pcie_rx_handle(trans, 0);
		local_bh_enable();
	}

	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
	    inta_fh & MSIX_FH_INT_CAUSES_Q1) {
		local_bh_disable();
		iwl_pcie_rx_handle(trans, 1);
		local_bh_enable();
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		/*
		 * Wake up uCode load routine,
		 * now that load is complete
		 */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	/* Error detected by uCode */
	if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
	    (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
	    (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
		IWL_ERR(trans,
			"Microcode SW error detected. Restarting 0x%X.\n",
			inta_fh);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
	}

	/* After checking FH register check HW register */
	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans,
			      "ISR inta_hw 0x%08x, enabled 0x%08x\n",
			      inta_hw,
			      iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));

	/* Alive notification via Rx interrupt will do the real work */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
		isr_stats->alive++;
		if (trans->cfg->gen2) {
			/* We can restock, since firmware configured the RFH */
			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
		}
	}

	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560 &&
	    inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) {
		/* Reflect IML transfer status */
		int res = iwl_read32(trans, CSR_IML_RESP_ADDR);

		IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res);
		if (res == IWL_IMAGE_RESP_FAIL) {
			isr_stats->sw++;
			iwl_pcie_irq_handle_error(trans);
		}
	} else if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
		/* uCode wakes up after power-down sleep */
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_check_wrptr(trans);
		iwl_pcie_txq_check_wrptrs(trans);

		isr_stats->wakeup++;
	}

	/* Chip got too hot and stopped itself */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
	}

	/* HW RF KILL switch toggled */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
		iwl_pcie_handle_rfkill_irq(trans);

	if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
		IWL_ERR(trans,
			"Hardware error detected. Restarting.\n");

		isr_stats->hw++;
		iwl_pcie_irq_handle_error(trans);
	}

	iwl_pcie_clear_irq(trans, entry);

	lock_map_release(&trans->sync_cmd_lockdep_map);

	return IRQ_HANDLED;
}