/******************************************************************************
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC.  These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC.  The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot in which the firmware can place
 * a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped)
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt.  The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
 *   When the interrupt handler is called, the request is processed.
 *   The page is either stolen - transferred to the upper layer
 *   or reused - added immediately to the iwl->rxq->rx_free list.
 * + When the page is stolen - the driver updates the matching queue's used
 *   count, detaches the RBD and transfers it to the queue used list.
 *   When there are two used RBDs - they are transferred to the allocator empty
 *   list. Work is then scheduled for the allocator to start allocating
 *   eight buffers.
 *   When another 6 RBDs are used - they are transferred to the allocator
 *   empty list and the driver tries to claim the pre-allocated buffers and
 *   add them to iwl->rxq->rx_free. If it fails - it continues to claim them
 *   until ready.
 *   When there are 8+ buffers in the free list - either from allocation or from
 *   8 reused unstolen pages - restock is called to update the FW and indexes.
 * + In order to make sure the allocator always has RBDs to use for allocation
 *   the allocator has an initial pool of size num_queues*(8-2) - the
 *   maximum number of missing RBDs per allocation request (a request is posted
 *   with 2 empty RBDs, there is no guarantee when the other 6 RBDs are
 *   supplied).
 *   The queues supply the recycling of the rest of the RBDs.
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq.  The driver 'processed' index is updated.
 * + If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock.
 *                            Used only during initialization.
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.
 * iwl_pcie_rx_allocator()     Background work for allocating pages.
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Posts and claims requests to the allocator.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 *
 * RBD life-cycle:
 *
 * Init:
 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
 *
 * Regular Receive interrupt:
 * Page Stolen:
 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
 * Page not Stolen:
 * rxq.queue -> rxq.rx_free -> rxq.queue
 * ...
 *
 */
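
/*
 * A worked example of the index arithmetic above (illustrative sketch only,
 * the values are hypothetical): with queue_size = 256, READ = 10 and
 * WRITE = 9 (WRITE == READ - 1) there is no data for the driver to process
 * and no slot left to restock:
 *
 *	(read - write - 1) & (queue_size - 1) == (10 - 9 - 1) & 255 == 0
 *
 * while READ = 10 and WRITE = 10 (WRITE == READ, the queue is full) leaves
 * the maximum of 255 slots waiting to be restocked:
 *
 *	(10 - 10 - 1) & 255 == 255
 *
 * which is exactly the quantity iwl_rxq_space() below computes.
 */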

/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
	/* Make sure rx queue size is a power of 2 */
	WARN_ON(rxq->queue_size & (rxq->queue_size - 1));

	/*
	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
	 * between empty and completely full queues.
	 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
	 * defined for negative dividends.
	 */
	return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
}

/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}

/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	if (trans->cfg->mq_rx_supported) {
		iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
		return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
					   RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
	} else {
		iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
					   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
					   1000);
	}
}

/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_rxq *rxq)
{
	u32 reg;

	lockdep_assert_held(&rxq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. there is a chance that the NIC is asleep
	 */
	if (!trans->cfg->base_params->shadow_reg_enable &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
				       reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			rxq->need_update = true;
			return;
		}
	}

	rxq->write_actual = round_down(rxq->write, 8);
	if (trans->cfg->mq_rx_supported)
		iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
			    rxq->write_actual);
	else
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}

static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (!rxq->need_update)
			continue;
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		rxq->need_update = false;
		spin_unlock(&rxq->lock);
	}
}

/*
 * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
 */
static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while (rxq->free_count) {
		__le64 *bd = (__le64 *)rxq->bd;

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;
		/* The first 12 bits are expected to be empty */
		WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
		/* Point to Rx buffer via next RBD in circular buffer */
		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
		rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/*
	 * If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8.
	 */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock(&rxq->lock);
	}
}

/*
 * iwl_pcie_rxsq_restock - restock implementation for single queue rx
 */
static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		__le32 *bd = (__le32 *)rxq->bd;
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;

		/* Point to Rx buffer via next RBD in circular buffer */
		bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock(&rxq->lock);
	}
}

/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static
void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	if (trans->cfg->mq_rx_supported)
		iwl_pcie_rxmq_restock(trans, rxq);
	else
		iwl_pcie_rxsq_restock(trans, rxq);
}

/*
 * iwl_pcie_rx_alloc_page - allocates and returns a page.
 *
 */
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
					   gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct page *page;
	gfp_t gfp_mask = priority;

	if (trans_pcie->rx_page_order > 0)
		gfp_mask |= __GFP_COMP;

	/* Alloc a new receive buffer */
	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
	if (!page) {
		if (net_ratelimit())
			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
				       trans_pcie->rx_page_order);
		/*
		 * Issue an error if we don't have enough pre-allocated
		 * buffers.
		 */
		if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
			IWL_CRIT(trans,
				 "Failed to alloc_pages\n");
		return NULL;
	}
	return page;
}

/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 */
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
				   struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;

	while (1) {
		spin_lock(&rxq->lock);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			return;
		}
		spin_unlock(&rxq->lock);

		/* Alloc a new receive buffer */
		page = iwl_pcie_rx_alloc_page(trans, priority);
		if (!page)
			return;

		spin_lock(&rxq->lock);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock(&rxq->lock);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock(&rxq->lock);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}

		spin_lock(&rxq->lock);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock(&rxq->lock);
	}
}

static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < RX_POOL_SIZE; i++) {
		if (!trans_pcie->rx_pool[i].page)
			continue;
		dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
			       PAGE_SIZE << trans_pcie->rx_page_order,
			       DMA_FROM_DEVICE);
		__free_pages(trans_pcie->rx_pool[i].page,
			     trans_pcie->rx_page_order);
		trans_pcie->rx_pool[i].page = NULL;
	}
}

/*
 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
 *
 * Allocates 8 pages for each received request.
 * Called as a scheduled work item.
 */
static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct list_head local_empty;
	int pending = atomic_xchg(&rba->req_pending, 0);

	IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);

	/* If we were scheduled - there is at least one request */
	spin_lock(&rba->lock);
	/* swap out the rba->rbd_empty to a local list */
	list_replace_init(&rba->rbd_empty, &local_empty);
	spin_unlock(&rba->lock);

	while (pending) {
		int i;
		LIST_HEAD(local_allocated);
		gfp_t gfp_mask = GFP_KERNEL;

		/* Do not post a warning if there are only a few requests */
		if (pending < RX_PENDING_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
			struct iwl_rx_mem_buffer *rxb;
			struct page *page;

			/* List should never be empty - each reused RBD is
			 * returned to the list, and initial pool covers any
			 * possible gap between the time the page is allocated
			 * and the time the RBD is added.
			 */
			BUG_ON(list_empty(&local_empty));
			/* Get the first rxb from the rbd list */
			rxb = list_first_entry(&local_empty,
					       struct iwl_rx_mem_buffer, list);
			BUG_ON(rxb->page);

			/* Alloc a new receive buffer */
			page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
			if (!page)
				continue;
			rxb->page = page;

			/* Get physical address of the RB */
			rxb->page_dma = dma_map_page(trans->dev, page, 0,
					PAGE_SIZE << trans_pcie->rx_page_order,
					DMA_FROM_DEVICE);
			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
				rxb->page = NULL;
				__free_pages(page, trans_pcie->rx_page_order);
				continue;
			}

			/* move the allocated entry to the out list */
			list_move(&rxb->list, &local_allocated);
			i++;
		}

		pending--;
		if (!pending) {
			pending = atomic_xchg(&rba->req_pending, 0);
			IWL_DEBUG_RX(trans,
				     "Pending allocation requests = %d\n",
				     pending);
		}

		spin_lock(&rba->lock);
		/* add the allocated rbds to the allocator allocated list */
		list_splice_tail(&local_allocated, &rba->rbd_allocated);
		/* get more empty RBDs for current pending requests */
		list_splice_tail_init(&rba->rbd_empty, &local_empty);
		spin_unlock(&rba->lock);

		atomic_inc(&rba->req_ready);
	}

	spin_lock(&rba->lock);
	/* return unused rbds to the allocator empty list */
	list_splice_tail(&local_empty, &rba->rbd_empty);
	spin_unlock(&rba->lock);
}

/*
 * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
 *
 * Called by the queue when it has posted an allocation request and
 * has freed 8 RBDs in order to restock itself.
 * This function directly moves the allocated RBs to the queue's ownership
 * and updates the relevant counters.
 */
static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
				      struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	lockdep_assert_held(&rxq->lock);

	/*
	 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
	 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
	 * function will return early, as there are no ready requests.
	 * atomic_dec_if_positive will perform the *actual* decrement only if
	 * req_ready > 0, i.e. - there are ready requests and the function
	 * hands one request to the caller.
	 */
	if (atomic_dec_if_positive(&rba->req_ready) < 0)
		return;

	spin_lock(&rba->lock);
	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
		/* Get next free Rx buffer, remove it from free list */
		struct iwl_rx_mem_buffer *rxb =
			list_first_entry(&rba->rbd_allocated,
					 struct iwl_rx_mem_buffer, list);

		list_move(&rxb->list, &rxq->rx_free);
	}
	spin_unlock(&rba->lock);

	rxq->used_count -= RX_CLAIM_REQ_ALLOC;
	rxq->free_count += RX_CLAIM_REQ_ALLOC;
}

void iwl_pcie_rx_allocator_work(struct work_struct *data)
{
	struct iwl_rb_allocator *rba_p =
		container_of(data, struct iwl_rb_allocator, rx_alloc);
	struct iwl_trans_pcie *trans_pcie =
		container_of(rba_p, struct iwl_trans_pcie, rba);

	iwl_pcie_rx_allocator(trans_pcie->trans);
}

static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct device *dev = trans->dev;
	int i;
	int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
						      sizeof(__le32);

	if (WARN_ON(trans_pcie->rxq))
		return -EINVAL;

	trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
				  GFP_KERNEL);
	if (!trans_pcie->rxq)
		return -EINVAL;

	spin_lock_init(&rba->lock);

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		spin_lock_init(&rxq->lock);
		if (trans->cfg->mq_rx_supported)
			rxq->queue_size = MQ_RX_TABLE_SIZE;
		else
			rxq->queue_size = RX_QUEUE_SIZE;

		/*
		 * Allocate the circular buffer of Read Buffer Descriptors
		 * (RBDs)
		 */
		rxq->bd = dma_zalloc_coherent(dev,
					     free_size * rxq->queue_size,
					     &rxq->bd_dma, GFP_KERNEL);
		if (!rxq->bd)
			goto err;

		if (trans->cfg->mq_rx_supported) {
			rxq->used_bd = dma_zalloc_coherent(dev,
							   sizeof(__le32) *
							   rxq->queue_size,
							   &rxq->used_bd_dma,
							   GFP_KERNEL);
			if (!rxq->used_bd)
				goto err;
		}

		/* Allocate the driver's pointer to receive buffer status */
		rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
						   &rxq->rb_stts_dma,
						   GFP_KERNEL);
		if (!rxq->rb_stts)
			goto err;
	}
	return 0;

err:
	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (rxq->bd)
			dma_free_coherent(dev, free_size * rxq->queue_size,
					  rxq->bd, rxq->bd_dma);
		rxq->bd_dma = 0;
		rxq->bd = NULL;

		if (rxq->rb_stts)
			dma_free_coherent(trans->dev,
					  sizeof(struct iwl_rb_status),
					  rxq->rb_stts, rxq->rb_stts_dma);

		if (rxq->used_bd)
			dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size,
					  rxq->used_bd, rxq->used_bd_dma);
		rxq->used_bd_dma = 0;
		rxq->used_bd = NULL;
	}
	kfree(trans_pcie->rxq);

	return -ENOMEM;
}

static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	unsigned long flags;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_4K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
	}

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;

	/* Stop Rx DMA */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		    (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
		    rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4k or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
		    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
		    FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
		    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
		    rb_size |
		    (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
		    (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	iwl_trans_release_nic_access(trans, &flags);

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (trans->cfg->host_interrupt_operation_mode)
		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}

void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable)
{
	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_9000)
		return;

	if (CSR_HW_REV_STEP(trans->hw_rev) != SILICON_A_STEP)
		return;

	if (!trans->cfg->integrated)
		return;

	/*
	 * Turn on the chicken-bits that cause MAC wakeup for RX-related
	 * values.
	 * This costs some power, but is needed as a W/A for the 9000
	 * integrated A-step bug where shadow registers are not in the
	 * retention list and their value is lost when the NIC powers down.
	 */
	iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
		    CSR_MAC_SHADOW_REG_CTRL_RX_WAKE);
	iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTL2,
		    CSR_MAC_SHADOW_REG_CTL2_RX_WAKE);
}

static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size, enabled = 0;
	unsigned long flags;
	int i;

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_4K:
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = RFH_RXF_DMA_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = RFH_RXF_DMA_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
	}

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;

	/* Stop Rx DMA */
	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
	/* disable free and used rx queue operation */
	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);

	for (i = 0; i < trans->num_rx_queues; i++) {
		/* Tell device where to find RBD free table in DRAM */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_FRBDCB_BA_LSB(i),
					 trans_pcie->rxq[i].bd_dma);
		/* Tell device where to find RBD used table in DRAM */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_URBDCB_BA_LSB(i),
					 trans_pcie->rxq[i].used_bd_dma);
		/* Tell device where in DRAM to update its Rx status */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_URBD_STTS_WPTR_LSB(i),
					 trans_pcie->rxq[i].rb_stts_dma);
		/* Reset device index tables */
		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
		iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);

		enabled |= BIT(i) | BIT(i + 16);
	}

	/*
	 * Enable Rx DMA
	 * Rx buffer size 4k or 8k or 12k
	 * Min RB size 4 or 8
	 * Drop frames that exceed RB size
	 * 512 RBDs
	 */
	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
			       RFH_DMA_EN_ENABLE_VAL | rb_size |
			       RFH_RXF_DMA_MIN_RB_4_8 |
			       RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
			       RFH_RXF_DMA_RBDCB_SIZE_512);

	/*
	 * Activate DMA snooping.
	 * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
	 * Default queue is 0
	 */
	iwl_write_prph_no_grab(trans, RFH_GEN_CFG,
			       RFH_GEN_CFG_RFH_DMA_SNOOP |
			       RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
			       RFH_GEN_CFG_SERVICE_DMA_SNOOP |
			       RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
					       trans->cfg->integrated ?
					       RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
					       RFH_GEN_CFG_RB_CHUNK_SIZE_128));
	/* Enable the relevant rx queues */
	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);

	iwl_trans_release_nic_access(trans, &flags);

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	iwl_pcie_enable_rx_wake(trans, true);
}

static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
	lockdep_assert_held(&rxq->lock);

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	rxq->free_count = 0;
	rxq->used_count = 0;
}

static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
{
	WARN_ON(1);
	return 0;
}

static int _iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *def_rxq;
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i, err, queue_size, allocator_pool_size, num_alloc;

	if (!trans_pcie->rxq) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}
	def_rxq = trans_pcie->rxq;

	spin_lock(&rba->lock);
	atomic_set(&rba->req_pending, 0);
	atomic_set(&rba->req_ready, 0);
	INIT_LIST_HEAD(&rba->rbd_allocated);
	INIT_LIST_HEAD(&rba->rbd_empty);
	spin_unlock(&rba->lock);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_free_rbs_pool(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		def_rxq->queue[i] = NULL;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		rxq->id = i;

		spin_lock(&rxq->lock);
		/*
		 * Set read write pointer to reflect that we have processed
		 * and used all buffers, but have not restocked the Rx queue
		 * with fresh buffers
		 */
		rxq->read = 0;
		rxq->write = 0;
		rxq->write_actual = 0;
		memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));

		iwl_pcie_rx_init_rxb_lists(rxq);

		if (!rxq->napi.poll)
			netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
				       iwl_pcie_dummy_napi_poll, 64);

		spin_unlock(&rxq->lock);
	}

	/* move the pool to the default queue and allocator ownerships */
	queue_size = trans->cfg->mq_rx_supported ?
		     MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
	allocator_pool_size = trans->num_rx_queues *
		(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
	num_alloc = queue_size + allocator_pool_size;
	BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
		     ARRAY_SIZE(trans_pcie->rx_pool));
	for (i = 0; i < num_alloc; i++) {
		struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];

		if (i < allocator_pool_size)
			list_add(&rxb->list, &rba->rbd_empty);
		else
			list_add(&rxb->list, &def_rxq->rx_used);
		trans_pcie->global_table[i] = rxb;
		rxb->vid = (u16)(i + 1);
		rxb->invalid = true;
	}

	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);

	return 0;
}

int iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = _iwl_pcie_rx_init(trans);

	if (ret)
		return ret;

	if (trans->cfg->mq_rx_supported)
		iwl_pcie_rx_mq_hw_init(trans);
	else
		iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);

	iwl_pcie_rxq_restock(trans, trans_pcie->rxq);

	spin_lock(&trans_pcie->rxq->lock);
	iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
	spin_unlock(&trans_pcie->rxq->lock);

	return 0;
}

int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
{
	/*
	 * We don't configure the RFH.
	 * Restock will be done at alive, after firmware configured the RFH.
	 */
	return _iwl_pcie_rx_init(trans);
}

void iwl_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
					      sizeof(__le32);
	int i;

	/*
	 * if rxq is NULL, it means that nothing has been allocated,
	 * exit now
	 */
	if (!trans_pcie->rxq) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	cancel_work_sync(&rba->rx_alloc);

	iwl_pcie_free_rbs_pool(trans);

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (rxq->bd)
			dma_free_coherent(trans->dev,
					  free_size * rxq->queue_size,
					  rxq->bd, rxq->bd_dma);
		rxq->bd_dma = 0;
		rxq->bd = NULL;

		if (rxq->rb_stts)
			dma_free_coherent(trans->dev,
					  sizeof(struct iwl_rb_status),
					  rxq->rb_stts, rxq->rb_stts_dma);
		else
			IWL_DEBUG_INFO(trans,
				       "Free rxq->rb_stts which is NULL\n");

		if (rxq->used_bd)
			dma_free_coherent(trans->dev,
					  sizeof(__le32) * rxq->queue_size,
					  rxq->used_bd, rxq->used_bd_dma);
		rxq->used_bd_dma = 0;
		rxq->used_bd = NULL;

		if (rxq->napi.poll)
			netif_napi_del(&rxq->napi);
	}
	kfree(trans_pcie->rxq);
}

/*
 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
 *
 * Called when an RBD can be reused. The RBD is transferred to the allocator.
 * When there are 2 empty RBDs - a request for allocation is posted
 */
static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
				  struct iwl_rx_mem_buffer *rxb,
				  struct iwl_rxq *rxq, bool emergency)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;

	/* Move the RBD to the used list, will be moved to allocator in batches
	 * before claiming or posting a request */
	list_add_tail(&rxb->list, &rxq->rx_used);

	if (unlikely(emergency))
		return;

	/* Count the allocator owned RBDs */
	rxq->used_count++;

	/* If we have RX_POST_REQ_ALLOC newly released rx buffers -
	 * issue a request for the allocator. Modulo RX_CLAIM_REQ_ALLOC is
	 * used for the case we failed to claim RX_CLAIM_REQ_ALLOC buffers
	 * earlier but still need to post another request.
	 */
	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
		/* Move the 2 RBDs to the allocator ownership.
		 * Allocator has another 6 from pool for the request completion */
		spin_lock(&rba->lock);
		list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
		spin_unlock(&rba->lock);

		atomic_inc(&rba->req_pending);
		queue_work(rba->alloc_wq, &rba->rx_alloc);
	}
}

static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
				struct iwl_rxq *rxq,
				struct iwl_rx_mem_buffer *rxb,
				bool emergency)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		u16 sequence;
		bool reclaim;
		int index, cmd_index, len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = offset,
			._rx_page_order = trans_pcie->rx_page_order,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		pkt = rxb_addr(&rxcb);

		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {
			IWL_DEBUG_RX(trans,
				     "Q %d: RB end marker at offset %d\n",
				     rxq->id, offset);
			break;
		}

		WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
			FH_RSCSR_RXQ_POS != rxq->id,
		     "frame on invalid queue - is on %d and indicates %d\n",
		     rxq->id,
		     (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
			FH_RSCSR_RXQ_POS);

		IWL_DEBUG_RX(trans,
			     "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
			     rxq->id, offset,
			     iwl_get_cmd_string(trans,
						iwl_cmd_id(pkt->hdr.cmd,
							   pkt->hdr.group_id,
							   0)),
			     pkt->hdr.group_id, pkt->hdr.cmd,
			     le16_to_cpu(pkt->hdr.sequence));

		len = iwl_rx_packet_len(pkt);
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim && !pkt->hdr.group_id) {
			int i;

			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
							pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = get_cmd_index(txq, index);

		if (rxq->id == 0)
			iwl_op_mode_rx(trans->op_mode, &rxq->napi,
				       &rxcb);
		else
			iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
					   &rxcb, rxq->id);

		if (reclaim) {
			kzfree(txq->entries[cmd_index].free_buf);
			txq->entries[cmd_index].free_buf = NULL;
		}

		/*
		 * After here, we should always check rxcb._page_stolen,
		 * if it is true then one of the handlers took the page.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (!rxcb._page_stolen)
				iwl_pcie_hcmd_complete(trans, &rxcb);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

	/* page was stolen from us -- free our reference */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/* Reuse the page if possible. For notification packets and
	 * SKBs that fail to Rx correctly, add them back into the
	 * rx_free list for reuse later. */
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			/*
			 * free the page(s) as well to not break
			 * the invariant that the items on the used
			 * list have no page(s)
			 */
			__free_pages(rxb->page, trans_pcie->rx_page_order);
			rxb->page = NULL;
			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
		} else {
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		}
	} else
		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
}

/*
 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 */
static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq[queue];
	u32 r, i, count = 0;
	bool emergency = false;

restart:
	spin_lock(&rxq->lock);
	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
	i = rxq->read;

	/* W/A 9000 device step A0 wrap-around bug */
	r &= (rxq->queue_size - 1);

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);

	while (i != r) {
		struct iwl_rx_mem_buffer *rxb;

		if (unlikely(rxq->used_count == rxq->queue_size / 2))
			emergency = true;

		if (trans->cfg->mq_rx_supported) {
			/*
			 * used_bd entries are 32 bit wide, but only 12 bits are
			 * used to retrieve the vid
			 */
			u16 vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF;

			if (WARN(!vid ||
				 vid > ARRAY_SIZE(trans_pcie->global_table),
				 "Invalid rxb index from HW %u\n", (u32)vid)) {
				iwl_force_nmi(trans);
				goto out;
			}
			rxb = trans_pcie->global_table[vid - 1];
			if (WARN(rxb->invalid,
				 "Invalid rxb from HW %u\n", (u32)vid)) {
				iwl_force_nmi(trans);
				goto out;
			}
			rxb->invalid = true;
		} else {
			rxb = rxq->queue[i];
			rxq->queue[i] = NULL;
		}

		IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
		iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency);

		i = (i + 1) & (rxq->queue_size - 1);

		/*
		 * If we have RX_CLAIM_REQ_ALLOC released rx buffers -
		 * try to claim the pre-allocated buffers from the allocator.
		 * If not ready - will try to reclaim next time.
		 * There is no need to reschedule work - allocator exits only
		 * on success
		 */
		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
			iwl_pcie_rx_allocator_get(trans, rxq);

		if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
			struct iwl_rb_allocator *rba = &trans_pcie->rba;

			/* Add the remaining empty RBDs for allocator use */
			spin_lock(&rba->lock);
			list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
			spin_unlock(&rba->lock);
		} else if (emergency) {
			count++;
			if (count == 8) {
				count = 0;
				if (rxq->used_count < rxq->queue_size / 3)
					emergency = false;

				rxq->read = i;
				spin_unlock(&rxq->lock);
				iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
				iwl_pcie_rxq_restock(trans, rxq);
				goto restart;
			}
		}
	}
out:
	/* Backtrack one entry */
	rxq->read = i;
	spin_unlock(&rxq->lock);

	/*
	 * handle a case where in emergency there are some unallocated RBDs.
	 * those RBDs are in the used list, but are not tracked by the queue's
	 * used_count which counts allocator owned RBDs.
	 * unallocated emergency RBDs must be allocated on exit, otherwise
	 * when called again the function may not be in emergency mode and
	 * they will be handed to the allocator with no tracking in the RBD
	 * allocator counters, which will lead to them never being claimed back
	 * by the queue.
	 * by allocating them here, they are now in the queue free list, and
	 * will be restocked by the next call of iwl_pcie_rxq_restock.
	 */
	if (unlikely(emergency && count))
		iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);

	if (rxq->napi.poll)
		napi_gro_flush(&rxq->napi, false);

	iwl_pcie_rxq_restock(trans, rxq);
}

static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
{
	u8 queue = entry->entry;
	struct msix_entry *entries = entry - queue;

	return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
}

static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
				      struct msix_entry *entry)
{
	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as
	 * write 1 clear (W1C) register, meaning that it's being clear
	 * by writing 1 to the bit.
	 */
	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
}

/*
 * iwl_pcie_irq_rx_msix_handler - Main entry function for receiving responses from fw
 * This interrupt handler should be used with RSS queue only.
 */
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
{
	struct msix_entry *entry = dev_id;
	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
	struct iwl_trans *trans = trans_pcie->trans;

	trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);

	if (WARN_ON(entry->entry >= trans->num_rx_queues))
		return IRQ_NONE;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	local_bh_disable();
	iwl_pcie_rx_handle(trans, entry->entry);
	local_bh_enable();

	iwl_pcie_clear_irq(trans, entry);

	lock_map_release(&trans->sync_cmd_lockdep_map);

	return IRQ_HANDLED;
}

/*
 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
	if (trans->cfg->internal_wimax_coex &&
	    !trans->cfg->apmg_not_supported &&
	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
			    APMG_PS_CTRL_VAL_RESET_REQ))) {
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		iwl_op_mode_wimax_active(trans->op_mode);
		wake_up(&trans_pcie->wait_command_queue);
		return;
	}

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		if (!trans_pcie->txq[i])
			continue;
		del_timer(&trans_pcie->txq[i]->stuck_timer);
	}

	/* The STATUS_FW_ERROR bit is set in this function. This must happen
	 * before we wake up the command caller, to ensure a proper cleanup. */
	iwl_trans_fw_error(trans);

	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	wake_up(&trans_pcie->wait_command_queue);
}

static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
{
	u32 inta;

	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);

	trace_iwlwifi_dev_irq(trans->dev);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(trans, CSR_INT);

	/* the thread will service interrupts and re-enable them */
	return inta;
}

/* a device (PCI-E) page is 4096 bytes long */
#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))

/* Interrupt handler using the ICT table. With this mechanism the driver
 * stops using the INTA register to get the device's interrupts, since
 * reading that register is expensive. The device instead writes interrupts
 * into the ICT DRAM table and increments its index, then fires an interrupt
 * to the driver. The driver ORs all ICT table entries from the current
 * index up to the first entry with a 0 value; the result is the interrupt
 * we need to service. The driver then sets the entries back to 0 and
 * updates the index.
 */
static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 inta;
	u32 val = 0;
	u32 read;

	trace_iwlwifi_dev_irq(trans->dev);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
	if (!read)
		return 0;

	/*
	 * Collect all entries up to the first 0, starting from ict_index;
	 * note we already read at ict_index.
	 */
	do {
		val |= read;
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
				trans_pcie->ict_index, read);
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));

		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
					   read);
	} while (read);

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/*
	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
	 * so we use them to decide on the real state of the Rx bit.
	 * In other words, bit 15 is set if bit 18 or bit 19 is set.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

	inta = (0xff & val) | ((0xff00 & val) << 16);
	return inta;
}

void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	bool hw_rfkill, prev, report;

	mutex_lock(&trans_pcie->mutex);
	prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
		set_bit(STATUS_RFKILL_HW, &trans->status);
	}
	if (trans_pcie->opmode_down)
		report = hw_rfkill;
	else
		report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);

	IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
		 hw_rfkill ? "disable radio" : "enable radio");

	isr_stats->rfkill++;

	if (prev != report)
		iwl_trans_pcie_rf_kill(trans, report);
	mutex_unlock(&trans_pcie->mutex);

	if (hw_rfkill) {
		if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
				       &trans->status))
			IWL_DEBUG_RF_KILL(trans,
					  "Rfkill while SYNC HCMD in flight\n");
		wake_up(&trans_pcie->wait_command_queue);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		if (trans_pcie->opmode_down)
			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}
}

irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
	struct iwl_trans *trans = dev_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta = 0;
	u32 handled = 0;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock(&trans_pcie->irq_lock);

	/* dram interrupt table not set yet,
	 * use legacy interrupt.
	 */
	if (likely(trans_pcie->use_ict))
		inta = iwl_pcie_int_cause_ict(trans);
	else
		inta = iwl_pcie_int_cause_non_ict(trans);

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		IWL_DEBUG_ISR(trans,
			      "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
			      inta, trans_pcie->inta_mask,
			      iwl_read32(trans, CSR_INT_MASK),
			      iwl_read32(trans, CSR_FH_INT_STATUS));
		if (inta & (~trans_pcie->inta_mask))
			IWL_DEBUG_ISR(trans,
				      "We got a masked interrupt (0x%08x)\n",
				      inta & (~trans_pcie->inta_mask));
	}

	inta &= trans_pcie->inta_mask;

	/*
	 * Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC.
	 */
	if (unlikely(!inta)) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		/*
		 * Re-enable interrupts here since we don't
		 * have anything to service
		 */
		if (test_bit(STATUS_INT_ENABLED, &trans->status))
			_iwl_enable_interrupts(trans);
		spin_unlock(&trans_pcie->irq_lock);
		lock_map_release(&trans->sync_cmd_lockdep_map);
		return IRQ_NONE;
	}

	if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/*
		 * Hardware disappeared. It might have
		 * already raised an interrupt.
		 */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		spin_unlock(&trans_pcie->irq_lock);
		goto out;
	}

	/* Ack/clear/reset pending uCode interrupts.
	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 */
	/* There is a hardware bug in the interrupt mask function that some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore the
	 * ICT interrupt handling mechanism has another bug that might cause
	 * these unmasked interrupts to fail to be detected. We work around the
	 * hardware bugs here by ACKing all the possible interrupts so that
	 * interrupt coalescing can still be achieved.
	 */
	iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);

	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
			      inta, iwl_read32(trans, CSR_INT_MASK));
1637

1638
	spin_unlock(&trans_pcie->irq_lock);
1639

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected.  Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_pcie_irq_handle_error(trans);

		handled |= CSR_INT_BIT_HW_ERR;

		goto out;
	}

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(trans,
				      "Scheduler finished transmitting the frame(s).\n");
			isr_stats->sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
			isr_stats->alive++;
			if (trans->cfg->gen2) {
				/*
				 * We can restock, since firmware configured
				 * the RFH
				 */
				iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
			}
		}
	}

	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		iwl_pcie_handle_rfkill_irq(trans);
		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans, "Microcode SW error detected. Restarting 0x%X.\n",
			inta);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_check_wrptr(trans);
		iwl_pcie_txq_check_wrptrs(trans);

		isr_stats->wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here.
	 */
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
		    CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(trans, CSR_FH_INT_STATUS,
					CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(trans,
				CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}
		/* Sending an RX interrupt requires many steps to be done in
		 * the device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to an RX race: the driver may receive the RX
		 * interrupt before the shared data changes are visible;
		 * the periodic interrupt will detect any dangling Rx activity.
		 */

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(trans, CSR_INT_PERIODIC_REG,
			    CSR_INT_PERIODIC_DIS);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * a real RX interrupt (instead of just the periodic int), to
		 * catch any dangling Rx interrupt.  If it was just the
		 * periodic interrupt, there was no dangling Rx activity, and
		 * no need to extend the periodic interrupt; one-shot is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(trans, CSR_INT_PERIODIC_REG,
				   CSR_INT_PERIODIC_ENA);

		isr_stats->rx++;

		local_bh_disable();
		iwl_pcie_rx_handle(trans, 0);
		local_bh_enable();
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}

	spin_lock(&trans_pcie->irq_lock);
	/* only re-enable all interrupts if they were disabled by the irq */
	if (test_bit(STATUS_INT_ENABLED, &trans->status))
		_iwl_enable_interrupts(trans);
	/* we are loading the firmware, enable FH_TX interrupt only */
	else if (handled & CSR_INT_BIT_FH_TX)
		iwl_enable_fw_load_int(trans);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(trans);
	spin_unlock(&trans_pcie->irq_lock);

out:
	lock_map_release(&trans->sync_cmd_lockdep_map);
	return IRQ_HANDLED;
}

/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/
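/*
 * Interrupt Cause Table (ICT) overview: rather than reading the interrupt
 * cause from CSR_INT over PCIe on every interrupt, the driver hands the
 * device a DMA-coherent table of ICT_SIZE bytes in host memory.  The device
 * writes each new interrupt cause at the next index in that table, so the
 * handler can pick causes up with a plain memory read (see the
 * iwl_pcie_int_cause_ict() call in iwl_pcie_irq_handler() above).  The
 * helpers below allocate, program and tear down that table.
 */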

/* Free the DRAM-based ICT table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->ict_tbl) {
		dma_free_coherent(trans->dev, ICT_SIZE,
				  trans_pcie->ict_tbl,
				  trans_pcie->ict_tbl_dma);
		trans_pcie->ict_tbl = NULL;
		trans_pcie->ict_tbl_dma = 0;
	}
}

/*
 * Allocate the DRAM-shared ICT table: an aligned memory
 * block of ICT_SIZE bytes.
 * Also reset all data related to ICT-table interrupt handling.
 */
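/*
 * Alignment note: iwl_pcie_reset_ict() programs only
 * (ict_tbl_dma >> ICT_SHIFT) into CSR_DRAM_INT_TBL_REG, so the low address
 * bits are dropped; the WARN_ON below therefore insists the DMA address is
 * ICT_SIZE-aligned (this assumes ICT_SIZE == 1 << ICT_SHIFT).
 */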
int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->ict_tbl =
		dma_zalloc_coherent(trans->dev, ICT_SIZE,
				   &trans_pcie->ict_tbl_dma,
				   GFP_KERNEL);
	if (!trans_pcie->ict_tbl)
		return -ENOMEM;

	/* just an API sanity check ... it is guaranteed to be aligned */
	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
		iwl_pcie_free_ict(trans);
		return -EINVAL;
	}

	return 0;
}

/* The device is going up: inform it that it should use the ICT interrupt
 * table, and tell the driver side to start using ICT interrupts.
 */
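/*
 * Sequence (see the body below): quiesce interrupts, zero the table, program
 * CSR_DRAM_INT_TBL_REG with the table's DMA address (shifted by ICT_SHIFT)
 * plus the enable/wrap-check/write-pointer bits, ack any stale causes in
 * CSR_INT, then re-enable interrupts.
 */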
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;

	if (!trans_pcie->ict_tbl)
		return;

	spin_lock(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE |
	       CSR_DRAM_INIT_TBL_WRAP_CHECK |
	       CSR_DRAM_INIT_TBL_WRITE_POINTER;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	_iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

/* Device is going down: disable ICT interrupt usage */
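/*
 * Only use_ict is cleared here: the table itself stays allocated (it is
 * freed by iwl_pcie_free_ict()), and the interrupt handler simply falls
 * back to the legacy cause read via iwl_pcie_int_cause_non_ict().
 */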
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	trans_pcie->use_ict = false;
	spin_unlock(&trans_pcie->irq_lock);
}

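/*
 * Hard-IRQ (top half) handler for the legacy interrupt path: it only masks
 * further interrupts and returns IRQ_WAKE_THREAD so the real work runs in
 * the threaded handler, iwl_pcie_irq_handler(), above.
 */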
irqreturn_t iwl_pcie_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;

	if (!trans)
		return IRQ_NONE;

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the irq thread will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	return IRQ_WAKE_THREAD;
}
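
/*
 * MSI-X interrupt handling: the hard-IRQ handler below just wakes the
 * per-vector thread; iwl_pcie_irq_msix_handler() then reads the FH and HW
 * cause registers (CSR_MSIX_FH_INT_CAUSES_AD / CSR_MSIX_HW_INT_CAUSES_AD),
 * acks them and dispatches each cause.  No ICT table is involved on this
 * path.
 */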

irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
{
	return IRQ_WAKE_THREAD;
}

irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
{
	struct msix_entry *entry = dev_id;
	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
	struct iwl_trans *trans = trans_pcie->trans;
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta_fh, inta_hw;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock(&trans_pcie->irq_lock);
	inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
	inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
	/*
	 * Clear the cause registers to avoid handling the same cause twice.
	 */
	iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
	iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
	spin_unlock(&trans_pcie->irq_lock);

	trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw);

	if (unlikely(!(inta_fh | inta_hw))) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		lock_map_release(&trans->sync_cmd_lockdep_map);
		return IRQ_NONE;
	}

	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "ISR inta_fh 0x%08x, enabled 0x%08x\n",
			      inta_fh,
			      iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));

	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
	    inta_fh & MSIX_FH_INT_CAUSES_Q0) {
		local_bh_disable();
		iwl_pcie_rx_handle(trans, 0);
		local_bh_enable();
	}

	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
	    inta_fh & MSIX_FH_INT_CAUSES_Q1) {
		local_bh_disable();
		iwl_pcie_rx_handle(trans, 1);
		local_bh_enable();
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		/*
		 * Wake up uCode load routine,
		 * now that load is complete
		 */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	/* Error detected by uCode */
	if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
	    (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) {
		IWL_ERR(trans,
			"Microcode SW error detected. Restarting 0x%X.\n",
			inta_fh);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
	}

	/* After checking the FH register, check the HW register */
	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans,
			      "ISR inta_hw 0x%08x, enabled 0x%08x\n",
			      inta_hw,
			      iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));

	/* Alive notification via Rx interrupt will do the real work */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
		isr_stats->alive++;
		if (trans->cfg->gen2) {
			/* We can restock, since firmware configured the RFH */
			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
		}
	}

	/* uCode wakes up after power-down sleep */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_check_wrptr(trans);
		iwl_pcie_txq_check_wrptrs(trans);

		isr_stats->wakeup++;
	}

	/* Chip got too hot and stopped itself */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
	}

	/* HW RF KILL switch toggled */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
		iwl_pcie_handle_rfkill_irq(trans);

	if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
		IWL_ERR(trans,
			"Hardware error detected. Restarting.\n");

		isr_stats->hw++;
		iwl_pcie_irq_handle_error(trans);
	}

	iwl_pcie_clear_irq(trans, entry);

	lock_map_release(&trans->sync_cmd_lockdep_map);

	return IRQ_HANDLED;
}