/******************************************************************************
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which point to Receive Buffers to be filled by the NIC.  These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC.  The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped)
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt.  The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
 *   When the interrupt handler is called, the request is processed.
 *   The page is either stolen - transferred to the upper layer
 *   or reused - added immediately to the iwl->rxq->rx_free list.
 * + When the page is stolen - the driver updates the matching queue's used
 *   count, detaches the RBD and transfers it to the queue used list.
 *   When there are two used RBDs - they are transferred to the allocator empty
 *   list. Work is then scheduled for the allocator to start allocating
 *   eight buffers.
 *   When there are another 6 used RBDs - they are transferred to the allocator
 *   empty list and the driver tries to claim the pre-allocated buffers and
 *   add them to iwl->rxq->rx_free. If it fails - it continues to claim them
 *   until ready.
 *   When there are 8+ buffers in the free list - either from allocation or from
 *   8 reused unstolen pages - restock is called to update the FW and indexes.
 * + In order to make sure the allocator always has RBDs to use for allocation
 *   the allocator has an initial pool of size num_queues*(8-2) - the maximum
 *   number of missing RBDs per allocation request (a request is posted with
 *   2 empty RBDs, and there is no guarantee when the other 6 RBDs are
 *   supplied). The queues supply the recycling of the rest of the RBDs.
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq.  The driver 'processed' index is updated.
 * + If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock.
 *                            Used only during initialization.
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.
 * iwl_pcie_rx_allocator()     Background work for allocating pages.
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Posts and claims requests to the allocator.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 *
 * RBD life-cycle:
 *
 * Init:
 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
 *
 * Regular Receive interrupt:
 * Page Stolen:
 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
 * Page not Stolen:
 * rxq.queue -> rxq.rx_free -> rxq.queue
 * ...
 *
 */
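
/*
 * Illustrative sketch (not part of the driver): the empty/full conventions
 * described above, spelled out as plain index arithmetic. The example_rxq_*
 * helpers below are hypothetical and exist only for illustration; the
 * driver's real free-slot computation is iwl_rxq_space() further down.
 * 'size' is assumed to be a power of 2, as the driver's queue sizes are.
 */
static inline bool example_rxq_is_empty(u32 read, u32 write, u32 size)
{
	/* no good data: WRITE sits one slot behind READ (mod size) */
	return write == ((read - 1) & (size - 1));
}

static inline bool example_rxq_is_full(u32 read, u32 write)
{
	/* completely full: WRITE has caught up with READ */
	return write == read;
}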

/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
	/* Make sure rx queue size is a power of 2 */
	WARN_ON(rxq->queue_size & (rxq->queue_size - 1));

	/*
	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
	 * between empty and completely full queues.
	 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
	 * defined for negative dividends.
	 */
	return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
}

/*
 * iwl_pcie_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}

static void iwl_pcie_write_prph_64_no_grab(struct iwl_trans *trans, u64 ofs,
					   u64 val)
{
	iwl_write_prph_no_grab(trans, ofs, val & 0xffffffff);
	iwl_write_prph_no_grab(trans, ofs + 4, val >> 32);
}

/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_rxq *rxq)
{
	u32 reg;

	lockdep_assert_held(&rxq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. there is a chance that the NIC is asleep
	 */
	if (!trans->cfg->base_params->shadow_reg_enable &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
				       reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			rxq->need_update = true;
			return;
		}
	}

	rxq->write_actual = round_down(rxq->write, 8);
	if (trans->cfg->mq_rx_supported)
		iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
			    rxq->write_actual);
	/*
	 * write to FH_RSCSR_CHNL0_WPTR register even in MQ as a W/A to
	 * hardware shadow registers bug - writing to RFH_Q_FRBDCB_WIDX will
	 * not wake the NIC.
	 */
	iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}

static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (!rxq->need_update)
			continue;
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		rxq->need_update = false;
		spin_unlock(&rxq->lock);
	}
}

/*
 * iwl_pcie_rxq_mq_restock - restock implementation for multi-queue rx
 */
static void iwl_pcie_rxq_mq_restock(struct iwl_trans *trans,
				    struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while (rxq->free_count) {
		__le64 *bd = (__le64 *)rxq->bd;

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);

		/* The first 12 bits are expected to be empty */
		WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
		/* Point to Rx buffer via next RBD in circular buffer */
		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
		rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/*
	 * If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8.
	 */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock(&rxq->lock);
	}
}

/*
 * iwl_pcie_rxq_sq_restock - restock implementation for single queue rx
 */
static void iwl_pcie_rxq_sq_restock(struct iwl_trans *trans,
				    struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		__le32 *bd = (__le32 *)rxq->bd;
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);

		/* Point to Rx buffer via next RBD in circular buffer */
		bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock(&rxq->lock);
	}
}

/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static
void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	if (trans->cfg->mq_rx_supported)
		iwl_pcie_rxq_mq_restock(trans, rxq);
	else
		iwl_pcie_rxq_sq_restock(trans, rxq);
}

/*
 * iwl_pcie_rx_alloc_page - allocates and returns a page.
 *
 */
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
					   gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct page *page;
	gfp_t gfp_mask = priority;

	if (trans_pcie->rx_page_order > 0)
		gfp_mask |= __GFP_COMP;

	/* Alloc a new receive buffer */
	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
	if (!page) {
		if (net_ratelimit())
			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
				       trans_pcie->rx_page_order);
		/*
		 * Issue an error if we don't have enough pre-allocated
		 * buffers.
		 */
		if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
			IWL_CRIT(trans,
				 "Failed to alloc_pages\n");
		return NULL;
	}
	return page;
}

/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 */
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
				   struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;

	while (1) {
		spin_lock(&rxq->lock);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			return;
		}
		spin_unlock(&rxq->lock);

		/* Alloc a new receive buffer */
		page = iwl_pcie_rx_alloc_page(trans, priority);
		if (!page)
			return;

		spin_lock(&rxq->lock);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock(&rxq->lock);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock(&rxq->lock);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}

		spin_lock(&rxq->lock);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock(&rxq->lock);
	}
}

static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < RX_POOL_SIZE; i++) {
		if (!trans_pcie->rx_pool[i].page)
			continue;
		dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
			       PAGE_SIZE << trans_pcie->rx_page_order,
			       DMA_FROM_DEVICE);
		__free_pages(trans_pcie->rx_pool[i].page,
			     trans_pcie->rx_page_order);
		trans_pcie->rx_pool[i].page = NULL;
	}
}

/*
 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
 *
 * Allocates for each received request 8 pages
 * Called as a scheduled work item.
 */
static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct list_head local_empty;
	int pending = atomic_xchg(&rba->req_pending, 0);

	IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);

	/* If we were scheduled - there is at least one request */
	spin_lock(&rba->lock);
	/* swap out the rba->rbd_empty to a local list */
	list_replace_init(&rba->rbd_empty, &local_empty);
	spin_unlock(&rba->lock);

	while (pending) {
		int i;
		struct list_head local_allocated;
		gfp_t gfp_mask = GFP_KERNEL;

		/* Do not post a warning if there are only a few requests */
		if (pending < RX_PENDING_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		INIT_LIST_HEAD(&local_allocated);

		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
			struct iwl_rx_mem_buffer *rxb;
			struct page *page;

			/* List should never be empty - each reused RBD is
			 * returned to the list, and initial pool covers any
			 * possible gap between the time the page is allocated
			 * to the time the RBD is added.
			 */
			BUG_ON(list_empty(&local_empty));
			/* Get the first rxb from the rbd list */
			rxb = list_first_entry(&local_empty,
					       struct iwl_rx_mem_buffer, list);
			BUG_ON(rxb->page);

			/* Alloc a new receive buffer */
			page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
			if (!page)
				continue;
			rxb->page = page;

			/* Get physical address of the RB */
			rxb->page_dma = dma_map_page(trans->dev, page, 0,
					PAGE_SIZE << trans_pcie->rx_page_order,
					DMA_FROM_DEVICE);
			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
				rxb->page = NULL;
				__free_pages(page, trans_pcie->rx_page_order);
				continue;
			}

			/* move the allocated entry to the out list */
			list_move(&rxb->list, &local_allocated);
			i++;
		}

		pending--;
		if (!pending) {
			pending = atomic_xchg(&rba->req_pending, 0);
			IWL_DEBUG_RX(trans,
				     "Pending allocation requests = %d\n",
				     pending);
		}

		spin_lock(&rba->lock);
		/* add the allocated rbds to the allocator allocated list */
		list_splice_tail(&local_allocated, &rba->rbd_allocated);
		/* get more empty RBDs for current pending requests */
		list_splice_tail_init(&rba->rbd_empty, &local_empty);
		spin_unlock(&rba->lock);

		atomic_inc(&rba->req_ready);
	}

	spin_lock(&rba->lock);
	/* return unused rbds to the allocator empty list */
	list_splice_tail(&local_empty, &rba->rbd_empty);
	spin_unlock(&rba->lock);
}

/*
 * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
 *
 * Called by the queue when it has posted an allocation request and
 * has freed 8 RBDs in order to restock itself.
 * This function directly moves the allocated RBs to the queue's ownership
 * and updates the relevant counters.
 */
static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
				      struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	lockdep_assert_held(&rxq->lock);

	/*
	 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
	 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
	 * function will return early, as there are no ready requests.
	 * atomic_dec_if_positive will perform the *actual* decrement only if
	 * req_ready > 0, i.e. - there are ready requests and the function
	 * hands one request to the caller.
	 */
	if (atomic_dec_if_positive(&rba->req_ready) < 0)
		return;

	spin_lock(&rba->lock);
	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
		/* Get next free Rx buffer, remove it from free list */
		struct iwl_rx_mem_buffer *rxb =
			list_first_entry(&rba->rbd_allocated,
					 struct iwl_rx_mem_buffer, list);

		list_move(&rxb->list, &rxq->rx_free);
	}
	spin_unlock(&rba->lock);

	rxq->used_count -= RX_CLAIM_REQ_ALLOC;
	rxq->free_count += RX_CLAIM_REQ_ALLOC;
}
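
/*
 * Illustrative sketch (not part of the driver): the atomic_dec_if_positive()
 * gating used above, spelled out. The helper below is hypothetical and only
 * illustrates the semantics the code relies on: the call returns the old
 * value minus one and performs the decrement only when the old value was
 * positive, so a zero req_ready counter yields -1 and nothing is claimed.
 */
static inline bool example_claim_one_ready_request(atomic_t *req_ready)
{
	/* old value 0: returns -1, no decrement, no ready request */
	/* old value 2: returns  1, counter becomes 1, one request claimed */
	return atomic_dec_if_positive(req_ready) >= 0;
}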

static void iwl_pcie_rx_allocator_work(struct work_struct *data)
{
	struct iwl_rb_allocator *rba_p =
		container_of(data, struct iwl_rb_allocator, rx_alloc);
	struct iwl_trans_pcie *trans_pcie =
		container_of(rba_p, struct iwl_trans_pcie, rba);

	iwl_pcie_rx_allocator(trans_pcie->trans);
}

static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct device *dev = trans->dev;
	int i;
	int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
						      sizeof(__le32);

	if (WARN_ON(trans_pcie->rxq))
		return -EINVAL;

	trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
				  GFP_KERNEL);
	if (!trans_pcie->rxq)
		return -EINVAL;

	spin_lock_init(&rba->lock);

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		spin_lock_init(&rxq->lock);
		if (trans->cfg->mq_rx_supported)
			rxq->queue_size = MQ_RX_TABLE_SIZE;
		else
			rxq->queue_size = RX_QUEUE_SIZE;

		/*
		 * Allocate the circular buffer of Read Buffer Descriptors
		 * (RBDs)
		 */
		rxq->bd = dma_zalloc_coherent(dev,
					     free_size * rxq->queue_size,
					     &rxq->bd_dma, GFP_KERNEL);
		if (!rxq->bd)
			goto err;

		if (trans->cfg->mq_rx_supported) {
			rxq->used_bd = dma_zalloc_coherent(dev,
							   sizeof(__le32) *
							   rxq->queue_size,
							   &rxq->used_bd_dma,
							   GFP_KERNEL);
			if (!rxq->used_bd)
				goto err;
		}

		/*Allocate the driver's pointer to receive buffer status */
		rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
						   &rxq->rb_stts_dma,
						   GFP_KERNEL);
		if (!rxq->rb_stts)
			goto err;
	}
	return 0;

err:
	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (rxq->bd)
			dma_free_coherent(dev, free_size * rxq->queue_size,
					  rxq->bd, rxq->bd_dma);
		rxq->bd_dma = 0;
		rxq->bd = NULL;

		if (rxq->rb_stts)
			dma_free_coherent(trans->dev,
					  sizeof(struct iwl_rb_status),
					  rxq->rb_stts, rxq->rb_stts_dma);

		if (rxq->used_bd)
			dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size,
					  rxq->used_bd, rxq->used_bd_dma);
		rxq->used_bd_dma = 0;
		rxq->used_bd = NULL;
	}
	kfree(trans_pcie->rxq);

	return -ENOMEM;
}

static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	unsigned long flags;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_4K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
	}

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;

	/* Stop Rx DMA */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		    (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
		    rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4k or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
		    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
		    FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
		    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
		    rb_size |
		    (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
		    (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	iwl_trans_release_nic_access(trans, &flags);

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (trans->cfg->host_interrupt_operation_mode)
		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}

static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size, enabled = 0;
	unsigned long flags;
	int i;

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_4K:
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = RFH_RXF_DMA_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = RFH_RXF_DMA_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
	}

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;

	/* Stop Rx DMA */
	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
	/* disable free and used rx queue operation */
	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);

	for (i = 0; i < trans->num_rx_queues; i++) {
		/* Tell device where to find RBD free table in DRAM */
		iwl_pcie_write_prph_64_no_grab(trans,
					       RFH_Q_FRBDCB_BA_LSB(i),
					       trans_pcie->rxq[i].bd_dma);
		/* Tell device where to find RBD used table in DRAM */
		iwl_pcie_write_prph_64_no_grab(trans,
					       RFH_Q_URBDCB_BA_LSB(i),
					       trans_pcie->rxq[i].used_bd_dma);
		/* Tell device where in DRAM to update its Rx status */
		iwl_pcie_write_prph_64_no_grab(trans,
					       RFH_Q_URBD_STTS_WPTR_LSB(i),
					       trans_pcie->rxq[i].rb_stts_dma);
		/* Reset device index tables */
		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
		iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);

		enabled |= BIT(i) | BIT(i + 16);
	}

	/* restock default queue */
	iwl_pcie_rxq_mq_restock(trans, &trans_pcie->rxq[0]);

	/*
	 * Enable Rx DMA
	 * Single frame mode
	 * Rx buffer size 4k or 8k or 12k
	 * Min RB size 4 or 8
	 * Drop frames that exceed RB size
	 * 512 RBDs
	 */
	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
			       RFH_DMA_EN_ENABLE_VAL |
			       rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK |
			       RFH_RXF_DMA_MIN_RB_4_8 |
			       RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
			       RFH_RXF_DMA_RBDCB_SIZE_512);

	/*
	 * Activate DMA snooping.
	 * Set RX DMA chunk size to 64B
	 * Default queue is 0
	 */
	iwl_write_prph_no_grab(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP |
			       (DEFAULT_RXQ_NUM <<
				RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) |
			       RFH_GEN_CFG_SERVICE_DMA_SNOOP);
	/* Enable the relevant rx queues */
	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);

	iwl_trans_release_nic_access(trans, &flags);

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}

static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
	lockdep_assert_held(&rxq->lock);

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	rxq->free_count = 0;
	rxq->used_count = 0;
}

static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
{
	WARN_ON(1);
	return 0;
}

int iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *def_rxq;
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i, err, queue_size, allocator_pool_size, num_alloc;

	if (!trans_pcie->rxq) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}
	def_rxq = trans_pcie->rxq;
	if (!rba->alloc_wq)
		rba->alloc_wq = alloc_workqueue("rb_allocator",
						WQ_HIGHPRI | WQ_UNBOUND, 1);
	INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);

	spin_lock(&rba->lock);
	atomic_set(&rba->req_pending, 0);
	atomic_set(&rba->req_ready, 0);
	INIT_LIST_HEAD(&rba->rbd_allocated);
	INIT_LIST_HEAD(&rba->rbd_empty);
	spin_unlock(&rba->lock);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_free_rbs_pool(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		def_rxq->queue[i] = NULL;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		rxq->id = i;

		spin_lock(&rxq->lock);
		/*
		 * Set read write pointer to reflect that we have processed
		 * and used all buffers, but have not restocked the Rx queue
		 * with fresh buffers
		 */
		rxq->read = 0;
		rxq->write = 0;
		rxq->write_actual = 0;
		memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));

		iwl_pcie_rx_init_rxb_lists(rxq);

		if (!rxq->napi.poll)
			netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
				       iwl_pcie_dummy_napi_poll, 64);

		spin_unlock(&rxq->lock);
	}

	/* move the pool to the default queue and allocator ownerships */
	queue_size = trans->cfg->mq_rx_supported ?
		     MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
	allocator_pool_size = trans->num_rx_queues *
		(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
	num_alloc = queue_size + allocator_pool_size;
	BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
		     ARRAY_SIZE(trans_pcie->rx_pool));
	for (i = 0; i < num_alloc; i++) {
		struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];

		if (i < allocator_pool_size)
			list_add(&rxb->list, &rba->rbd_empty);
		else
			list_add(&rxb->list, &def_rxq->rx_used);
		trans_pcie->global_table[i] = rxb;
		rxb->vid = (u16)i;
	}

	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
	if (trans->cfg->mq_rx_supported) {
		iwl_pcie_rx_mq_hw_init(trans);
	} else {
		iwl_pcie_rxq_sq_restock(trans, def_rxq);
		iwl_pcie_rx_hw_init(trans, def_rxq);
	}

	spin_lock(&def_rxq->lock);
	iwl_pcie_rxq_inc_wr_ptr(trans, def_rxq);
	spin_unlock(&def_rxq->lock);

	return 0;
}

void iwl_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
					      sizeof(__le32);
	int i;

	/*
	 * if rxq is NULL, it means that nothing has been allocated,
	 * exit now
	 */
	if (!trans_pcie->rxq) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	cancel_work_sync(&rba->rx_alloc);
	if (rba->alloc_wq) {
		destroy_workqueue(rba->alloc_wq);
		rba->alloc_wq = NULL;
	}

	iwl_pcie_free_rbs_pool(trans);

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (rxq->bd)
			dma_free_coherent(trans->dev,
					  free_size * rxq->queue_size,
					  rxq->bd, rxq->bd_dma);
		rxq->bd_dma = 0;
		rxq->bd = NULL;

		if (rxq->rb_stts)
			dma_free_coherent(trans->dev,
					  sizeof(struct iwl_rb_status),
					  rxq->rb_stts, rxq->rb_stts_dma);
		else
			IWL_DEBUG_INFO(trans,
				       "Free rxq->rb_stts which is NULL\n");

		if (rxq->used_bd)
			dma_free_coherent(trans->dev,
					  sizeof(__le32) * rxq->queue_size,
					  rxq->used_bd, rxq->used_bd_dma);
		rxq->used_bd_dma = 0;
		rxq->used_bd = NULL;

		if (rxq->napi.poll)
			netif_napi_del(&rxq->napi);
	}
	kfree(trans_pcie->rxq);
}

/*
 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
 *
 * Called when a RBD can be reused. The RBD is transferred to the allocator.
 * When there are 2 empty RBDs - a request for allocation is posted
 */
static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
				  struct iwl_rx_mem_buffer *rxb,
				  struct iwl_rxq *rxq, bool emergency)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;

	/* Move the RBD to the used list; it will be moved to the allocator in
	 * batches before claiming or posting a request. */
	list_add_tail(&rxb->list, &rxq->rx_used);

	if (unlikely(emergency))
		return;

	/* Count the allocator owned RBDs */
	rxq->used_count++;

	/* If we have RX_POST_REQ_ALLOC newly released rx buffers -
	 * issue a request for the allocator. Modulo RX_CLAIM_REQ_ALLOC is
	 * used for the case we failed to claim RX_CLAIM_REQ_ALLOC buffers
	 * before, but we still need to post another request.
	 */
	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
		/* Move the 2 RBDs to the allocator ownership.
		 * The allocator has another 6 from the pool for the request
		 * completion. */
		spin_lock(&rba->lock);
		list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
		spin_unlock(&rba->lock);

		atomic_inc(&rba->req_pending);
		queue_work(rba->alloc_wq, &rba->rx_alloc);
	}
}
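
/*
 * Illustrative sketch (not part of the driver): the posting cadence produced
 * by the modulo check above. With the values this file describes in its
 * theory-of-operation comment (RX_POST_REQ_ALLOC == 2, RX_CLAIM_REQ_ALLOC ==
 * 8), a request is posted when used_count reaches 2, 10, 18, ... - i.e. once
 * per 8 reused RBDs, even if a previous claim of the 8 pre-allocated buffers
 * has not happened yet. The helper name is hypothetical.
 */
static inline bool example_should_post_alloc_request(u32 used_count)
{
	return (used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC;
}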

static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
				struct iwl_rxq *rxq,
				struct iwl_rx_mem_buffer *rxb,
				bool emergency)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		u16 sequence;
		bool reclaim;
		int index, cmd_index, len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = offset,
			._rx_page_order = trans_pcie->rx_page_order,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		pkt = rxb_addr(&rxcb);

		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
			break;

		IWL_DEBUG_RX(trans,
			     "cmd at offset %d: %s (0x%.2x, seq 0x%x)\n",
			     rxcb._offset,
			     iwl_get_cmd_string(trans,
						iwl_cmd_id(pkt->hdr.cmd,
							   pkt->hdr.group_id,
							   0)),
			     pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence));

		len = iwl_rx_packet_len(pkt);
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim) {
			int i;

			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
							pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = get_cmd_index(&txq->q, index);

		if (rxq->id == 0)
			iwl_op_mode_rx(trans->op_mode, &rxq->napi,
				       &rxcb);
		else
			iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
					   &rxcb, rxq->id);

		if (reclaim) {
			kzfree(txq->entries[cmd_index].free_buf);
			txq->entries[cmd_index].free_buf = NULL;
		}

		/*
		 * After here, we should always check rxcb._page_stolen,
		 * if it is true then one of the handlers took the page.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (!rxcb._page_stolen)
				iwl_pcie_hcmd_complete(trans, &rxcb);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

	/* page was stolen from us -- free our reference */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/* Reuse the page if possible. For notification packets and
	 * SKBs that fail to Rx correctly, add them back into the
	 * rx_free list for reuse later. */
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			/*
			 * free the page(s) as well to not break
			 * the invariant that the items on the used
			 * list have no page(s)
			 */
			__free_pages(rxb->page, trans_pcie->rx_page_order);
			rxb->page = NULL;
			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
		} else {
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		}
	} else
		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
}

/*
 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 */
static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq[queue];
	u32 r, i, count = 0;
	bool emergency = false;

restart:
	spin_lock(&rxq->lock);
	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
	i = rxq->read;

	/* W/A 9000 device step A0 wrap-around bug */
	r &= (rxq->queue_size - 1);

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);

	while (i != r) {
		struct iwl_rx_mem_buffer *rxb;

		if (unlikely(rxq->used_count == rxq->queue_size / 2))
			emergency = true;

		if (trans->cfg->mq_rx_supported) {
			/*
			 * used_bd is a 32 bit value but only 12 bits are used
			 * to retrieve the vid
			 */
			u16 vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF;

			if (WARN(vid >= ARRAY_SIZE(trans_pcie->global_table),
				 "Invalid rxb index from HW %u\n", (u32)vid))
				goto out;
			rxb = trans_pcie->global_table[vid];
		} else {
			rxb = rxq->queue[i];
			rxq->queue[i] = NULL;
		}

		IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
		iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency);

		i = (i + 1) & (rxq->queue_size - 1);

		/*
		 * If we have RX_CLAIM_REQ_ALLOC released rx buffers -
		 * try to claim the pre-allocated buffers from the allocator.
		 * If not ready - will try to reclaim next time.
		 * There is no need to reschedule work - allocator exits only
		 * on success
		 */
		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
			iwl_pcie_rx_allocator_get(trans, rxq);

		if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
			struct iwl_rb_allocator *rba = &trans_pcie->rba;

			/* Add the remaining empty RBDs for allocator use */
			spin_lock(&rba->lock);
			list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
			spin_unlock(&rba->lock);
		} else if (emergency) {
			count++;
			if (count == 8) {
				count = 0;
				if (rxq->used_count < rxq->queue_size / 3)
					emergency = false;

				rxq->read = i;
				spin_unlock(&rxq->lock);
				iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
				iwl_pcie_rxq_restock(trans, rxq);
				goto restart;
			}
		}
	}
out:
	/* Backtrack one entry */
	rxq->read = i;
	spin_unlock(&rxq->lock);

	/*
	 * Handle a case where during emergency there are some unallocated RBDs.
	 * Those RBDs are in the used list, but are not tracked by the queue's
	 * used_count, which only counts allocator-owned RBDs.
	 * Unallocated emergency RBDs must be allocated on exit, otherwise,
	 * when called again, the function may not be in emergency mode and
	 * they will be handed to the allocator with no tracking in the RBD
	 * allocator counters, which will lead to them never being claimed back
	 * by the queue.
	 * By allocating them here, they are now in the queue free list, and
	 * will be restocked by the next call of iwl_pcie_rxq_restock.
	 */
	if (unlikely(emergency && count))
		iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);

	if (rxq->napi.poll)
		napi_gro_flush(&rxq->napi, false);

	iwl_pcie_rxq_restock(trans, rxq);
}

static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
{
	u8 queue = entry->entry;
	struct msix_entry *entries = entry - queue;

	return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
}

static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
				      struct msix_entry *entry)
{
	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as
	 * write 1 clear (W1C) register, meaning that it's being clear
	 * by writing 1 to the bit.
	 */
	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
}

/*
 * iwl_pcie_rx_msix_handle - Main entry function for receiving responses from fw
 * This interrupt handler should be used with RSS queue only.
 */
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
{
	struct msix_entry *entry = dev_id;
	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
	struct iwl_trans *trans = trans_pcie->trans;

	if (WARN_ON(entry->entry >= trans->num_rx_queues))
		return IRQ_NONE;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	local_bh_disable();
	iwl_pcie_rx_handle(trans, entry->entry);
	local_bh_enable();

	iwl_pcie_clear_irq(trans, entry);

	lock_map_release(&trans->sync_cmd_lockdep_map);

	return IRQ_HANDLED;
}

/*
 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
	if (trans->cfg->internal_wimax_coex &&
	    !trans->cfg->apmg_not_supported &&
	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
			    APMG_PS_CTRL_VAL_RESET_REQ))) {
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		iwl_op_mode_wimax_active(trans->op_mode);
		wake_up(&trans_pcie->wait_command_queue);
		return;
	}

	iwl_pcie_dump_csr(trans);
	iwl_dump_fh(trans, NULL);

	local_bh_disable();
	/* The STATUS_FW_ERROR bit is set in this function. This must happen
	 * before we wake up the command caller, to ensure a proper cleanup. */
	iwl_trans_fw_error(trans);
	local_bh_enable();

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
		del_timer(&trans_pcie->txq[i].stuck_timer);

	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	wake_up(&trans_pcie->wait_command_queue);
}

static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
{
	u32 inta;

	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);

	trace_iwlwifi_dev_irq(trans->dev);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(trans, CSR_INT);

	/* the thread will service interrupts and re-enable them */
	return inta;
}

/* a device (PCI-E) page is 4096 bytes long */
#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))

/* Interrupt handler using the ICT table. With this mechanism the driver
 * stops reading the INTA register to get the device's interrupts; reading
 * that register is expensive. Instead, the device writes interrupt causes
 * into the ICT table in DRAM, increments the index and fires an interrupt
 * to the driver. The driver ORs all ICT table entries from the current index
 * up to the first entry with a 0 value - the result is the interrupt cause
 * we need to service. The driver then sets the entries back to 0 and updates
 * the index.
 */
static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 inta;
	u32 val = 0;
	u32 read;

	trace_iwlwifi_dev_irq(trans->dev);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
	if (!read)
		return 0;

	/*
	 * Collect all entries up to the first 0, starting from ict_index;
	 * note we already read at ict_index.
	 */
	do {
		val |= read;
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
				trans_pcie->ict_index, read);
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));

		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
					   read);
	} while (read);

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/*
	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
	 * so we use them to decide on the real state of the Rx bit.
	 * In other words, bit 15 is set if bit 18 or bit 19 is set.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

	inta = (0xff & val) | ((0xff00 & val) << 16);
	return inta;
}
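
/*
 * Illustrative sketch (not part of the driver): the ICT walk performed by
 * iwl_pcie_int_cause_ict(), reduced to a toy loop over a plain array. It ORs
 * entries starting at *index until it hits a zero entry, zeroing them as it
 * goes, exactly as the comment above that function describes. The function
 * name and the caller-supplied table are hypothetical.
 */
static inline u32 example_ict_collect(u32 *table, u32 *index, u32 count)
{
	u32 val = 0;

	while (table[*index]) {
		val |= table[*index];
		table[*index] = 0;
		*index = (*index + 1) & (count - 1);
	}

	return val;
}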

irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
	struct iwl_trans *trans = dev_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta = 0;
	u32 handled = 0;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock(&trans_pcie->irq_lock);

	/* dram interrupt table not set yet,
	 * use legacy interrupt.
	 */
	if (likely(trans_pcie->use_ict))
		inta = iwl_pcie_int_cause_ict(trans);
	else
		inta = iwl_pcie_int_cause_non_ict(trans);

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		IWL_DEBUG_ISR(trans,
			      "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
			      inta, trans_pcie->inta_mask,
			      iwl_read32(trans, CSR_INT_MASK),
			      iwl_read32(trans, CSR_FH_INT_STATUS));
		if (inta & (~trans_pcie->inta_mask))
			IWL_DEBUG_ISR(trans,
				      "We got a masked interrupt (0x%08x)\n",
				      inta & (~trans_pcie->inta_mask));
	}

	inta &= trans_pcie->inta_mask;

	/*
	 * Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC.
	 */
	if (unlikely(!inta)) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		/*
		 * Re-enable interrupts here since we don't
		 * have anything to service
		 */
		if (test_bit(STATUS_INT_ENABLED, &trans->status))
			iwl_enable_interrupts(trans);
		spin_unlock(&trans_pcie->irq_lock);
		lock_map_release(&trans->sync_cmd_lockdep_map);
		return IRQ_NONE;
	}

	if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/*
		 * Hardware disappeared. It might have
		 * already raised an interrupt.
		 */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		spin_unlock(&trans_pcie->irq_lock);
		goto out;
	}

	/* Ack/clear/reset pending uCode interrupts.
	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 */
	/* There is a hardware bug in the interrupt mask function that some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore the
	 * ICT interrupt handling mechanism has another bug that might cause
	 * these unmasked interrupts fail to be detected. We workaround the
	 * hardware bugs here by ACKing all the possible interrupts so that
	 * interrupt coalescing can still be achieved.
	 */
	iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);

	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
			      inta, iwl_read32(trans, CSR_INT_MASK));

	spin_unlock(&trans_pcie->irq_lock);

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected.  Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_pcie_irq_handle_error(trans);

		handled |= CSR_INT_BIT_HW_ERR;

		goto out;
	}

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(trans,
				      "Scheduler finished to transmit the frame/frames.\n");
			isr_stats->sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
			isr_stats->alive++;
		}
	}

	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		bool hw_rfkill;

		hw_rfkill = iwl_is_rfkill_set(trans);
		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
			 hw_rfkill ? "disable radio" : "enable radio");

		isr_stats->rfkill++;

		mutex_lock(&trans_pcie->mutex);
		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
		mutex_unlock(&trans_pcie->mutex);
		if (hw_rfkill) {
			set_bit(STATUS_RFKILL, &trans->status);
			if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
					       &trans->status))
				IWL_DEBUG_RF_KILL(trans,
						  "Rfkill while SYNC HCMD in flight\n");
			wake_up(&trans_pcie->wait_command_queue);
		} else {
			clear_bit(STATUS_RFKILL, &trans->status);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans, "Microcode SW error detected. "
			" Restarting 0x%X.\n", inta);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_check_wrptr(trans);
		iwl_pcie_txq_check_wrptrs(trans);

		isr_stats->wakeup++;
1628 1629 1630 1631 1632 1633 1634 1635

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here. */
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
		    CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(trans, CSR_FH_INT_STATUS,
					CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(trans,
				CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}
		/* Sending an RX interrupt requires many steps to be done in
		 * the device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to an RX race: the driver could receive the
		 * RX interrupt before the shared data reflects the change;
		 * the periodic interrupt will detect any dangling Rx activity.
		 */

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(trans, CSR_INT_PERIODIC_REG,
			    CSR_INT_PERIODIC_DIS);

		/*
		 * Enable the periodic interrupt in 8 msec only if we received
		 * a real RX interrupt (instead of just the periodic int), to
		 * catch any dangling Rx interrupt.  If it was just the
		 * periodic interrupt, there was no dangling Rx activity, and
		 * there is no need to extend the periodic interrupt; one-shot
		 * is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(trans, CSR_INT_PERIODIC_REG,
				   CSR_INT_PERIODIC_ENA);

		isr_stats->rx++;
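		/* Run the Rx path with bottom halves disabled; frames handed
		 * up the stack from here are expected to be delivered from a
		 * BH-disabled context. */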

		local_bh_disable();
		iwl_pcie_rx_handle(trans, 0);
		local_bh_enable();
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}

	/* we are loading the firmware, enable FH_TX interrupt only */
	if (handled & CSR_INT_BIT_FH_TX)
		iwl_enable_fw_load_int(trans);
	/* only re-enable all interrupts if they were disabled by the irq handler */
	else if (test_bit(STATUS_INT_ENABLED, &trans->status))
		iwl_enable_interrupts(trans);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(trans);

out:
	lock_map_release(&trans->sync_cmd_lockdep_map);
	return IRQ_HANDLED;
}

/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/
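/*
 * The ICT (Interrupt Cause Table) is a DMA-coherent table in host memory
 * into which the device writes interrupt cause entries, so the interrupt
 * handler can read causes from memory instead of polling CSR registers.
 */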

/* Free dram table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->ict_tbl) {
		dma_free_coherent(trans->dev, ICT_SIZE,
				  trans_pcie->ict_tbl,
				  trans_pcie->ict_tbl_dma);
		trans_pcie->ict_tbl = NULL;
		trans_pcie->ict_tbl_dma = 0;
	}
}

/*
 * Allocate the DRAM-shared ICT table; it is an aligned memory
 * block of ICT_SIZE.
 * Also reset all data related to the ICT table interrupt.
 */
int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->ict_tbl =
		dma_zalloc_coherent(trans->dev, ICT_SIZE,
				   &trans_pcie->ict_tbl_dma,
				   GFP_KERNEL);
	if (!trans_pcie->ict_tbl)
		return -ENOMEM;

	/* just an API sanity check ... it is guaranteed to be aligned */
	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
		iwl_pcie_free_ict(trans);
		return -EINVAL;
	}

	return 0;
}

/* Device is going up: inform it that it will be using the ICT interrupt
 * table, and tell the driver to start using the ICT interrupt.
 */
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;

	if (!trans_pcie->ict_tbl)
		return;

	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

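	/* The DRAM interrupt table register takes the ICT table's DMA address
	 * shifted down by ICT_SHIFT, OR'd with the enable, wrap-check and
	 * write-pointer control flags below. */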
	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
	val |= CSR_DRAM_INT_TBL_ENABLE |
	       CSR_DRAM_INIT_TBL_WRAP_CHECK |
	       CSR_DRAM_INIT_TBL_WRITE_POINTER;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

/* Device is going down; disable ICT interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	trans_pcie->use_ict = false;
	spin_unlock(&trans_pcie->irq_lock);
}

irqreturn_t iwl_pcie_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;

	if (!trans)
		return IRQ_NONE;

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the threaded handler will
	 * re-enable interrupts.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	return IRQ_WAKE_THREAD;
}

irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
{
	return IRQ_WAKE_THREAD;
}

irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
{
	struct msix_entry *entry = dev_id;
	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
	struct iwl_trans *trans = trans_pcie->trans;
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta_fh, inta_hw;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock(&trans_pcie->irq_lock);
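	/* In MSI-X mode the interrupt causes are reported through two
	 * registers: FH (DMA) causes and HW causes, read separately here. */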
	inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
	inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
	/*
	 * Clear the cause registers to avoid handling the same cause twice.
	 */
	iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
	iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
	spin_unlock(&trans_pcie->irq_lock);

	if (unlikely(!(inta_fh | inta_hw))) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		lock_map_release(&trans->sync_cmd_lockdep_map);
		return IRQ_NONE;
	}

	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "ISR inta_fh 0x%08x, enabled 0x%08x\n",
			      inta_fh,
			      iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		/*
		 * Wake up uCode load routine,
		 * now that load is complete
		 */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	/* Error detected by uCode */
	if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
	    (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) {
		IWL_ERR(trans,
			"Microcode SW error detected. Restarting 0x%X.\n",
			inta_fh);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
	}

	/* After checking the FH register, check the HW register */
	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans,
			      "ISR inta_hw 0x%08x, enabled 0x%08x\n",
			      inta_hw,
			      iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));

	/* Alive notification via Rx interrupt will do the real work */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
		isr_stats->alive++;
	}

	/* uCode wakes up after power-down sleep */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_check_wrptr(trans);
		iwl_pcie_txq_check_wrptrs(trans);

		isr_stats->wakeup++;
	}

	/* Chip got too hot and stopped itself */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
	}

	/* HW RF KILL switch toggled */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL) {
		bool hw_rfkill;

		hw_rfkill = iwl_is_rfkill_set(trans);
		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
			 hw_rfkill ? "disable radio" : "enable radio");

		isr_stats->rfkill++;

		mutex_lock(&trans_pcie->mutex);
		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
		mutex_unlock(&trans_pcie->mutex);
		if (hw_rfkill) {
			set_bit(STATUS_RFKILL, &trans->status);
			if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
					       &trans->status))
				IWL_DEBUG_RF_KILL(trans,
						  "Rfkill while SYNC HCMD in flight\n");
			wake_up(&trans_pcie->wait_command_queue);
		} else {
			clear_bit(STATUS_RFKILL, &trans->status);
		}
	}

	if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
		IWL_ERR(trans,
			"Hardware error detected. Restarting.\n");

		isr_stats->hw++;
		iwl_pcie_irq_handle_error(trans);
	}

	iwl_pcie_clear_irq(trans, entry);

	lock_map_release(&trans->sync_cmd_lockdep_map);

	return IRQ_HANDLED;
}