/******************************************************************************
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which point to Receive Buffers to be filled by the NIC.  These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC.  The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped)
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt.  The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
 *   When the interrupt handler is called, the request is processed.
 *   The page is either stolen - transferred to the upper layer
 *   or reused - added immediately to the iwl->rxq->rx_free list.
 * + When the page is stolen - the driver updates the matching queue's used
 *   count, detaches the RBD and transfers it to the queue used list.
 *   When there are two used RBDs - they are transferred to the allocator empty
 *   list. Work is then scheduled for the allocator to start allocating
 *   eight buffers.
 *   When another 6 RBDs are used - they are transferred to the allocator
 *   empty list and the driver tries to claim the pre-allocated buffers and
 *   add them to iwl->rxq->rx_free. If it fails - it continues to claim them
 *   until ready.
 *   When there are 8+ buffers in the free list - either from allocation or from
 *   8 reused unstolen pages - restock is called to update the FW and indexes.
 * + In order to make sure the allocator always has RBDs to use for allocation
 *   the allocator has an initial pool in the size of num_queues*(8-2) - the
 *   maximum number of missing RBDs per allocation request (a request is
 *   posted with 2 empty RBDs, and there is no guarantee when the other 6
 *   RBDs are supplied).
 *   The queues supply the recycling of the rest of the RBDs.
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq.  The driver 'processed' index is updated.
 * + If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock.
 *                            Used only during initialization.
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.
 * iwl_pcie_rx_allocator()     Background work for allocating pages.
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Posts and claims requests to the allocator.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 *
 * RBD life-cycle:
 *
 * Init:
 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
 *
 * Regular Receive interrupt:
 * Page Stolen:
 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
 * Page not Stolen:
 * rxq.queue -> rxq.rx_free -> rxq.queue
 * ...
 *
 */
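
/*
 * Worked example of the index arithmetic above (a sketch, assuming the
 * legacy 256-entry queue): iwl_rxq_space() below computes the number of
 * slots the driver may still restock as (read - write - 1) & (queue_size - 1).
 * With read == write that yields 255 - at most queue_size - 1 slots are
 * ever filled, so a completely full queue can be told apart from an empty
 * one - and with write == read - 1 it yields 0, i.e. nothing is left to
 * restock.
 */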

/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
	/* Make sure rx queue size is a power of 2 */
	WARN_ON(rxq->queue_size & (rxq->queue_size - 1));

	/*
	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
	 * between empty and completely full queues.
	 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
	 * defined for negative dividends.
	 */
	return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
}

/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}

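/*
 * Write a 64-bit value into two consecutive 32-bit periphery registers,
 * low word first.  Used by iwl_pcie_rx_mq_hw_init() below to program the
 * RFH DMA addresses, which are wider than a single 32-bit register.
 */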
static void iwl_pcie_write_prph_64(struct iwl_trans *trans, u64 ofs, u64 val)
{
	iwl_write_prph(trans, ofs, val & 0xffffffff);
	iwl_write_prph(trans, ofs + 4, val >> 32);
}

/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_rxq *rxq)
{
	u32 reg;

	lockdep_assert_held(&rxq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. there is a chance that the NIC is asleep
	 */
	if (!trans->cfg->base_params->shadow_reg_enable &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
				       reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			rxq->need_update = true;
			return;
		}
	}

	rxq->write_actual = round_down(rxq->write, 8);
	if (trans->cfg->mq_rx_supported)
		iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(rxq->id),
			       rxq->write_actual);
	else
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}

static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (!rxq->need_update)
			continue;
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		rxq->need_update = false;
		spin_unlock(&rxq->lock);
	}
}

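/*
 * iwl_pcie_rxq_mq_restock - restock a multi-queue (RFH) RX queue
 *
 * Multi-queue counterpart of iwl_pcie_rxq_restock(): moves buffers from
 * rx_free into the 64-bit free BD table (DMA address plus vid) and, once
 * enough have been added, updates the RFH write index.
 */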
static void iwl_pcie_rxq_mq_restock(struct iwl_trans *trans,
				    struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while (rxq->free_count) {
		__le64 *bd = (__le64 *)rxq->bd;

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);

		/* The first 12 bits are expected to be empty */
		WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
		/* Point to Rx buffer via next RBD in circular buffer */
		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
		rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/*
	 * If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8.
	 */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock(&rxq->lock);
	}
}

/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		__le32 *bd = (__le32 *)rxq->bd;
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);

		/* Point to Rx buffer via next RBD in circular buffer */
		bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock(&rxq->lock);
	}
}

/*
 * iwl_pcie_rx_alloc_page - allocates and returns a page.
 */
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
					   gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct page *page;
	gfp_t gfp_mask = priority;

	if (trans_pcie->rx_page_order > 0)
		gfp_mask |= __GFP_COMP;

	/* Alloc a new receive buffer */
	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
	if (!page) {
		if (net_ratelimit())
			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
				       trans_pcie->rx_page_order);
		/*
		 * Issue an error if we don't have enough pre-allocated
		 * buffers.
		 */
		if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
			IWL_CRIT(trans,
				 "Failed to alloc_pages\n");
		return NULL;
	}
	return page;
}

/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 */
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
				   struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;

	while (1) {
		spin_lock(&rxq->lock);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			return;
		}
		spin_unlock(&rxq->lock);

		/* Alloc a new receive buffer */
		page = iwl_pcie_rx_alloc_page(trans, priority);
		if (!page)
			return;

		spin_lock(&rxq->lock);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock(&rxq->lock);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock(&rxq->lock);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}

		spin_lock(&rxq->lock);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock(&rxq->lock);
	}
}

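/*
 * iwl_pcie_free_rbs_pool - unmap and free every page still held in the
 * global receive buffer pool
 */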
static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < MQ_RX_POOL_SIZE; i++) {
		if (!trans_pcie->rx_pool[i].page)
			continue;
		dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
			       PAGE_SIZE << trans_pcie->rx_page_order,
			       DMA_FROM_DEVICE);
		__free_pages(trans_pcie->rx_pool[i].page,
			     trans_pcie->rx_page_order);
		trans_pcie->rx_pool[i].page = NULL;
	}
}

/*
 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
 *
 * Allocates 8 pages for each received request
 * Called as a scheduled work item.
 */
static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct list_head local_empty;
	int pending = atomic_xchg(&rba->req_pending, 0);

	IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);

	/* If we were scheduled - there is at least one request */
	spin_lock(&rba->lock);
	/* swap out the rba->rbd_empty to a local list */
	list_replace_init(&rba->rbd_empty, &local_empty);
	spin_unlock(&rba->lock);

	while (pending) {
		int i;
		struct list_head local_allocated;
		gfp_t gfp_mask = GFP_KERNEL;

		/* Do not post a warning if there are only a few requests */
		if (pending < RX_PENDING_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		INIT_LIST_HEAD(&local_allocated);

		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
			struct iwl_rx_mem_buffer *rxb;
			struct page *page;

			/* List should never be empty - each reused RBD is
			 * returned to the list, and initial pool covers any
			 * possible gap between the time the page is allocated
			 * and the time the RBD is added.
			 */
			BUG_ON(list_empty(&local_empty));
			/* Get the first rxb from the rbd list */
			rxb = list_first_entry(&local_empty,
					       struct iwl_rx_mem_buffer, list);
			BUG_ON(rxb->page);

			/* Alloc a new receive buffer */
			page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
			if (!page)
				continue;
			rxb->page = page;

			/* Get physical address of the RB */
			rxb->page_dma = dma_map_page(trans->dev, page, 0,
					PAGE_SIZE << trans_pcie->rx_page_order,
					DMA_FROM_DEVICE);
			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
				rxb->page = NULL;
				__free_pages(page, trans_pcie->rx_page_order);
				continue;
			}

			/* move the allocated entry to the out list */
			list_move(&rxb->list, &local_allocated);
			i++;
		}

		pending--;
		if (!pending) {
			pending = atomic_xchg(&rba->req_pending, 0);
			IWL_DEBUG_RX(trans,
				     "Pending allocation requests = %d\n",
				     pending);
		}

		spin_lock(&rba->lock);
		/* add the allocated rbds to the allocator allocated list */
		list_splice_tail(&local_allocated, &rba->rbd_allocated);
		/* get more empty RBDs for current pending requests */
		list_splice_tail_init(&rba->rbd_empty, &local_empty);
		spin_unlock(&rba->lock);

		atomic_inc(&rba->req_ready);
	}

	spin_lock(&rba->lock);
	/* return unused rbds to the allocator empty list */
	list_splice_tail(&local_empty, &rba->rbd_empty);
	spin_unlock(&rba->lock);
}

/*
 * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages
 *
 * Called by the queue when it has posted an allocation request and
 * has freed 8 RBDs in order to restock itself.
 */
static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
				     struct iwl_rx_mem_buffer
				     *out[RX_CLAIM_REQ_ALLOC])
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	/*
	 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
	 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
	 * function will return -ENOMEM, as there are no ready requests.
	 * atomic_dec_if_positive will perform the *actual* decrement only if
	 * req_ready > 0, i.e. - there are ready requests and the function
	 * hands one request to the caller.
	 */
	if (atomic_dec_if_positive(&rba->req_ready) < 0)
		return -ENOMEM;

	spin_lock(&rba->lock);
	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
		/* Get next free Rx buffer, remove it from free list */
		out[i] = list_first_entry(&rba->rbd_allocated,
			       struct iwl_rx_mem_buffer, list);
		list_del(&out[i]->list);
	}
	spin_unlock(&rba->lock);

	return 0;
}

static void iwl_pcie_rx_allocator_work(struct work_struct *data)
{
	struct iwl_rb_allocator *rba_p =
		container_of(data, struct iwl_rb_allocator, rx_alloc);
	struct iwl_trans_pcie *trans_pcie =
		container_of(rba_p, struct iwl_trans_pcie, rba);

	iwl_pcie_rx_allocator(trans_pcie->trans);
}

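/*
 * iwl_pcie_rx_alloc - allocate the RX queue structures
 *
 * Allocates the rxq array plus, per queue, the BD circular buffer, the
 * used BD table (multi-queue devices only) and the RB status block as
 * DMA-coherent memory.  Everything is freed again on any failure.
 */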
static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct device *dev = trans->dev;
	int i;
	int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
						      sizeof(__le32);

	if (WARN_ON(trans_pcie->rxq))
		return -EINVAL;

	trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
				  GFP_KERNEL);
	if (!trans_pcie->rxq)
		return -EINVAL;

	spin_lock_init(&rba->lock);

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		spin_lock_init(&rxq->lock);
		if (trans->cfg->mq_rx_supported)
			rxq->queue_size = MQ_RX_TABLE_SIZE;
		else
			rxq->queue_size = RX_QUEUE_SIZE;

		/*
		 * Allocate the circular buffer of Read Buffer Descriptors
		 * (RBDs)
		 */
		rxq->bd = dma_zalloc_coherent(dev,
					     free_size * rxq->queue_size,
					     &rxq->bd_dma, GFP_KERNEL);
		if (!rxq->bd)
			goto err;

		if (trans->cfg->mq_rx_supported) {
			rxq->used_bd = dma_zalloc_coherent(dev,
							   sizeof(__le32) *
							   rxq->queue_size,
							   &rxq->used_bd_dma,
							   GFP_KERNEL);
			if (!rxq->used_bd)
				goto err;
		}

		/*Allocate the driver's pointer to receive buffer status */
		rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
						   &rxq->rb_stts_dma,
						   GFP_KERNEL);
		if (!rxq->rb_stts)
			goto err;
	}
	return 0;

err:
	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (rxq->bd)
			dma_free_coherent(dev, free_size * rxq->queue_size,
					  rxq->bd, rxq->bd_dma);
		rxq->bd_dma = 0;
		rxq->bd = NULL;

		if (rxq->rb_stts)
			dma_free_coherent(trans->dev,
					  sizeof(struct iwl_rb_status),
					  rxq->rb_stts, rxq->rb_stts_dma);

		if (rxq->used_bd)
			dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size,
					  rxq->used_bd, rxq->used_bd_dma);
		rxq->used_bd_dma = 0;
		rxq->used_bd = NULL;
	}
	kfree(trans_pcie->rxq);

	return -ENOMEM;
}

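/*
 * iwl_pcie_rx_hw_init - program the legacy (single queue) RX DMA engine
 *
 * Stops the RX DMA channel, resets its pointers, tells the device where
 * the BD circular buffer and RB status block live, and re-enables DMA
 * with the configured RB size, timeout and interrupt coalescing.
 */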
static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_4K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
	}

	/* Stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4k or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size|
			   (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (trans->cfg->host_interrupt_operation_mode)
		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}

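/*
 * iwl_pcie_rx_mq_hw_init - program the RFH for multi-queue RX devices
 *
 * For every RX queue, tells the device where the free/used BD tables and
 * the status block live in DRAM, resets the index registers, restocks the
 * default queue and finally enables RX DMA and interrupt coalescing.
 */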
static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size, enabled = 0;
	int i;

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_4K:
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = RFH_RXF_DMA_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = RFH_RXF_DMA_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
	}

	/* Stop Rx DMA */
	iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
	/* disable free and used rx queue operation */
	iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, 0);

	for (i = 0; i < trans->num_rx_queues; i++) {
		/* Tell device where to find RBD free table in DRAM */
		iwl_pcie_write_prph_64(trans, RFH_Q_FRBDCB_BA_LSB(i),
				       (u64)(trans_pcie->rxq[i].bd_dma));
		/* Tell device where to find RBD used table in DRAM */
		iwl_pcie_write_prph_64(trans, RFH_Q_URBDCB_BA_LSB(i),
				       (u64)(trans_pcie->rxq[i].used_bd_dma));
		/* Tell device where in DRAM to update its Rx status */
		iwl_pcie_write_prph_64(trans, RFH_Q_URBD_STTS_WPTR_LSB(i),
				       trans_pcie->rxq[i].rb_stts_dma);
		/* Reset device index tables */
		iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(i), 0);
		iwl_write_prph(trans, RFH_Q_FRBDCB_RIDX(i), 0);
		iwl_write_prph(trans, RFH_Q_URBDCB_WIDX(i), 0);

		enabled |= BIT(i) | BIT(i + 16);
	}

	/* restock default queue */
	iwl_pcie_rxq_mq_restock(trans, &trans_pcie->rxq[0]);

	/*
	 * Enable Rx DMA
	 * Single frame mode
	 * Rx buffer size 4k or 8k or 12k
	 * Min RB size 4 or 8
	 * 512 RBDs
	 */
	iwl_write_prph(trans, RFH_RXF_DMA_CFG,
		       RFH_DMA_EN_ENABLE_VAL |
		       rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK |
		       RFH_RXF_DMA_MIN_RB_4_8 |
		       RFH_RXF_DMA_RBDCB_SIZE_512);

	iwl_write_prph(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP |
					  RFH_GEN_CFG_SERVICE_DMA_SNOOP);
	iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, enabled);

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}

static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
	lockdep_assert_held(&rxq->lock);

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	rxq->free_count = 0;
	rxq->used_count = 0;
}

static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
{
	WARN_ON(1);
	return 0;
}

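/*
 * iwl_pcie_rx_init - initialize (or re-initialize) the RX path
 *
 * Allocates the queues on first use, sets up the background RB allocator,
 * resets all queue indexes, splits the RB pool between the allocator and
 * the default queue, allocates pages for the default queue and finally
 * programs the hardware (legacy or multi-queue).
 */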
int iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *def_rxq;
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i, err, num_rbds, allocator_pool_size;

	if (!trans_pcie->rxq) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}
	def_rxq = trans_pcie->rxq;
	if (!rba->alloc_wq)
		rba->alloc_wq = alloc_workqueue("rb_allocator",
						WQ_HIGHPRI | WQ_UNBOUND, 1);
	INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);

	spin_lock(&rba->lock);
	atomic_set(&rba->req_pending, 0);
	atomic_set(&rba->req_ready, 0);
	INIT_LIST_HEAD(&rba->rbd_allocated);
	INIT_LIST_HEAD(&rba->rbd_empty);
	spin_unlock(&rba->lock);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_free_rbs_pool(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		def_rxq->queue[i] = NULL;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		rxq->id = i;

		spin_lock(&rxq->lock);
		/*
		 * Set read write pointer to reflect that we have processed
		 * and used all buffers, but have not restocked the Rx queue
		 * with fresh buffers
		 */
		rxq->read = 0;
		rxq->write = 0;
		rxq->write_actual = 0;
		memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));

		iwl_pcie_rx_init_rxb_lists(rxq);

		if (!rxq->napi.poll)
			netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
				       iwl_pcie_dummy_napi_poll, 64);

		spin_unlock(&rxq->lock);
	}

	/* move the pool to the default queue and allocator ownerships */
	num_rbds = trans->cfg->mq_rx_supported ?
		     MQ_RX_POOL_SIZE : RX_QUEUE_SIZE;
	allocator_pool_size = trans->num_rx_queues *
		(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
	for (i = 0; i < num_rbds; i++) {
		struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];

		if (i < allocator_pool_size)
			list_add(&rxb->list, &rba->rbd_empty);
		else
			list_add(&rxb->list, &def_rxq->rx_used);
		trans_pcie->global_table[i] = rxb;
		rxb->vid = (u16)i;
	}

	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
	if (trans->cfg->mq_rx_supported) {
		iwl_pcie_rx_mq_hw_init(trans);
	} else {
		iwl_pcie_rxq_restock(trans, def_rxq);
		iwl_pcie_rx_hw_init(trans, def_rxq);
	}

	spin_lock(&def_rxq->lock);
	iwl_pcie_rxq_inc_wr_ptr(trans, def_rxq);
	spin_unlock(&def_rxq->lock);

	return 0;
}

void iwl_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
					      sizeof(__le32);
	int i;

	/*
	 * if rxq is NULL, it means that nothing has been allocated,
	 * exit now
	 */
	if (!trans_pcie->rxq) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	cancel_work_sync(&rba->rx_alloc);
	if (rba->alloc_wq) {
		destroy_workqueue(rba->alloc_wq);
		rba->alloc_wq = NULL;
	}

	iwl_pcie_free_rbs_pool(trans);

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (rxq->bd)
			dma_free_coherent(trans->dev,
					  free_size * rxq->queue_size,
					  rxq->bd, rxq->bd_dma);
		rxq->bd_dma = 0;
		rxq->bd = NULL;

		if (rxq->rb_stts)
			dma_free_coherent(trans->dev,
					  sizeof(struct iwl_rb_status),
					  rxq->rb_stts, rxq->rb_stts_dma);
		else
			IWL_DEBUG_INFO(trans,
				       "Free rxq->rb_stts which is NULL\n");

		if (rxq->used_bd)
			dma_free_coherent(trans->dev,
					  sizeof(__le32) * rxq->queue_size,
					  rxq->used_bd, rxq->used_bd_dma);
		rxq->used_bd_dma = 0;
		rxq->used_bd = NULL;

		if (rxq->napi.poll)
			netif_napi_del(&rxq->napi);
	}
	kfree(trans_pcie->rxq);
}

/*
 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
 *
 * Called when an RBD can be reused. The RBD is transferred to the allocator.
 * When there are 2 empty RBDs - a request for allocation is posted
 */
static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
				  struct iwl_rx_mem_buffer *rxb,
				  struct iwl_rxq *rxq, bool emergency)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;

	/* Move the RBD to the used list, will be moved to allocator in batches
	 * before claiming or posting a request */
	list_add_tail(&rxb->list, &rxq->rx_used);

	if (unlikely(emergency))
		return;

	/* Count the allocator owned RBDs */
	rxq->used_count++;

	/* If we have RX_POST_REQ_ALLOC new released rx buffers -
	 * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is
	 * used for the case we failed to claim RX_CLAIM_REQ_ALLOC
	 * before, but we still need to post another request.
	 */
	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
		/* Move the 2 RBDs to the allocator ownership.
		 * Allocator has another 6 from pool for the request completion */
		spin_lock(&rba->lock);
		list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
		spin_unlock(&rba->lock);

		atomic_inc(&rba->req_pending);
		queue_work(rba->alloc_wq, &rba->rx_alloc);
	}
}

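/*
 * iwl_pcie_rx_handle_rb - handle a single received buffer
 *
 * Unmaps the page and walks every packet it contains (each aligned to
 * FH_RSCSR_FRAME_ALIGN), handing it to the op_mode and reclaiming the
 * matching command entry when the packet is a command response.  At the
 * end the page is either re-mapped and put back on rx_free or, if one of
 * the handlers stole it, recycled through iwl_pcie_rx_reuse_rbd().
 */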
static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
				struct iwl_rxq *rxq,
				struct iwl_rx_mem_buffer *rxb,
				bool emergency)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		u16 sequence;
		bool reclaim;
		int index, cmd_index, len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = offset,
			._rx_page_order = trans_pcie->rx_page_order,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		pkt = rxb_addr(&rxcb);

		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
			break;

		IWL_DEBUG_RX(trans,
			     "cmd at offset %d: %s (0x%.2x, seq 0x%x)\n",
			     rxcb._offset,
			     iwl_get_cmd_string(trans,
						iwl_cmd_id(pkt->hdr.cmd,
							   pkt->hdr.group_id,
							   0)),
			     pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence));

		len = iwl_rx_packet_len(pkt);
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim) {
			int i;

			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
							pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = get_cmd_index(&txq->q, index);

		if (rxq->id == 0)
			iwl_op_mode_rx(trans->op_mode, &rxq->napi,
				       &rxcb);
		else
			iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
					   &rxcb, rxq->id);

		if (reclaim) {
			kzfree(txq->entries[cmd_index].free_buf);
			txq->entries[cmd_index].free_buf = NULL;
		}

		/*
		 * After here, we should always check rxcb._page_stolen,
		 * if it is true then one of the handlers took the page.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (!rxcb._page_stolen)
				iwl_pcie_hcmd_complete(trans, &rxcb);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

	/* page was stolen from us -- free our reference */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/* Reuse the page if possible. For notification packets and
	 * SKBs that fail to Rx correctly, add them back into the
	 * rx_free list for reuse later. */
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			/*
			 * free the page(s) as well to not break
			 * the invariant that the items on the used
			 * list have no page(s)
			 */
			__free_pages(rxb->page, trans_pcie->rx_page_order);
			rxb->page = NULL;
			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
		} else {
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		}
	} else
		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
}

/*
 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 */
static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq[0];
	u32 r, i, j, count = 0;
	bool emergency = false;

restart:
	spin_lock(&rxq->lock);
	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);

	while (i != r) {
		struct iwl_rx_mem_buffer *rxb;

		if (unlikely(rxq->used_count == rxq->queue_size / 2))
			emergency = true;

		if (trans->cfg->mq_rx_supported) {
			/*
			 * used_bd is a 32 bit value but only 12 bits are used
			 * to retrieve the vid
			 */
			u16 vid = (u16)le32_to_cpu(rxq->used_bd[i]);

			rxb = trans_pcie->global_table[vid];
		} else {
			rxb = rxq->queue[i];
			rxq->queue[i] = NULL;
		}

		IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d\n", r, i);
		iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency);

		i = (i + 1) & (rxq->queue_size - 1);

		/* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
		 * try to claim the pre-allocated buffers from the allocator */
		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
			struct iwl_rb_allocator *rba = &trans_pcie->rba;
			struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];

			if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 &&
			    !emergency) {
				/* Add the remaining 6 empty RBDs
				 * for allocator use
				 */
				spin_lock(&rba->lock);
				list_splice_tail_init(&rxq->rx_used,
						      &rba->rbd_empty);
				spin_unlock(&rba->lock);
			}

			/* If not ready - continue, will try to reclaim later.
			 * No need to reschedule work - allocator exits only on
			 * success */
			if (!iwl_pcie_rx_allocator_get(trans, out)) {
				/* If success - then RX_CLAIM_REQ_ALLOC
				 * buffers were retrieved and should be added
				 * to free list */
				rxq->used_count -= RX_CLAIM_REQ_ALLOC;
				for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
					list_add_tail(&out[j]->list,
						      &rxq->rx_free);
					rxq->free_count++;
				}
			}
		}
		if (emergency) {
			count++;
			if (count == 8) {
				count = 0;
				if (rxq->used_count < rxq->queue_size / 3)
					emergency = false;
				spin_unlock(&rxq->lock);
				iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
				spin_lock(&rxq->lock);
			}
		}
		/* handle restock for three cases, can be all of them at once:
		 * - we just pulled buffers from the allocator
		 * - we have 8+ unstolen pages accumulated
		 * - we are in emergency and allocated buffers
		 */
		if (rxq->free_count >=  RX_CLAIM_REQ_ALLOC) {
			rxq->read = i;
			spin_unlock(&rxq->lock);
			if (trans->cfg->mq_rx_supported)
				iwl_pcie_rxq_mq_restock(trans, rxq);
			else
				iwl_pcie_rxq_restock(trans, rxq);
			goto restart;
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	spin_unlock(&rxq->lock);

	/*
	 * handle a case where in emergency there are some unallocated RBDs.
	 * those RBDs are in the used list, but are not tracked by the queue's
	 * used_count which counts allocator owned RBDs.
	 * unallocated emergency RBDs must be allocated on exit, otherwise
	 * when called again the function may not be in emergency mode and
	 * they will be handed to the allocator with no tracking in the RBD
	 * allocator counters, which will lead to them never being claimed back
	 * by the queue.
	 * by allocating them here, they are now in the queue free list, and
	 * will be restocked by the next call of iwl_pcie_rxq_restock.
	 */
	if (unlikely(emergency && count))
		iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);

	if (rxq->napi.poll)
		napi_gro_flush(&rxq->napi, false);
}

/*
 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
	if (trans->cfg->internal_wimax_coex &&
	    !trans->cfg->apmg_not_supported &&
	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
			    APMG_PS_CTRL_VAL_RESET_REQ))) {
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		iwl_op_mode_wimax_active(trans->op_mode);
		wake_up(&trans_pcie->wait_command_queue);
		return;
	}

	iwl_pcie_dump_csr(trans);
	iwl_dump_fh(trans, NULL);

	local_bh_disable();
	/* The STATUS_FW_ERROR bit is set in this function. This must happen
	 * before we wake up the command caller, to ensure a proper cleanup. */
	iwl_trans_fw_error(trans);
	local_bh_enable();

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
		del_timer(&trans_pcie->txq[i].stuck_timer);

	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	wake_up(&trans_pcie->wait_command_queue);
}

static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
{
	u32 inta;

	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);

	trace_iwlwifi_dev_irq(trans->dev);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(trans, CSR_INT);

	/* the thread will service interrupts and re-enable them */
	return inta;
}

/* a device (PCI-E) page is 4096 bytes long */
#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))

/* Interrupt handler using the ICT table. With this mechanism the driver
 * stops reading the INTA register to get the device's interrupts, since
 * reading that register is expensive. Instead, the device writes the
 * interrupts into the ICT DRAM table and increments its index, then fires
 * an interrupt to the driver. The driver ORs all ICT table entries from
 * the current index up to the first entry with a 0 value; the result is
 * the interrupt we need to service. The driver then sets those entries
 * back to 0 and updates the index.
 */
static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 inta;
	u32 val = 0;
	u32 read;

	trace_iwlwifi_dev_irq(trans->dev);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
	if (!read)
		return 0;

	/*
	 * Collect all entries up to the first 0, starting from ict_index;
	 * note we already read at ict_index.
	 */
	do {
		val |= read;
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
				trans_pcie->ict_index, read);
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));

		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
					   read);
	} while (read);

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/*
	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
	 * so we use them to decide on the real state of the Rx bit.
	 * In other words, bit 15 is set if bit 18 or bit 19 are set.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

	inta = (0xff & val) | ((0xff00 & val) << 16);
	return inta;
}

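/*
 * iwl_pcie_irq_handler - threaded handler for the device interrupt
 *
 * Reads the interrupt cause (from the ICT table when in use, otherwise
 * from CSR_INT), acks it, and services each cause in turn: HW/SW errors,
 * RF-kill, CT-kill, wakeup, RX and the firmware-load DMA channel.  Finally
 * re-enables the interrupts appropriate for the current state.
 */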
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
	struct iwl_trans *trans = dev_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta = 0;
	u32 handled = 0;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock(&trans_pcie->irq_lock);

	/* dram interrupt table not set yet,
	 * use legacy interrupt.
	 */
	if (likely(trans_pcie->use_ict))
		inta = iwl_pcie_int_cause_ict(trans);
	else
		inta = iwl_pcie_int_cause_non_ict(trans);

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		IWL_DEBUG_ISR(trans,
			      "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
			      inta, trans_pcie->inta_mask,
			      iwl_read32(trans, CSR_INT_MASK),
			      iwl_read32(trans, CSR_FH_INT_STATUS));
		if (inta & (~trans_pcie->inta_mask))
			IWL_DEBUG_ISR(trans,
				      "We got a masked interrupt (0x%08x)\n",
				      inta & (~trans_pcie->inta_mask));
	}

	inta &= trans_pcie->inta_mask;

	/*
	 * Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC.
	 */
	if (unlikely(!inta)) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		/*
		 * Re-enable interrupts here since we don't
		 * have anything to service
		 */
		if (test_bit(STATUS_INT_ENABLED, &trans->status))
			iwl_enable_interrupts(trans);
		spin_unlock(&trans_pcie->irq_lock);
		lock_map_release(&trans->sync_cmd_lockdep_map);
		return IRQ_NONE;
	}

	if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/*
		 * Hardware disappeared. It might have
		 * already raised an interrupt.
		 */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		spin_unlock(&trans_pcie->irq_lock);
		goto out;
	}

	/* Ack/clear/reset pending uCode interrupts.
	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 */
	/* There is a hardware bug in the interrupt mask function that some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore the
	 * ICT interrupt handling mechanism has another bug that might cause
	 * these unmasked interrupts fail to be detected. We workaround the
	 * hardware bugs here by ACKing all the possible interrupts so that
	 * interrupt coalescing can still be achieved.
	 */
	iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);

	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
			      inta, iwl_read32(trans, CSR_INT_MASK));

	spin_unlock(&trans_pcie->irq_lock);

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected.  Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_pcie_irq_handle_error(trans);

		handled |= CSR_INT_BIT_HW_ERR;

		goto out;
	}

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(trans,
				      "Scheduler finished to transmit the frame/frames.\n");
			isr_stats->sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
			isr_stats->alive++;
		}
	}

	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		bool hw_rfkill;

		hw_rfkill = iwl_is_rfkill_set(trans);
		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
			 hw_rfkill ? "disable radio" : "enable radio");

		isr_stats->rfkill++;

		mutex_lock(&trans_pcie->mutex);
		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
		mutex_unlock(&trans_pcie->mutex);
		if (hw_rfkill) {
			set_bit(STATUS_RFKILL, &trans->status);
			if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
					       &trans->status))
				IWL_DEBUG_RF_KILL(trans,
						  "Rfkill while SYNC HCMD in flight\n");
			wake_up(&trans_pcie->wait_command_queue);
		} else {
			clear_bit(STATUS_RFKILL, &trans->status);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans, "Microcode SW error detected. "
			" Restarting 0x%X.\n", inta);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_check_wrptr(trans);
		iwl_pcie_txq_check_wrptrs(trans);

		isr_stats->wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here*/
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
		    CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(trans, CSR_FH_INT_STATUS,
					CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(trans,
				CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}
		/* Sending an RX interrupt requires many steps to be done in
		 * the device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to an RX race: the driver could receive an RX
		 * interrupt but the shared data changes do not reflect this;
		 * periodic interrupt will detect any dangling Rx activity.
		 */

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(trans, CSR_INT_PERIODIC_REG,
			    CSR_INT_PERIODIC_DIS);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt.  If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(trans, CSR_INT_PERIODIC_REG,
				   CSR_INT_PERIODIC_ENA);

		isr_stats->rx++;

		local_bh_disable();
		iwl_pcie_rx_handle(trans);
		local_bh_enable();
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}

	/* we are loading the firmware, enable FH_TX interrupt only */
	if (handled & CSR_INT_BIT_FH_TX)
		iwl_enable_fw_load_int(trans);
	/* only Re-enable all interrupt if disabled by irq */
	else if (test_bit(STATUS_INT_ENABLED, &trans->status))
		iwl_enable_interrupts(trans);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(trans);

out:
	lock_map_release(&trans->sync_cmd_lockdep_map);
	return IRQ_HANDLED;
}

/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/

/* Free dram table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->ict_tbl) {
		dma_free_coherent(trans->dev, ICT_SIZE,
				  trans_pcie->ict_tbl,
				  trans_pcie->ict_tbl_dma);
		trans_pcie->ict_tbl = NULL;
		trans_pcie->ict_tbl_dma = 0;
	}
}

/*
 * Allocate the DRAM shared table; it is an aligned memory
 * block of ICT_SIZE.
 * Also reset all data related to the ICT table interrupt.
 */
int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->ict_tbl =
		dma_zalloc_coherent(trans->dev, ICT_SIZE,
				   &trans_pcie->ict_tbl_dma,
				   GFP_KERNEL);
	if (!trans_pcie->ict_tbl)
		return -ENOMEM;

	/* just an API sanity check ... it is guaranteed to be aligned */
	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
		iwl_pcie_free_ict(trans);
		return -EINVAL;
	}

	return 0;
}

/* Device is going up; inform it about using the ICT interrupt table,
 * and tell the driver to start using the ICT interrupt.
 */
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;

	if (!trans_pcie->ict_tbl)
		return;

	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE |
	       CSR_DRAM_INIT_TBL_WRAP_CHECK |
	       CSR_DRAM_INIT_TBL_WRITE_POINTER;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

/* Device is going down; disable ICT interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	trans_pcie->use_ict = false;
	spin_unlock(&trans_pcie->irq_lock);
}

irqreturn_t iwl_pcie_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;

	if (!trans)
		return IRQ_NONE;

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	return IRQ_WAKE_THREAD;
}