/******************************************************************************
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to Receive Buffers to be filled by the NIC.  These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC.  The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped)
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt.  The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
 *   When the interrupt handler is called, the request is processed.
 *   The page is either stolen - transferred to the upper layer -
 *   or reused - added immediately to the iwl->rxq->rx_free list.
 * + When the page is stolen - the driver updates the matching queue's used
 *   count, detaches the RBD and transfers it to the queue's used list.
 *   When there are two used RBDs - they are transferred to the allocator empty
 *   list. Work is then scheduled for the allocator to start allocating
 *   eight buffers.
 *   When there are another 6 used RBDs - they are transferred to the allocator
 *   empty list and the driver tries to claim the pre-allocated buffers and
 *   add them to iwl->rxq->rx_free. If it fails - it keeps trying to claim them
 *   until they are ready.
 *   When there are 8+ buffers in the free list - either from allocation or from
 *   8 reused unstolen pages - restock is called to update the FW and indexes.
 * + In order to make sure the allocator always has RBDs to use for allocation,
 *   the allocator keeps an initial pool of num_queues*(8-2) RBDs - the
 *   maximum number of RBDs that can be missing per allocation request (a
 *   request is posted with 2 empty RBDs; there is no guarantee when the other
 *   6 RBDs are supplied).
 *   The queues supply the recycling of the rest of the RBDs.
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq.  The driver 'processed' index is updated.
 * + If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set, it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock.
 *                            Used only during initialization.
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.
 * iwl_pcie_rx_allocator()    Background work for allocating pages.
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Posts and claims requests to the allocator.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 *
 * RBD life-cycle:
 *
 * Init:
 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
 *
 * Regular Receive interrupt:
 * Page Stolen:
 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
 * Page not Stolen:
 * rxq.queue -> rxq.rx_free -> rxq.queue
 * ...
 *
 */
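/*
 * Illustrative sketch (not part of the driver): the index arithmetic
 * described above, on a stand-alone ring of RX_QUEUE_SIZE slots.  One slot
 * is always left unused so that a completely full ring can be told apart
 * from an empty one.  The driver's real implementation is iwl_rxq_space()
 * below; this block is compiled out and only documents the arithmetic.
 */
#if 0
static int example_rx_ring_space(u32 read, u32 write)
{
	/* e.g. RX_QUEUE_SIZE = 256, read = 3, write = 250 -> 8 free slots */
	return (read - write - 1) & (RX_QUEUE_SIZE - 1);
}
#endif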

/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
	/* Make sure RX_QUEUE_SIZE is a power of 2 */
	BUILD_BUG_ON(RX_QUEUE_SIZE & (RX_QUEUE_SIZE - 1));

	/*
	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
	 * between empty and completely full queues.
	 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
	 * defined for negative dividends.
	 */
	return (rxq->read - rxq->write - 1) & (RX_QUEUE_SIZE - 1);
}

/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}
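/*
 * Worked example (illustrative): a DMA address such as 0x812345600 fits in
 * 36 bits and is 256-byte aligned, so its low 8 bits are zero; shifting it
 * right by 8 loses nothing and the result, 0x08123456, fits the 32-bit RBD
 * slot.  Both constraints are enforced by the BUG_ON()s where pages are
 * mapped below.
 */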

/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	u32 reg;

	lockdep_assert_held(&rxq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. there is a chance that the NIC is asleep
	 */
	if (!trans->cfg->base_params->shadow_reg_enable &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
				       reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			rxq->need_update = true;
			return;
		}
	}

	rxq->write_actual = round_down(rxq->write, 8);
	iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}

static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;

	spin_lock(&rxq->lock);

	if (!rxq->need_update)
		goto exit_unlock;

	iwl_pcie_rxq_inc_wr_ptr(trans);
	rxq->need_update = false;

 exit_unlock:
	spin_unlock(&rxq->lock);
}

/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has already been stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans);
		spin_unlock(&rxq->lock);
	}
}
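/*
 * Worked example (illustrative): if the restock loop advances rxq->write
 * from 8 to 13, write_actual stays at 8 (13 & ~0x7 == 8) and no register
 * write is issued; once rxq->write reaches 16, write_actual becomes 16 and
 * iwl_pcie_rxq_inc_wr_ptr() tells the firmware about the new buffers.
 */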

/*
 * iwl_pcie_rx_alloc_page - allocates and returns a page.
 *
 */
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
					   gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct page *page;
	gfp_t gfp_mask = priority;

	if (rxq->free_count > RX_LOW_WATERMARK)
		gfp_mask |= __GFP_NOWARN;

	if (trans_pcie->rx_page_order > 0)
		gfp_mask |= __GFP_COMP;

	/* Alloc a new receive buffer */
	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
	if (!page) {
		if (net_ratelimit())
			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
				       trans_pcie->rx_page_order);
		/* Issue an error if the hardware has consumed more than half
		 * of its free buffer list and we don't have enough
		 * pre-allocated buffers.
		 */
		if (rxq->free_count <= RX_LOW_WATERMARK &&
		    iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) &&
		    net_ratelimit())
			IWL_CRIT(trans,
				 "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n",
				 rxq->free_count);
		return NULL;
	}
	return page;
}

/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 */
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;

	while (1) {
		spin_lock(&rxq->lock);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			return;
		}
		spin_unlock(&rxq->lock);

		/* Alloc a new receive buffer */
		page = iwl_pcie_rx_alloc_page(trans, priority);
		if (!page)
			return;

		spin_lock(&rxq->lock);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock(&rxq->lock);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock(&rxq->lock);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock(&rxq->lock);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock(&rxq->lock);
	}
}

static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	int i;

	lockdep_assert_held(&rxq->lock);

	for (i = 0; i < RX_QUEUE_SIZE; i++) {
		if (!rxq->pool[i].page)
			continue;
		dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
			       PAGE_SIZE << trans_pcie->rx_page_order,
			       DMA_FROM_DEVICE);
		__free_pages(rxq->pool[i].page, trans_pcie->rx_page_order);
		rxq->pool[i].page = NULL;
	}
}

/*
 * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free
 *
 * When moving to rx_free a page is allocated for the slot.
 *
 * Also restock the Rx queue via iwl_pcie_rxq_restock.
 * This is called only during initialization
 */
static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
{
	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);

	iwl_pcie_rxq_restock(trans);
}

/*
 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
 *
 * Allocates 8 pages for each received request.
 * Called as a scheduled work item.
 */
static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct list_head local_empty;
	int pending = atomic_xchg(&rba->req_pending, 0);

	IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);

	/* If we were scheduled - there is at least one request */
	spin_lock(&rba->lock);
	/* swap out the rba->rbd_empty to a local list */
	list_replace_init(&rba->rbd_empty, &local_empty);
	spin_unlock(&rba->lock);

	while (pending) {
		int i;
		struct list_head local_allocated;

		INIT_LIST_HEAD(&local_allocated);

		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
			struct iwl_rx_mem_buffer *rxb;
			struct page *page;

			/* List should never be empty - each reused RBD is
			 * returned to the list, and initial pool covers any
			 * possible gap between the time the page is allocated
			 * to the time the RBD is added.
			 */
			BUG_ON(list_empty(&local_empty));
			/* Get the first rxb from the rbd list */
			rxb = list_first_entry(&local_empty,
					       struct iwl_rx_mem_buffer, list);
			BUG_ON(rxb->page);

			/* Alloc a new receive buffer */
			page = iwl_pcie_rx_alloc_page(trans, GFP_KERNEL);
			if (!page)
				continue;
			rxb->page = page;

			/* Get physical address of the RB */
			rxb->page_dma = dma_map_page(trans->dev, page, 0,
					PAGE_SIZE << trans_pcie->rx_page_order,
					DMA_FROM_DEVICE);
			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
				rxb->page = NULL;
				__free_pages(page, trans_pcie->rx_page_order);
				continue;
			}
			/* dma address must be no more than 36 bits */
			BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
			/* and also 256 byte aligned! */
			BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

			/* move the allocated entry to the out list */
			list_move(&rxb->list, &local_allocated);
			i++;
		}

		pending--;
		if (!pending) {
			pending = atomic_xchg(&rba->req_pending, 0);
			IWL_DEBUG_RX(trans,
				     "Pending allocation requests = %d\n",
				     pending);
		}

		spin_lock(&rba->lock);
		/* add the allocated rbds to the allocator allocated list */
		list_splice_tail(&local_allocated, &rba->rbd_allocated);
		/* get more empty RBDs for current pending requests */
		list_splice_tail_init(&rba->rbd_empty, &local_empty);
		spin_unlock(&rba->lock);

		atomic_inc(&rba->req_ready);
	}

	spin_lock(&rba->lock);
	/* return unused rbds to the allocator empty list */
	list_splice_tail(&local_empty, &rba->rbd_empty);
	spin_unlock(&rba->lock);
}

/*
 * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages
 *
 * Called by the queue when it has posted an allocation request and
 * has freed 8 RBDs in order to restock itself.
 */
static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
				     struct iwl_rx_mem_buffer
				     *out[RX_CLAIM_REQ_ALLOC])
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	/*
	 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
	 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
	 * function will return -ENOMEM, as there are no ready requests.
	 * atomic_dec_if_positive will perform the *actual* decrement only if
	 * req_ready > 0, i.e. - there are ready requests and the function
	 * hands one request to the caller.
	 */
	if (atomic_dec_if_positive(&rba->req_ready) < 0)
		return -ENOMEM;

	spin_lock(&rba->lock);
	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
		/* Get next free Rx buffer, remove it from free list */
		out[i] = list_first_entry(&rba->rbd_allocated,
			       struct iwl_rx_mem_buffer, list);
		list_del(&out[i]->list);
	}
	spin_unlock(&rba->lock);

	return 0;
}

static void iwl_pcie_rx_allocator_work(struct work_struct *data)
{
	struct iwl_rb_allocator *rba_p =
		container_of(data, struct iwl_rb_allocator, rx_alloc);
	struct iwl_trans_pcie *trans_pcie =
		container_of(rba_p, struct iwl_trans_pcie, rba);

	iwl_pcie_rx_allocator(trans_pcie->trans);
}

static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct device *dev = trans->dev;

	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

	spin_lock_init(&rxq->lock);
	spin_lock_init(&rba->lock);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				      &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
					   &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}

static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_4K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
	}

	/* Stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size|
			   (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (trans->cfg->host_interrupt_operation_mode)
		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}

static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
	int i;

	lockdep_assert_held(&rxq->lock);

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	rxq->free_count = 0;
	rxq->used_count = 0;

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		list_add(&rxq->pool[i].list, &rxq->rx_used);
}

static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba)
{
	int i;

	lockdep_assert_held(&rba->lock);

	INIT_LIST_HEAD(&rba->rbd_allocated);
	INIT_LIST_HEAD(&rba->rbd_empty);

	for (i = 0; i < RX_POOL_SIZE; i++)
		list_add(&rba->pool[i].list, &rba->rbd_empty);
}

static void iwl_pcie_rx_free_rba(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	lockdep_assert_held(&rba->lock);

	for (i = 0; i < RX_POOL_SIZE; i++) {
		if (!rba->pool[i].page)
			continue;
		dma_unmap_page(trans->dev, rba->pool[i].page_dma,
			       PAGE_SIZE << trans_pcie->rx_page_order,
			       DMA_FROM_DEVICE);
		__free_pages(rba->pool[i].page, trans_pcie->rx_page_order);
		rba->pool[i].page = NULL;
	}
}

int iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i, err;

	if (!rxq->bd) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}
	if (!rba->alloc_wq)
		rba->alloc_wq = alloc_workqueue("rb_allocator",
						WQ_HIGHPRI | WQ_UNBOUND, 1);
	INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);

	spin_lock(&rba->lock);
	atomic_set(&rba->req_pending, 0);
	atomic_set(&rba->req_ready, 0);
	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_rx_free_rba(trans);
	iwl_pcie_rx_init_rba(rba);
	spin_unlock(&rba->lock);

	spin_lock(&rxq->lock);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_rxq_free_rbs(trans);
	iwl_pcie_rx_init_rxb_lists(rxq);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
	spin_unlock(&rxq->lock);

	iwl_pcie_rx_replenish(trans);

	iwl_pcie_rx_hw_init(trans, rxq);

	spin_lock(&rxq->lock);
	iwl_pcie_rxq_inc_wr_ptr(trans);
	spin_unlock(&rxq->lock);

	return 0;
}

void iwl_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rb_allocator *rba = &trans_pcie->rba;

	/* if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	cancel_work_sync(&rba->rx_alloc);
	if (rba->alloc_wq) {
		destroy_workqueue(rba->alloc_wq);
		rba->alloc_wq = NULL;
	}

	spin_lock(&rba->lock);
	iwl_pcie_rx_free_rba(trans);
	spin_unlock(&rba->lock);

	spin_lock(&rxq->lock);
	iwl_pcie_rxq_free_rbs(trans);
	spin_unlock(&rxq->lock);

	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(trans->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
	rxq->rb_stts_dma = 0;
	rxq->rb_stts = NULL;
}

/*
 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
 *
 * Called when a RBD can be reused. The RBD is transferred to the allocator.
 * When there are 2 empty RBDs - a request for allocation is posted
 */
static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
				  struct iwl_rx_mem_buffer *rxb,
				  struct iwl_rxq *rxq, bool emergency)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;

	/* Move the RBD to the used list, will be moved to allocator in batches
	 * before claiming or posting a request */
	list_add_tail(&rxb->list, &rxq->rx_used);

	if (unlikely(emergency))
		return;

	/* Count the allocator owned RBDs */
	rxq->used_count++;

	/* If we have RX_POST_REQ_ALLOC newly released rx buffers -
	 * issue a request for the allocator. The modulo by RX_CLAIM_REQ_ALLOC
	 * covers the case where we failed to claim RX_CLAIM_REQ_ALLOC buffers,
	 * but we still need to post another request.
	 */
	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
		/* Move the 2 RBDs to the allocator ownership.
		 * Allocator has another 6 from pool for the request completion */
		spin_lock(&rba->lock);
		list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
		spin_unlock(&rba->lock);

		atomic_inc(&rba->req_pending);
		queue_work(rba->alloc_wq, &rba->rx_alloc);
	}
}
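/*
 * Worked example (illustrative, assuming RX_POST_REQ_ALLOC == 2 and
 * RX_CLAIM_REQ_ALLOC == 8 as described in the theory of operation above):
 * at used_count == 2 the two used RBDs are handed to the allocator and a
 * request is posted; at used_count == 8 the queue tries to claim the eight
 * pre-allocated pages; at used_count == 10 (10 % 8 == 2) another request is
 * posted even if the earlier claim is still pending.
 */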

static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
				struct iwl_rx_mem_buffer *rxb,
				bool emergency)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		u16 sequence;
		bool reclaim;
		int index, cmd_index, len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = offset,
			._rx_page_order = trans_pcie->rx_page_order,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		pkt = rxb_addr(&rxcb);

		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
			break;

		IWL_DEBUG_RX(trans,
			     "cmd at offset %d: %s (0x%.2x, seq 0x%x)\n",
			     rxcb._offset,
			     iwl_get_cmd_string(trans,
						iwl_cmd_id(pkt->hdr.cmd,
							   pkt->hdr.group_id,
							   0)),
			     pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence));

		len = iwl_rx_packet_len(pkt);
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim) {
			int i;

			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
							pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = get_cmd_index(&txq->q, index);

		iwl_op_mode_rx(trans->op_mode, &trans_pcie->napi, &rxcb);

		if (reclaim) {
			kzfree(txq->entries[cmd_index].free_buf);
			txq->entries[cmd_index].free_buf = NULL;
		}

		/*
		 * After here, we should always check rxcb._page_stolen,
		 * if it is true then one of the handlers took the page.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (!rxcb._page_stolen)
				iwl_pcie_hcmd_complete(trans, &rxcb);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

	/* page was stolen from us -- free our reference */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/* Reuse the page if possible. For notification packets and
	 * SKBs that fail to Rx correctly, add them back into the
	 * rx_free list for reuse later. */
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			/*
			 * free the page(s) as well to not break
			 * the invariant that the items on the used
			 * list have no page(s)
			 */
			__free_pages(rxb->page, trans_pcie->rx_page_order);
			rxb->page = NULL;
			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
		} else {
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		}
	} else
		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
}

/*
 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 */
static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	u32 r, i, j, count = 0;
	bool emergency = false;

restart:
	spin_lock(&rxq->lock);
	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);

	while (i != r) {
		struct iwl_rx_mem_buffer *rxb;

		if (unlikely(rxq->used_count == RX_QUEUE_SIZE / 2))
			emergency = true;

		rxb = rxq->queue[i];
		rxq->queue[i] = NULL;

		IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d\n", r, i);
		iwl_pcie_rx_handle_rb(trans, rxb, emergency);

		i = (i + 1) & RX_QUEUE_MASK;

		/* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
		 * try to claim the pre-allocated buffers from the allocator */
		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
			struct iwl_rb_allocator *rba = &trans_pcie->rba;
			struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];

			if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 &&
			    !emergency) {
				/* Add the remaining 6 empty RBDs
				* for allocator use
				 */
				spin_lock(&rba->lock);
				list_splice_tail_init(&rxq->rx_used,
						      &rba->rbd_empty);
				spin_unlock(&rba->lock);
			}

			/* If not ready - continue, will try to reclaim later.
			* No need to reschedule work - allocator exits only on
			* success */
			if (!iwl_pcie_rx_allocator_get(trans, out)) {
				/* If success - then RX_CLAIM_REQ_ALLOC
				 * buffers were retrieved and should be added
				 * to free list */
				rxq->used_count -= RX_CLAIM_REQ_ALLOC;
				for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
					list_add_tail(&out[j]->list,
						      &rxq->rx_free);
					rxq->free_count++;
				}
			}
		}
		if (emergency) {
			count++;
			if (count == 8) {
				count = 0;
				if (rxq->used_count < RX_QUEUE_SIZE / 3)
					emergency = false;
				spin_unlock(&rxq->lock);
				iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
				spin_lock(&rxq->lock);
			}
		}
		/* handle restock for three cases, can be all of them at once:
		* - we just pulled buffers from the allocator
		* - we have 8+ unstolen pages accumulated
		* - we are in emergency and allocated buffers
		 */
		if (rxq->free_count >=  RX_CLAIM_REQ_ALLOC) {
			rxq->read = i;
			spin_unlock(&rxq->lock);
			iwl_pcie_rxq_restock(trans);
			goto restart;
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	spin_unlock(&rxq->lock);

	/*
	 * handle a case where in emergency there are some unallocated RBDs.
	 * those RBDs are in the used list, but are not tracked by the queue's
	 * used_count which counts allocator owned RBDs.
	 * unallocated emergency RBDs must be allocated on exit, otherwise
	 * when called again the function may not be in emergency mode and
	 * they will be handed to the allocator with no tracking in the RBD
	 * allocator counters, which will lead to them never being claimed back
	 * by the queue.
	 * by allocating them here, they are now in the queue free list, and
	 * will be restocked by the next call of iwl_pcie_rxq_restock.
	 */
	if (unlikely(emergency && count))
		iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);

	if (trans_pcie->napi.poll)
		napi_gro_flush(&trans_pcie->napi, false);
}

/*
 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
	if (trans->cfg->internal_wimax_coex &&
	    !trans->cfg->apmg_not_supported &&
	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
			    APMG_PS_CTRL_VAL_RESET_REQ))) {
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		iwl_op_mode_wimax_active(trans->op_mode);
		wake_up(&trans_pcie->wait_command_queue);
		return;
	}

	iwl_pcie_dump_csr(trans);
	iwl_dump_fh(trans, NULL);

	local_bh_disable();
	/* The STATUS_FW_ERROR bit is set in this function. This must happen
	 * before we wake up the command caller, to ensure a proper cleanup. */
	iwl_trans_fw_error(trans);
	local_bh_enable();

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
		del_timer(&trans_pcie->txq[i].stuck_timer);

	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	wake_up(&trans_pcie->wait_command_queue);
}

static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
{
	u32 inta;

	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);

	trace_iwlwifi_dev_irq(trans->dev);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(trans, CSR_INT);

	/* the thread will service interrupts and re-enable them */
	return inta;
}

/* a device (PCI-E) page is 4096 bytes long */
#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))

/* interrupt handler using ict table, with this interrupt driver will
 * stop using INTA register to get device's interrupt, reading this register
 * is expensive, device will write interrupts in ICT dram table, increment
 * index then will fire interrupt to driver, driver will OR all ICT table
 * entries from current index up to table entry with 0 value. the result is
 * the interrupt we need to service, driver will set the entries back to 0 and
 * set index.
 */
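/*
 * Illustrative walk (not driver code): if ict_index points at entry N and
 * the table holds [N]=0x00000002, [N+1]=0x00000800, [N+2]=0, the handler
 * below ORs the reads into 0x00000802, zeroes the two consumed entries and
 * leaves ict_index at N+2, where the next interrupt starts reading.
 */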
static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 inta;
	u32 val = 0;
	u32 read;

	trace_iwlwifi_dev_irq(trans->dev);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
	if (!read)
		return 0;

	/*
	 * Collect all entries up to the first 0, starting from ict_index;
	 * note we already read at ict_index.
	 */
	do {
		val |= read;
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
				trans_pcie->ict_index, read);
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));

		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
					   read);
	} while (read);

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/*
	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
	 * so we use them to decide on the real state of the Rx bit.
	 * In other words, bit 15 is set if bit 18 or bit 19 is set.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

	inta = (0xff & val) | ((0xff00 & val) << 16);
	return inta;
}

irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
	struct iwl_trans *trans = dev_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta = 0;
	u32 handled = 0;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock(&trans_pcie->irq_lock);

	/* dram interrupt table not set yet,
	 * use legacy interrupt.
	 */
	if (likely(trans_pcie->use_ict))
		inta = iwl_pcie_int_cause_ict(trans);
	else
		inta = iwl_pcie_int_cause_non_ict(trans);

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		IWL_DEBUG_ISR(trans,
			      "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
			      inta, trans_pcie->inta_mask,
			      iwl_read32(trans, CSR_INT_MASK),
			      iwl_read32(trans, CSR_FH_INT_STATUS));
		if (inta & (~trans_pcie->inta_mask))
			IWL_DEBUG_ISR(trans,
				      "We got a masked interrupt (0x%08x)\n",
				      inta & (~trans_pcie->inta_mask));
	}

	inta &= trans_pcie->inta_mask;

	/*
	 * Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC.
	 */
	if (unlikely(!inta)) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		/*
		 * Re-enable interrupts here since we don't
		 * have anything to service
		 */
		if (test_bit(STATUS_INT_ENABLED, &trans->status))
			iwl_enable_interrupts(trans);
		spin_unlock(&trans_pcie->irq_lock);
		lock_map_release(&trans->sync_cmd_lockdep_map);
		return IRQ_NONE;
	}

	if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/*
		 * Hardware disappeared. It might have
		 * already raised an interrupt.
		 */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		spin_unlock(&trans_pcie->irq_lock);
		goto out;
	}

	/* Ack/clear/reset pending uCode interrupts.
	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 */
	/* There is a hardware bug in the interrupt mask function that some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore the
	 * ICT interrupt handling mechanism has another bug that might cause
	 * these unmasked interrupts fail to be detected. We workaround the
	 * hardware bugs here by ACKing all the possible interrupts so that
	 * interrupt coalescing can still be achieved.
	 */
	iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);

	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
			      inta, iwl_read32(trans, CSR_INT_MASK));

	spin_unlock(&trans_pcie->irq_lock);

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected.  Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_pcie_irq_handle_error(trans);

		handled |= CSR_INT_BIT_HW_ERR;

		goto out;
	}

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(trans,
				      "Scheduler finished to transmit the frame/frames.\n");
			isr_stats->sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
			isr_stats->alive++;
		}
	}

	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		bool hw_rfkill;

		hw_rfkill = iwl_is_rfkill_set(trans);
		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
			 hw_rfkill ? "disable radio" : "enable radio");

		isr_stats->rfkill++;

		mutex_lock(&trans_pcie->mutex);
		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
		mutex_unlock(&trans_pcie->mutex);
		if (hw_rfkill) {
			set_bit(STATUS_RFKILL, &trans->status);
			if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
					       &trans->status))
				IWL_DEBUG_RF_KILL(trans,
						  "Rfkill while SYNC HCMD in flight\n");
			wake_up(&trans_pcie->wait_command_queue);
		} else {
			clear_bit(STATUS_RFKILL, &trans->status);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans, "Microcode SW error detected. "
			" Restarting 0x%X.\n", inta);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_check_wrptr(trans);
		iwl_pcie_txq_check_wrptrs(trans);

		isr_stats->wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here*/
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
		    CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(trans, CSR_FH_INT_STATUS,
					CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(trans,
				CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}
		/* Sending an RX interrupt requires many steps to be done in
		 * the device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to an RX race: the driver could receive the
		 * RX interrupt before the shared data changes reflect it;
		 * the periodic interrupt will detect any dangling Rx activity.
		 */

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(trans, CSR_INT_PERIODIC_REG,
			    CSR_INT_PERIODIC_DIS);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt.  If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(trans, CSR_INT_PERIODIC_REG,
				   CSR_INT_PERIODIC_ENA);

		isr_stats->rx++;

		local_bh_disable();
		iwl_pcie_rx_handle(trans);
		local_bh_enable();
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &trans->status))
		iwl_enable_interrupts(trans);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(trans);

out:
	lock_map_release(&trans->sync_cmd_lockdep_map);
	return IRQ_HANDLED;
}

/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/

/* Free dram table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->ict_tbl) {
		dma_free_coherent(trans->dev, ICT_SIZE,
				  trans_pcie->ict_tbl,
				  trans_pcie->ict_tbl_dma);
		trans_pcie->ict_tbl = NULL;
		trans_pcie->ict_tbl_dma = 0;
	}
}

/*
 * allocate dram shared table, it is an aligned memory
 * block of ICT_SIZE.
 * also reset all data related to ICT table interrupt.
 */
int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->ict_tbl =
		dma_zalloc_coherent(trans->dev, ICT_SIZE,
				   &trans_pcie->ict_tbl_dma,
				   GFP_KERNEL);
	if (!trans_pcie->ict_tbl)
		return -ENOMEM;

	/* just an API sanity check ... it is guaranteed to be aligned */
	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
		iwl_pcie_free_ict(trans);
		return -EINVAL;
	}

	return 0;
}

/* Device is going up inform it about using ICT interrupt table,
 * also we need to tell the driver to start using ICT interrupt.
 */
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;

	if (!trans_pcie->ict_tbl)
		return;

	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE |
	       CSR_DRAM_INIT_TBL_WRAP_CHECK |
	       CSR_DRAM_INIT_TBL_WRITE_POINTER;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

/* Device is going down disable ict interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	trans_pcie->use_ict = false;
	spin_unlock(&trans_pcie->irq_lock);
}

irqreturn_t iwl_pcie_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;

	if (!trans)
		return IRQ_NONE;

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	return IRQ_WAKE_THREAD;
}