/******************************************************************************
 *
 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which point to Receive Buffers to be filled by the NIC.  These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC.  The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped)
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt.  The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish the iwl->rxq->rx_free.
 * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
 *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 *   'processed' and 'read' driver indexes as well)
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq.  The driver 'processed' index is updated.
 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
 *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
 *   INDEX is not incremented and iwl->status(RX_STALLED) is set.  If there
 *   were enough free buffers and RX_STALLED is set it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.  If insufficient rx_free buffers
 *                            are available, schedules iwl_pcie_rx_replenish
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 * ...
 *
 */
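/*
 * Worked example (illustration only, not driver code): with READ = 5 and
 * WRITE = 4 the queue is empty (WRITE == READ - 1); with READ = 5 and
 * WRITE = 5 it is full.  iwl_rxq_space() below additionally keeps two
 * slots in reserve so a nearly-full queue is never mistaken for an empty
 * one while the driver is restocking.
 */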

/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *q)
{
	int s = q->read - q->write;
	if (s <= 0)
		s += RX_QUEUE_SIZE;
	/* keep some buffer to not confuse full and empty queue */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}

/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
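	/* The device reads RB addresses in 256-byte units, i.e. bits 35:8 of
	 * the DMA address (see the alignment/width BUG_ONs in
	 * iwl_pcie_rxq_alloc_rbs); dropping the low 8 bits yields that value. */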
	return cpu_to_le32((u32)(dma_addr >> 8));
}

/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
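	/* Writing 0 to the channel 0 config register disables the Rx DMA
	 * channel; then poll the Rx status register until the channel
	 * reports idle (or the poll times out). */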
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 * TODO - could be made static
 */
void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_rxq *q)
{
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, q->write_actual);
	} else {
		struct iwl_trans_pcie *trans_pcie =
			IWL_TRANS_GET_PCIE_TRANS(trans);

		/* If power-saving is in use, make sure device is awake */
		if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Rx queue requesting wakeup,"
					" GP1 = 0x%x\n", reg);
				iwl_set_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				goto exit_unlock;
			}

			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
					q->write_actual);

		/* Else device is assumed to be awake */
		} else {
			/* Device expects a multiple of 8 */
			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
				q->write_actual);
		}
	}
	q->need_update = 0;

 exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
}

/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts / tasklets
	 * because we have to (see comment there). On the other hand, since
	 * the APM is stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has already been stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
		return;

	spin_lock_irqsave(&rxq->lock, flags);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		schedule_work(&trans_pcie->rx_replenish);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
	}
}

/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 */
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (trans_pcie->rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
		if (!page) {
			if (net_ratelimit())
				IWL_DEBUG_INFO(trans, "alloc_pages failed, "
					   "order: %d\n",
					   trans_pcie->rx_page_order);

			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
				IWL_CRIT(trans, "Failed to alloc_pages with %s."
					 "Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ?
					 "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock_irqrestore(&rxq->lock, flags);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}

static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	int i;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << trans_pcie->rx_page_order,
				       DMA_FROM_DEVICE);
			__free_pages(rxq->pool[i].page,
				     trans_pcie->rx_page_order);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}
}

/*
 * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free
 *
 * When moving to rx_free, a page is allocated for the slot.
 *
 * Also restock the Rx queue via iwl_pcie_rxq_restock.
 * This is called as a scheduled work item (except during initialization)
 */
static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_pcie_rxq_restock(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
{
	iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);

	iwl_pcie_rxq_restock(trans);
}

static void iwl_pcie_rx_replenish_work(struct work_struct *data)
{
	struct iwl_trans_pcie *trans_pcie =
	    container_of(data, struct iwl_trans_pcie, rx_replenish);

	iwl_pcie_rx_replenish(trans_pcie->trans);
}

static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct device *dev = trans->dev;

	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

	spin_lock_init(&rxq->lock);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				      &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

	/*Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
					   &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}

static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	if (trans_pcie->rx_buf_size_8k)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size|
			   (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}

int iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;

	int i, err;
	unsigned long flags;

	if (!rxq->bd) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	INIT_WORK(&trans_pcie->rx_replenish,
		  iwl_pcie_rx_replenish_work);

	iwl_pcie_rxq_free_rbs(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);

	iwl_pcie_rx_replenish(trans);

	iwl_pcie_rx_hw_init(trans, rxq);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	rxq->need_update = 1;
	iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	return 0;
}

void iwl_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	unsigned long flags;

	/*if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	iwl_pcie_rxq_free_rbs(trans);
	spin_unlock_irqrestore(&rxq->lock, flags);

	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(trans->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
	memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
	rxq->rb_stts = NULL;
}

static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	unsigned long flags;
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

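	/* A single RB may hold several packets back to back; walk them until
	 * the end of the buffer or an invalid-frame marker is reached. */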
	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		struct iwl_device_cmd *cmd;
		u16 sequence;
		bool reclaim;
		int index, cmd_index, err, len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = offset,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		pkt = rxb_addr(&rxcb);

		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
			break;

		IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
			rxcb._offset, get_cmd_string(trans_pcie, pkt->hdr.cmd),
			pkt->hdr.cmd);

		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim) {
			int i;

			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
							pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = get_cmd_index(&txq->q, index);

		if (reclaim) {
			struct iwl_pcie_txq_entry *ent;
			ent = &txq->entries[cmd_index];
			cmd = ent->copy_cmd;
			WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD);
		} else {
			cmd = NULL;
		}

		err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);

		if (reclaim) {
			/* The original command isn't needed any more */
			kfree(txq->entries[cmd_index].copy_cmd);
			txq->entries[cmd_index].copy_cmd = NULL;
			/* nor is the duplicated part of the command */
			kfree(txq->entries[cmd_index].free_buf);
			txq->entries[cmd_index].free_buf = NULL;
		}

		/*
		 * After here, we should always check rxcb._page_stolen,
		 * if it is true then one of the handlers took the page.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (!rxcb._page_stolen)
				iwl_pcie_hcmd_complete(trans, &rxcb, err);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

	/* page was stolen from us -- free our reference */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/* Reuse the page if possible. For notification packets and
	 * SKBs that fail to Rx correctly, add them back into the
	 * rx_free list for reuse later. */
	spin_lock_irqsave(&rxq->lock, flags);
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
	} else
		list_add_tail(&rxb->list, &rxq->rx_used);
	spin_unlock_irqrestore(&rxq->lock, flags);
}

/*
 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 */
static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	u32 r, i;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;

	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) &  0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);

	/* calculate total frames need to be restock after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;
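	/* fill_rx means more than half the queue needs restocking; in that
	 * case the loop below restocks after every 8 handled buffers instead
	 * of waiting until the end. */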

	while (i != r) {
		struct iwl_rx_mem_buffer *rxb;

		rxb = rxq->queue[i];
		rxq->queue[i] = NULL;

		IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
			     r, i, rxb);
		iwl_pcie_rx_handle_rb(trans, rxb);

		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode won't assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				iwl_pcie_rx_replenish_now(trans);
				count = 0;
			}
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	if (fill_rx)
		iwl_pcie_rx_replenish_now(trans);
	else
		iwl_pcie_rxq_restock(trans);
}

/*
 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
	if (trans->cfg->internal_wimax_coex &&
	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
			    APMG_PS_CTRL_VAL_RESET_REQ))) {
		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		iwl_op_mode_wimax_active(trans->op_mode);
		wake_up(&trans_pcie->wait_command_queue);
		return;
	}

	iwl_pcie_dump_csr(trans);
	iwl_pcie_dump_fh(trans, NULL);

	set_bit(STATUS_FW_ERROR, &trans_pcie->status);
	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
	wake_up(&trans_pcie->wait_command_queue);

	iwl_op_mode_nic_error(trans->op_mode);
}

void iwl_pcie_tasklet(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta = 0;
	u32 handled = 0;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 */
	/* There is a hardware bug in the interrupt mask function that some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore the
	 * ICT interrupt handling mechanism has another bug that might cause
	 * these unmasked interrupts fail to be detected. We workaround the
	 * hardware bugs here by ACKing all the possible interrupts so that
	 * interrupt coalescing can still be achieved.
	 */
	iwl_write32(trans, CSR_INT,
		    trans_pcie->inta | ~trans_pcie->inta_mask);

	inta = trans_pcie->inta;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* just for debug */
		inta_mask = iwl_read32(trans, CSR_INT_MASK);
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
			      inta, inta_mask);
	}
#endif

	/* the interrupt has been saved in the inta variable; now we can reset trans_pcie->inta */
	trans_pcie->inta = 0;

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected.  Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_pcie_irq_handle_error(trans);

		handled |= CSR_INT_BIT_HW_ERR;

		return;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(trans, "Scheduler finished to transmit "
				      "the frame/frames.\n");
			isr_stats->sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
			isr_stats->alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		bool hw_rfkill;

		hw_rfkill = iwl_is_rfkill_set(trans);
		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
			 hw_rfkill ? "disable radio" : "enable radio");

		isr_stats->rfkill++;

		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
		if (hw_rfkill) {
			set_bit(STATUS_RFKILL, &trans_pcie->status);
			if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
					       &trans_pcie->status))
				IWL_DEBUG_RF_KILL(trans,
						  "Rfkill while SYNC HCMD in flight\n");
			wake_up(&trans_pcie->wait_command_queue);
		} else {
			clear_bit(STATUS_RFKILL, &trans_pcie->status);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans, "Microcode SW error detected. "
			" Restarting 0x%X.\n", inta);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq);
		for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
			iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]);

		isr_stats->wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here*/
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
		    CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(trans, CSR_FH_INT_STATUS,
					CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(trans,
				CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}
		/* Sending an RX interrupt requires many steps to be done in
		 * the device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to an RX race: the driver could receive an RX
		 * interrupt, but the shared data changes do not reflect this yet;
		 * periodic interrupt will detect any dangling Rx activity.
		 */

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(trans, CSR_INT_PERIODIC_REG,
			    CSR_INT_PERIODIC_DIS);

		iwl_pcie_rx_handle(trans);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt.  If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(trans, CSR_INT_PERIODIC_REG,
				   CSR_INT_PERIODIC_ENA);

		isr_stats->rx++;
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status))
		iwl_enable_interrupts(trans);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(trans);
}

/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/

/* a device (PCI-E) page is 4096 bytes long */
#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))
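/* i.e. a 4096-byte table with 4-byte entries -> 1024 ICT entries */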

/* Free dram table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->ict_tbl) {
		dma_free_coherent(trans->dev, ICT_SIZE,
				  trans_pcie->ict_tbl,
				  trans_pcie->ict_tbl_dma);
		trans_pcie->ict_tbl = NULL;
		trans_pcie->ict_tbl_dma = 0;
	}
}

/*
 * allocate dram shared table, it is an aligned memory
 * block of ICT_SIZE.
 * also reset all data related to ICT table interrupt.
 */
int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->ict_tbl =
		dma_alloc_coherent(trans->dev, ICT_SIZE,
				   &trans_pcie->ict_tbl_dma,
				   GFP_KERNEL);
	if (!trans_pcie->ict_tbl)
		return -ENOMEM;

	/* just an API sanity check ... it is guaranteed to be aligned */
	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
		iwl_pcie_free_ict(trans);
		return -EINVAL;
	}

	IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
		      (unsigned long long)trans_pcie->ict_tbl_dma);

	IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);

	/* reset table and index to all 0 */
	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
	trans_pcie->ict_index = 0;

	/* add periodic RX interrupt */
	trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
	return 0;
}

/* Device is going up - inform it about using the ICT interrupt table,
 * and tell the driver to start using the ICT interrupt.
 */
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;
	unsigned long flags;

	if (!trans_pcie->ict_tbl)
		return;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE;
	val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	iwl_enable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

/* Device is going down - disable ICT interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	trans_pcie->use_ict = false;
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

/* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */
static irqreturn_t iwl_pcie_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 inta, inta_mask;
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 inta_fh;
#endif

	lockdep_assert_held(&trans_pcie->irq_lock);

	trace_iwlwifi_dev_irq(trans->dev);

	/* Disable (but don't clear!) interrupts here to avoid
	 *    back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = iwl_read32(trans, CSR_INT_MASK);  /* just for debug */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(trans, CSR_INT);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		return IRQ_HANDLED;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		inta_fh = iwl_read32(trans, CSR_FH_INT_STATUS);
		IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x, "
			      "fh 0x%08x\n", inta, inta_mask, inta_fh);
	}
#endif

	trans_pcie->inta |= inta;
	/* iwl_pcie_tasklet() will service interrupts and re-enable them */
	if (likely(inta))
		tasklet_schedule(&trans_pcie->irq_tasklet);
	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
		 !trans_pcie->inta)
		iwl_enable_interrupts(trans);

none:
	/* re-enable interrupts here since we don't have anything to service. */
	/* only re-enable if disabled by irq and no tasklet was scheduled. */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
	    !trans_pcie->inta)
		iwl_enable_interrupts(trans);

	return IRQ_NONE;
}

/* Interrupt handler using the ICT table. With this handler the driver stops
 * using the INTA register to read the device's interrupts, since reading that
 * register is expensive. Instead, the device writes its interrupts into the
 * ICT DRAM table, increments its index and fires an interrupt to the driver.
 * The driver ORs all ICT table entries from the current index up to the first
 * entry with a 0 value; the result is the interrupt we need to service. The
 * driver then sets those entries back to 0 and updates the index.
 */
irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie;
	u32 inta, inta_mask;
	u32 val = 0;
	u32 read;
	unsigned long flags;

	if (!trans)
		return IRQ_NONE;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* dram interrupt table not set yet,
	 * use legacy interrupt.
	 */
	if (unlikely(!trans_pcie->use_ict)) {
		irqreturn_t ret = iwl_pcie_isr(irq, data);
		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
		return ret;
	}

	trace_iwlwifi_dev_irq(trans->dev);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	inta_mask = iwl_read32(trans, CSR_INT_MASK);  /* just for debug */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
	if (!read) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	/*
	 * Collect all entries up to the first 0, starting from ict_index;
	 * note we already read at ict_index.
	 */
	do {
		val |= read;
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
				trans_pcie->ict_index, read);
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);

		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
					   read);
	} while (read);

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/*
	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
	 * so we use them to decide on the real state of the Rx bit.
	 * In other words, bit 15 is set if bit 18 or bit 19 is set.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

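	/* Each ICT entry packs CSR_INT bits 0..7 and 24..31 into one 16-bit
	 * value; expand them back into the CSR_INT bit layout. */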
	inta = (0xff & val) | ((0xff00 & val) << 16);
	IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
		      inta, inta_mask, val);

	inta &= trans_pcie->inta_mask;
	trans_pcie->inta |= inta;

	/* iwl_pcie_tasklet() will service interrupts and re-enable them */
	if (likely(inta))
		tasklet_schedule(&trans_pcie->irq_tasklet);
	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
		 !trans_pcie->inta) {
		/* Allow the interrupt if it was disabled by this handler and
		 * no tasklet was scheduled. We should not enable the interrupt;
		 * the tasklet will enable it.
		 */
		iwl_enable_interrupts(trans);
	}

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return IRQ_HANDLED;

 none:
	/* re-enable interrupts here since we don't have anything to service.
	 * only Re-enable if disabled by irq.
	 */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
	    !trans_pcie->inta)
		iwl_enable_interrupts(trans);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return IRQ_NONE;
}