/******************************************************************************
 *
 * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which point to Receive Buffers to be filled by the NIC.  These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC.  The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped)
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt.  The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish the iwl->rxq->rx_free.
 * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
 *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 *   'processed' and 'read' driver indexes as well)
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq.  The driver 'processed' index is updated.
 * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
 *   rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.  If insufficient rx_free buffers
 *                            are available, schedules iwl_pcie_rx_replenish
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 * ...
 *
 */
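/*
 * Illustrative example (not part of the original source): with READ = 5,
 * the queue is empty when WRITE = 4 (WRITE = READ - 1) and full when
 * WRITE = 5 (WRITE = READ), per the invariants described above.
 */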

/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *q)
{
	int s = q->read - q->write;
	if (s <= 0)
		s += RX_QUEUE_SIZE;
	/* keep some buffer to not confuse full and empty queue */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
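/*
 * Worked example (illustrative, not part of the original source): with
 * RX_QUEUE_SIZE = 256, read = 10 and write = 250 give s = -240, wrapped
 * to 16, minus the 2-slot guard = 14 free slots. The guard keeps the
 * write index from catching up with the read index, so a full queue is
 * never mistaken for an empty one.
 */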

/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}
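/*
 * Illustrative example (not from the original source): a page DMA
 * address of 0x8_1234_5600 (at most 36 bits wide and 256-byte aligned,
 * as enforced by the BUG_ON checks in iwl_pcie_rxq_alloc_rbs) is
 * encoded as the 32-bit RBD word 0x08123456 by the >> 8 above.
 */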

/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_rxq *q)
{
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, q->write_actual);
	} else {
		struct iwl_trans_pcie *trans_pcie =
			IWL_TRANS_GET_PCIE_TRANS(trans);

		/* If power-saving is in use, make sure device is awake */
		if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Rx queue requesting wakeup,"
					" GP1 = 0x%x\n", reg);
				iwl_set_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				goto exit_unlock;
			}

			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
					q->write_actual);

		/* Else device is assumed to be awake */
		} else {
			/* Device expects a multiple of 8 */
			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
				q->write_actual);
		}
	}
	q->need_update = 0;

 exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
}
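/*
 * Illustrative note (not from the original source): the (write & ~0x7)
 * rounding above means the device-visible write pointer only advances
 * in steps of 8 RBDs; e.g. write = 13 yields write_actual = 8, and the
 * remaining 5 buffers are exposed once write reaches 16.
 */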

/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has already been stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
		return;

	spin_lock_irqsave(&rxq->lock, flags);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		schedule_work(&trans_pcie->rx_replenish);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
	}
}

/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 */
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (trans_pcie->rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
		if (!page) {
			if (net_ratelimit())
				IWL_DEBUG_INFO(trans, "alloc_pages failed, "
					   "order: %d\n",
					   trans_pcie->rx_page_order);

			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
				IWL_CRIT(trans, "Failed to alloc_pages with %s."
					 "Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ?
					 "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock_irqrestore(&rxq->lock, flags);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock_irqsave(&rxq->lock, flags);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}

static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	int i;

	lockdep_assert_held(&rxq->lock);

	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		if (!rxq->pool[i].page)
			continue;
		dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
			       PAGE_SIZE << trans_pcie->rx_page_order,
			       DMA_FROM_DEVICE);
		__free_pages(rxq->pool[i].page, trans_pcie->rx_page_order);
		rxq->pool[i].page = NULL;
	}
}

/*
 * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free
 *
 * When moving to rx_free a page is allocated for the slot.
 *
 * Also restock the Rx queue via iwl_pcie_rxq_restock.
 * This is called as a scheduled work item (except during initialization)
 */
static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_pcie_rxq_restock(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
{
	iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);

	iwl_pcie_rxq_restock(trans);
}

static void iwl_pcie_rx_replenish_work(struct work_struct *data)
{
	struct iwl_trans_pcie *trans_pcie =
	    container_of(data, struct iwl_trans_pcie, rx_replenish);

	iwl_pcie_rx_replenish(trans_pcie->trans);
}

static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct device *dev = trans->dev;

	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

	spin_lock_init(&rxq->lock);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				      &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

	/*Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
					   &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}

static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	if (trans_pcie->rx_buf_size_8k)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size|
			   (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}

static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
	int i;

	lockdep_assert_held(&rxq->lock);

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	rxq->free_count = 0;

	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
		list_add(&rxq->pool[i].list, &rxq->rx_used);
}

int iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	int i, err;
	unsigned long flags;

	if (!rxq->bd) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}

	spin_lock_irqsave(&rxq->lock, flags);

	INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_rxq_free_rbs(trans);
	iwl_pcie_rx_init_rxb_lists(rxq);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
	spin_unlock_irqrestore(&rxq->lock, flags);

	iwl_pcie_rx_replenish(trans);

	iwl_pcie_rx_hw_init(trans, rxq);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	rxq->need_update = 1;
	iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	return 0;
}

void iwl_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	unsigned long flags;

	/*if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	cancel_work_sync(&trans_pcie->rx_replenish);

	spin_lock_irqsave(&rxq->lock, flags);
	iwl_pcie_rxq_free_rbs(trans);
	spin_unlock_irqrestore(&rxq->lock, flags);

	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(trans->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
	rxq->rb_stts_dma = 0;
	rxq->rb_stts = NULL;
}

static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	unsigned long flags;
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		struct iwl_device_cmd *cmd;
		u16 sequence;
		bool reclaim;
		int index, cmd_index, err, len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = offset,
			._rx_page_order = trans_pcie->rx_page_order,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		pkt = rxb_addr(&rxcb);

		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
			break;

		IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
			rxcb._offset, get_cmd_string(trans_pcie, pkt->hdr.cmd),
			pkt->hdr.cmd);

		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim) {
			int i;

			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
							pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = get_cmd_index(&txq->q, index);

		if (reclaim)
			cmd = txq->entries[cmd_index].cmd;
		else
			cmd = NULL;

		err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);

		if (reclaim) {
			kfree(txq->entries[cmd_index].free_buf);
			txq->entries[cmd_index].free_buf = NULL;
		}

		/*
		 * After here, we should always check rxcb._page_stolen,
		 * if it is true then one of the handlers took the page.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (!rxcb._page_stolen)
				iwl_pcie_hcmd_complete(trans, &rxcb, err);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

	/* page was stolen from us -- free our reference */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/* Reuse the page if possible. For notification packets and
	 * SKBs that fail to Rx correctly, add them back into the
	 * rx_free list for reuse later. */
	spin_lock_irqsave(&rxq->lock, flags);
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			/*
			 * free the page(s) as well to not break
			 * the invariant that the items on the used
			 * list have no page(s)
			 */
			__free_pages(rxb->page, trans_pcie->rx_page_order);
			rxb->page = NULL;
			list_add_tail(&rxb->list, &rxq->rx_used);
		} else {
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		}
	} else
		list_add_tail(&rxb->list, &rxq->rx_used);
	spin_unlock_irqrestore(&rxq->lock, flags);
}

/*
 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 */
static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	u32 r, i;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;

	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);

	/* calculate total frames that need to be restocked after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		struct iwl_rx_mem_buffer *rxb;

		rxb = rxq->queue[i];
		rxq->queue[i] = NULL;

		IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
			     r, i, rxb);
		iwl_pcie_rx_handle_rb(trans, rxb);

		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode won't assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				iwl_pcie_rx_replenish_now(trans);
				count = 0;
			}
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	if (fill_rx)
		iwl_pcie_rx_replenish_now(trans);
	else
		iwl_pcie_rxq_restock(trans);
}

/*
 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
	if (trans->cfg->internal_wimax_coex &&
	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
			    APMG_PS_CTRL_VAL_RESET_REQ))) {
		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		iwl_op_mode_wimax_active(trans->op_mode);
		wake_up(&trans_pcie->wait_command_queue);
		return;
	}

	iwl_pcie_dump_csr(trans);
	iwl_pcie_dump_fh(trans, NULL);

	set_bit(STATUS_FW_ERROR, &trans_pcie->status);
	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
	wake_up(&trans_pcie->wait_command_queue);

	local_bh_disable();
	iwl_op_mode_nic_error(trans->op_mode);
	local_bh_enable();
}

irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
	struct iwl_trans *trans = dev_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta = 0;
	u32 handled = 0;
	unsigned long flags;
	u32 i;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 */
	/* There is a hardware bug in the interrupt mask function that some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore the
	 * ICT interrupt handling mechanism has another bug that might cause
	 * these unmasked interrupts fail to be detected. We workaround the
	 * hardware bugs here by ACKing all the possible interrupts so that
	 * interrupt coalescing can still be achieved.
	 */
	iwl_write32(trans, CSR_INT,
		    trans_pcie->inta | ~trans_pcie->inta_mask);

	inta = trans_pcie->inta;

	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
			      inta, iwl_read32(trans, CSR_INT_MASK));

	/* interrupt is saved in the inta variable, now we can reset trans_pcie->inta */
	trans_pcie->inta = 0;

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected.  Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_pcie_irq_handle_error(trans);

		handled |= CSR_INT_BIT_HW_ERR;

		goto out;
	}

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(trans,
				      "Scheduler finished to transmit the frame/frames.\n");
			isr_stats->sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
			isr_stats->alive++;
		}
	}

	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		bool hw_rfkill;

		hw_rfkill = iwl_is_rfkill_set(trans);
		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
			 hw_rfkill ? "disable radio" : "enable radio");

		isr_stats->rfkill++;

		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
		if (hw_rfkill) {
			set_bit(STATUS_RFKILL, &trans_pcie->status);
			if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
					       &trans_pcie->status))
				IWL_DEBUG_RF_KILL(trans,
						  "Rfkill while SYNC HCMD in flight\n");
			wake_up(&trans_pcie->wait_command_queue);
		} else {
			clear_bit(STATUS_RFKILL, &trans_pcie->status);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans, "Microcode SW error detected. "
			" Restarting 0x%X.\n", inta);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq);
		for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
			iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]);

		isr_stats->wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here*/
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
		    CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(trans, CSR_FH_INT_STATUS,
					CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(trans,
				CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}
		/* Sending an RX interrupt requires many steps to be done in
		 * the device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to an RX race: the driver could receive the
		 * RX interrupt before the shared data changes reflect it;
		 * periodic interrupt will detect any dangling Rx activity.
		 */

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(trans, CSR_INT_PERIODIC_REG,
			    CSR_INT_PERIODIC_DIS);

		iwl_pcie_rx_handle(trans);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt.  If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(trans, CSR_INT_PERIODIC_REG,
				   CSR_INT_PERIODIC_ENA);

		isr_stats->rx++;
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status))
		iwl_enable_interrupts(trans);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(trans);

out:
	lock_map_release(&trans->sync_cmd_lockdep_map);
	return IRQ_HANDLED;
}

/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/

/* a device (PCI-E) page is 4096 bytes long */
#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))
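/*
 * Illustrative note (not from the original source): with ICT_SHIFT = 12,
 * ICT_SIZE is 4096 bytes, so ICT_COUNT = 4096 / sizeof(u32) = 1024
 * 32-bit entries, i.e. the whole table fits in a single device page.
 */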

/* Free dram table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->ict_tbl) {
		dma_free_coherent(trans->dev, ICT_SIZE,
				  trans_pcie->ict_tbl,
				  trans_pcie->ict_tbl_dma);
		trans_pcie->ict_tbl = NULL;
		trans_pcie->ict_tbl_dma = 0;
	}
}

/*
 * allocate dram shared table, it is an aligned memory
 * block of ICT_SIZE.
 * also reset all data related to ICT table interrupt.
 */
int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->ict_tbl =
		dma_alloc_coherent(trans->dev, ICT_SIZE,
				   &trans_pcie->ict_tbl_dma,
				   GFP_KERNEL);
	if (!trans_pcie->ict_tbl)
		return -ENOMEM;

	/* just an API sanity check ... it is guaranteed to be aligned */
	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
		iwl_pcie_free_ict(trans);
		return -EINVAL;
	}

	IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
		      (unsigned long long)trans_pcie->ict_tbl_dma);

	IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);

	/* reset table and index to all 0 */
	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
	trans_pcie->ict_index = 0;

	/* add periodic RX interrupt */
	trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
	return 0;
}

/* Device is going up inform it about using ICT interrupt table,
 * also we need to tell the driver to start using ICT interrupt.
 */
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;
	unsigned long flags;

	if (!trans_pcie->ict_tbl)
		return;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE;
	val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	iwl_enable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

/* Device is going down, disable ICT interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	trans_pcie->use_ict = false;
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

/* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */
static irqreturn_t iwl_pcie_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 inta, inta_mask;

	lockdep_assert_held(&trans_pcie->irq_lock);

	trace_iwlwifi_dev_irq(trans->dev);

	/* Disable (but don't clear!) interrupts here to avoid
	 *    back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the irq thread will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = iwl_read32(trans, CSR_INT_MASK);
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(trans, CSR_INT);

	if (inta & (~inta_mask)) {
		IWL_DEBUG_ISR(trans,
			      "We got a masked interrupt (0x%08x)...Ack and ignore\n",
			      inta & (~inta_mask));
		iwl_write32(trans, CSR_INT, inta & (~inta_mask));
		inta &= inta_mask;
	}

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		return IRQ_HANDLED;
	}

	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans,
			      "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
			      inta, inta_mask,
			      iwl_read32(trans, CSR_FH_INT_STATUS));

	trans_pcie->inta |= inta;
	/* the thread will service interrupts and re-enable them */
	if (likely(inta))
		return IRQ_WAKE_THREAD;
	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
		 !trans_pcie->inta)
		iwl_enable_interrupts(trans);
	return IRQ_HANDLED;

none:
	/* re-enable interrupts here since we don't have anything to service. */
	/* only re-enable if disabled by irq and no tasklet was scheduled. */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
	    !trans_pcie->inta)
		iwl_enable_interrupts(trans);

	return IRQ_NONE;
}

/* Interrupt handler using the ICT table. With this handler the driver
 * stops using the INTA register to get the device's interrupts, since
 * reading that register is expensive. Instead, the device writes
 * interrupts into the ICT DRAM table and increments its index, then
 * fires an interrupt to the driver. The driver ORs all ICT table
 * entries from the current index up to the first entry with a 0 value;
 * the result is the interrupt bitmask we need to service. The driver
 * then sets the entries back to 0 and updates the index.
 */
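/*
 * Illustrative walk-through (not from the original source): if the table
 * holds 0x00000080 at ict_index and 0x00008000 at ict_index + 1 followed
 * by a 0 entry, the loop below ORs them into val = 0x00008080, clears
 * both entries and leaves ict_index pointing at the 0 entry.
 */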
irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie;
	u32 inta;
	u32 val = 0;
	u32 read;
	unsigned long flags;

	if (!trans)
		return IRQ_NONE;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* dram interrupt table not set yet,
	 * use legacy interrupt.
	 */
	if (unlikely(!trans_pcie->use_ict)) {
		irqreturn_t ret = iwl_pcie_isr(irq, data);
		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
		return ret;
	}

	trace_iwlwifi_dev_irq(trans->dev);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
	if (!read) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	/*
	 * Collect all entries up to the first 0, starting from ict_index;
	 * note we already read at ict_index.
	 */
	do {
		val |= read;
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
				trans_pcie->ict_index, read);
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);

		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
					   read);
	} while (read);

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/*
	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
	 * so we use them to decide on the real state of the Rx bit.
	 * In other words, bit 15 is set if bit 18 or bit 19 are set.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

	inta = (0xff & val) | ((0xff00 & val) << 16);
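	/*
	 * Illustrative example (not from the original source): if the ORed
	 * table entries give val = 0x000C0042, bits 18-19 are set, so the
	 * workaround above ORs in bit 15: val = 0x000C8042. The repacking
	 * then yields inta = 0x42 | (0x8000 << 16) = 0x80000042, i.e. the
	 * low byte stays in bits 0-7 and the second byte moves to bits 24-31.
	 */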
	IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled(sw) 0x%08x ict 0x%08x\n",
		      inta, trans_pcie->inta_mask, val);
	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "enabled(hw) 0x%08x\n",
			      iwl_read32(trans, CSR_INT_MASK));

	inta &= trans_pcie->inta_mask;
	trans_pcie->inta |= inta;

	/* iwl_pcie_tasklet() will service interrupts and re-enable them */
	if (likely(inta)) {
		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
		return IRQ_WAKE_THREAD;
	} else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
		 !trans_pcie->inta) {
		/* Allow interrupts if they were disabled by this handler and
		 * no tasklet was scheduled; otherwise we should not enable
		 * them here, the tasklet will enable them.
		 */
		iwl_enable_interrupts(trans);
	}

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return IRQ_HANDLED;

 none:
	/* re-enable interrupts here since we don't have anything to service.
	 * only Re-enable if disabled by irq.
	 */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
	    !trans_pcie->inta)
		iwl_enable_interrupts(trans);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return IRQ_NONE;
}