/******************************************************************************
 *
 * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC.  These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC.  The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped)
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt.  The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish the iwl->rxq->rx_free.
 * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
 *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 *   'processed' and 'read' driver indexes as well)
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq.  The driver 'processed' index is updated.
 * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
 *   rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.  If insufficient rx_free buffers
 *                            are available, schedules iwl_pcie_rx_replenish
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 * ...
 *
 */
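
/*
 * Worked example of the empty/full rule above (illustrative numbers only):
 * with RX_QUEUE_SIZE = 256, READ = 7 and WRITE = 6 means the queue is empty
 * (WRITE == READ - 1), while READ = 7 and WRITE = 7 means the queue is full
 * and the firmware has no slot left to fill.
 */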

/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
	/* Make sure RX_QUEUE_SIZE is a power of 2 */
	BUILD_BUG_ON(RX_QUEUE_SIZE & (RX_QUEUE_SIZE - 1));

	/*
	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
	 * between empty and completely full queues.
	 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
	 * defined for negative dividends.
	 */
	return (rxq->read - rxq->write - 1) & (RX_QUEUE_SIZE - 1);
}
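
/*
 * Worked example of the arithmetic above (illustrative numbers only): with
 * RX_QUEUE_SIZE = 256, read = 10 and write = 200, the expression evaluates
 * to (10 - 200 - 1) & 255 = 65, i.e. 65 slots can still be restocked before
 * write catches up with read - 1.
 */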

/*
 * iwl_pcie_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}
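
/*
 * The RBD therefore stores bits 35..8 of the DMA address. This is safe
 * because the buffer is 256-byte aligned (low 8 bits zero) and at most
 * 36 bits wide; iwl_pcie_rxq_alloc_rbs() enforces both constraints with
 * BUG_ON checks.
 */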

/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_rxq *rxq)
{
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&rxq->lock, flags);

	if (rxq->need_update == 0)
		goto exit_unlock;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		/* Device expects a multiple of 8 */
		rxq->write_actual = (rxq->write & ~0x7);
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
	} else {
		struct iwl_trans_pcie *trans_pcie =
			IWL_TRANS_GET_PCIE_TRANS(trans);

		/* If power-saving is in use, make sure device is awake */
		if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Rx queue requesting wakeup,"
					" GP1 = 0x%x\n", reg);
				iwl_set_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				goto exit_unlock;
			}

			rxq->write_actual = (rxq->write & ~0x7);
			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
					   rxq->write_actual);

		/* Else device is assumed to be awake */
		} else {
			/* Device expects a multiple of 8 */
			rxq->write_actual = (rxq->write & ~0x7);
			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
					   rxq->write_actual);
		}
	}
	rxq->need_update = 0;

 exit_unlock:
	spin_unlock_irqrestore(&rxq->lock, flags);
}
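
/*
 * Note on the multiple-of-8 rounding above (illustrative numbers only):
 * with rxq->write = 13, write_actual = 13 & ~0x7 = 8, so the device is told
 * about only the first 8 restocked slots; the remaining 5 are announced
 * once write crosses the next multiple of 8.
 */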

/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
		return;

	spin_lock_irqsave(&rxq->lock, flags);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		schedule_work(&trans_pcie->rx_replenish);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
	}
}

/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 */
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (trans_pcie->rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
		if (!page) {
			if (net_ratelimit())
				IWL_DEBUG_INFO(trans, "alloc_pages failed, "
					   "order: %d\n",
					   trans_pcie->rx_page_order);

			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
				IWL_CRIT(trans, "Failed to alloc_pages with %s. "
					 "Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ?
					 "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock_irqrestore(&rxq->lock, flags);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock_irqsave(&rxq->lock, flags);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}
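
/*
 * Callers pass GFP_KERNEL when the allocation may sleep
 * (iwl_pcie_rx_replenish, run from scheduled work) and GFP_ATOMIC when it
 * may not (iwl_pcie_rx_replenish_now, run from the interrupt path).
 */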

static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	int i;

	lockdep_assert_held(&rxq->lock);

	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		if (!rxq->pool[i].page)
			continue;
		dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
			       PAGE_SIZE << trans_pcie->rx_page_order,
			       DMA_FROM_DEVICE);
		__free_pages(rxq->pool[i].page, trans_pcie->rx_page_order);
		rxq->pool[i].page = NULL;
	}
}

/*
 * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free
 *
 * When moving to rx_free, a page is allocated for the slot.
 *
 * Also restock the Rx queue via iwl_pcie_rxq_restock.
 * This is called as a scheduled work item (except for during initialization)
 */
static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_pcie_rxq_restock(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
{
	iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);

	iwl_pcie_rxq_restock(trans);
}

static void iwl_pcie_rx_replenish_work(struct work_struct *data)
{
	struct iwl_trans_pcie *trans_pcie =
	    container_of(data, struct iwl_trans_pcie, rx_replenish);

	iwl_pcie_rx_replenish(trans_pcie->trans);
}

static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct device *dev = trans->dev;

	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

	spin_lock_init(&rxq->lock);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				      &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

	/*Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
					   &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}

static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	if (trans_pcie->rx_buf_size_8k)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size |
			   (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}

static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
	int i;

	lockdep_assert_held(&rxq->lock);

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	rxq->free_count = 0;

	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
		list_add(&rxq->pool[i].list, &rxq->rx_used);
}

int iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	int i, err;
	unsigned long flags;

	if (!rxq->bd) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}

	spin_lock_irqsave(&rxq->lock, flags);

	INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_rxq_free_rbs(trans);
	iwl_pcie_rx_init_rxb_lists(rxq);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
	spin_unlock_irqrestore(&rxq->lock, flags);

	iwl_pcie_rx_replenish(trans);

	iwl_pcie_rx_hw_init(trans, rxq);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	rxq->need_update = 1;
	iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	return 0;
}

void iwl_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	unsigned long flags;

	/*if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	cancel_work_sync(&trans_pcie->rx_replenish);

	spin_lock_irqsave(&rxq->lock, flags);
	iwl_pcie_rxq_free_rbs(trans);
	spin_unlock_irqrestore(&rxq->lock, flags);

	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(trans->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
581
	rxq->rb_stts_dma = 0;
582 583 584 585
	rxq->rb_stts = NULL;
}

static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	unsigned long flags;
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		struct iwl_device_cmd *cmd;
		u16 sequence;
		bool reclaim;
		int index, cmd_index, err, len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = offset,
			._rx_page_order = trans_pcie->rx_page_order,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		pkt = rxb_addr(&rxcb);

		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
			break;

		IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
			rxcb._offset, get_cmd_string(trans_pcie, pkt->hdr.cmd),
			pkt->hdr.cmd);

		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim) {
			int i;

			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
							pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = get_cmd_index(&txq->q, index);

		if (reclaim)
			cmd = txq->entries[cmd_index].cmd;
		else
			cmd = NULL;

		err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);

		if (reclaim) {
			kfree(txq->entries[cmd_index].free_buf);
			txq->entries[cmd_index].free_buf = NULL;
		}

		/*
		 * After here, we should always check rxcb._page_stolen,
		 * if it is true then one of the handlers took the page.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (!rxcb._page_stolen)
				iwl_pcie_hcmd_complete(trans, &rxcb, err);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

	/* page was stolen from us -- free our reference */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/* Reuse the page if possible. For notification packets and
	 * SKBs that fail to Rx correctly, add them back into the
	 * rx_free list for reuse later. */
	spin_lock_irqsave(&rxq->lock, flags);
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			/*
			 * free the page(s) as well to not break
			 * the invariant that the items on the used
			 * list have no page(s)
			 */
			__free_pages(rxb->page, trans_pcie->rx_page_order);
			rxb->page = NULL;
			list_add_tail(&rxb->list, &rxq->rx_used);
		} else {
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		}
	} else
		list_add_tail(&rxb->list, &rxq->rx_used);
	spin_unlock_irqrestore(&rxq->lock, flags);
}

/*
 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 */
static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	u32 r, i;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;

	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);

	/* calculate total frames that need to be restocked after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		struct iwl_rx_mem_buffer *rxb;

		rxb = rxq->queue[i];
		rxq->queue[i] = NULL;

		IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
			     r, i, rxb);
		iwl_pcie_rx_handle_rb(trans, rxb);

		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode won't assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
763
				iwl_pcie_rx_replenish_now(trans);
764 765 766 767 768 769 770 771
				count = 0;
			}
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	if (fill_rx)
		iwl_pcie_rx_replenish_now(trans);
	else
		iwl_pcie_rxq_restock(trans);
}

/*
 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
	if (trans->cfg->internal_wimax_coex &&
	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
			    APMG_PS_CTRL_VAL_RESET_REQ))) {
		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		iwl_op_mode_wimax_active(trans->op_mode);
		wake_up(&trans_pcie->wait_command_queue);
		return;
	}

	iwl_pcie_dump_csr(trans);
	iwl_dump_fh(trans, NULL);

	set_bit(STATUS_FW_ERROR, &trans_pcie->status);
	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
	wake_up(&trans_pcie->wait_command_queue);

	local_bh_disable();
	iwl_op_mode_nic_error(trans->op_mode);
	local_bh_enable();
}

irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
	struct iwl_trans *trans = dev_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta = 0;
	u32 handled = 0;
	unsigned long flags;
	u32 i;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

J
821 822 823 824 825 826 827 828 829 830 831 832

	/* Ack/clear/reset pending uCode interrupts.
	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 */
	/* There is a hardware bug in the interrupt mask function that some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore the
	 * ICT interrupt handling mechanism has another bug that might cause
	 * these unmasked interrupts fail to be detected. We workaround the
	 * hardware bugs here by ACKing all the possible interrupts so that
	 * interrupt coalescing can still be achieved.
	 */
	iwl_write32(trans, CSR_INT,
		    trans_pcie->inta | ~trans_pcie->inta_mask);

	inta = trans_pcie->inta;

	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
			      inta, iwl_read32(trans, CSR_INT_MASK));

	/* the interrupt was saved in inta; now we can reset trans_pcie->inta */
	trans_pcie->inta = 0;

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected.  Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_pcie_irq_handle_error(trans);

		handled |= CSR_INT_BIT_HW_ERR;

		goto out;
	}

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(trans,
				      "Scheduler finished transmitting the frame(s).\n");
			isr_stats->sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
			isr_stats->alive++;
		}
	}

	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		bool hw_rfkill;

		hw_rfkill = iwl_is_rfkill_set(trans);
		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
			 hw_rfkill ? "disable radio" : "enable radio");

		isr_stats->rfkill++;

		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
		if (hw_rfkill) {
			set_bit(STATUS_RFKILL, &trans_pcie->status);
			if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
					       &trans_pcie->status))
				IWL_DEBUG_RF_KILL(trans,
						  "Rfkill while SYNC HCMD in flight\n");
			wake_up(&trans_pcie->wait_command_queue);
		} else {
			clear_bit(STATUS_RFKILL, &trans_pcie->status);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans, "Microcode SW error detected. "
			" Restarting 0x%X.\n", inta);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq);
		for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
			iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]);

		isr_stats->wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here. */
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
		    CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(trans, CSR_FH_INT_STATUS,
					CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(trans,
				CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}
		/* Sending an RX interrupt requires many steps to be done in
		 * the device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to an RX race: the driver could receive an
		 * RX interrupt while the shared data does not yet reflect it;
		 * the periodic interrupt will detect any dangling Rx activity.
		 */

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(trans, CSR_INT_PERIODIC_REG,
			    CSR_INT_PERIODIC_DIS);

		iwl_pcie_rx_handle(trans);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt.  If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(trans, CSR_INT_PERIODIC_REG,
				   CSR_INT_PERIODIC_ENA);

		isr_stats->rx++;
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status))
		iwl_enable_interrupts(trans);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(trans);

out:
	lock_map_release(&trans->sync_cmd_lockdep_map);
	return IRQ_HANDLED;
}

/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/

/* a device (PCI-E) page is 4096 bytes long */
#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))
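/* i.e. 4096 bytes / 4 bytes per entry = 1024 interrupt entries per table */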

/* Free dram table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->ict_tbl) {
		dma_free_coherent(trans->dev, ICT_SIZE,
				  trans_pcie->ict_tbl,
				  trans_pcie->ict_tbl_dma);
		trans_pcie->ict_tbl = NULL;
		trans_pcie->ict_tbl_dma = 0;
	}
}

/*
 * allocate dram shared table, it is an aligned memory
 * block of ICT_SIZE.
 * also reset all data related to ICT table interrupt.
 */
int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->ict_tbl =
		dma_alloc_coherent(trans->dev, ICT_SIZE,
				   &trans_pcie->ict_tbl_dma,
				   GFP_KERNEL);
	if (!trans_pcie->ict_tbl)
		return -ENOMEM;

	/* just an API sanity check ... it is guaranteed to be aligned */
	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
		iwl_pcie_free_ict(trans);
		return -EINVAL;
	}

	IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
		      (unsigned long long)trans_pcie->ict_tbl_dma);

	IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);

	/* reset table and index to all 0 */
	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
	trans_pcie->ict_index = 0;

	/* add periodic RX interrupt */
	trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
	return 0;
}

/* Device is going up: inform it that we will use the ICT interrupt table,
 * and tell the driver to start using the ICT interrupt.
 */
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;
	unsigned long flags;

	if (!trans_pcie->ict_tbl)
		return;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE;
	val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	iwl_enable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

/* Device is going down: disable ICT interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	trans_pcie->use_ict = false;
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

/* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */
static irqreturn_t iwl_pcie_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 inta, inta_mask;

	lockdep_assert_held(&trans_pcie->irq_lock);

	trace_iwlwifi_dev_irq(trans->dev);

	/* Disable (but don't clear!) interrupts here to avoid
	 *    back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the irq thread will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = iwl_read32(trans, CSR_INT_MASK);
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(trans, CSR_INT);

	if (inta & (~inta_mask)) {
		IWL_DEBUG_ISR(trans,
			      "We got a masked interrupt (0x%08x)...Ack and ignore\n",
			      inta & (~inta_mask));
		iwl_write32(trans, CSR_INT, inta & (~inta_mask));
		inta &= inta_mask;
	}

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		return IRQ_HANDLED;
	}

	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans,
			      "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
			      inta, inta_mask,
			      iwl_read32(trans, CSR_FH_INT_STATUS));

	trans_pcie->inta |= inta;
	/* the thread will service interrupts and re-enable them */
	if (likely(inta))
		return IRQ_WAKE_THREAD;
	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
		 !trans_pcie->inta)
		iwl_enable_interrupts(trans);
	return IRQ_HANDLED;

none:
	/* re-enable interrupts here since we don't have anything to service. */
	/* only re-enable if disabled by irq and no tasklet was scheduled. */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
	    !trans_pcie->inta)
		iwl_enable_interrupts(trans);

	return IRQ_NONE;
}

/* Interrupt handler using the ICT table. With this handler the driver stops
 * reading the INTA register to discover the device's interrupts, since
 * reading that register is expensive. Instead, the device writes interrupts
 * into the ICT DRAM table and increments its index, then fires an interrupt
 * to the driver. The driver ORs all ICT table entries from the current index
 * up to the first entry whose value is 0; the result is the interrupt we
 * need to service. The driver then sets the consumed entries back to 0 and
 * updates the index.
 */
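/*
 * Illustrative example of that walk: if the table holds {0x0002, 0x0100, 0}
 * starting at ict_index, the handler ORs the non-zero entries into
 * val = 0x0102, zeroes both consumed slots, and leaves ict_index pointing
 * at the entry holding 0.
 */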
irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie;
	u32 inta;
	u32 val = 0;
	u32 read;
	unsigned long flags;

	if (!trans)
		return IRQ_NONE;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* dram interrupt table not set yet,
	 * use legacy interrupt.
	 */
	if (unlikely(!trans_pcie->use_ict)) {
		irqreturn_t ret = iwl_pcie_isr(irq, data);
		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
		return ret;
	}

	trace_iwlwifi_dev_irq(trans->dev);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
	if (!read) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	/*
	 * Collect all entries up to the first 0, starting from ict_index;
	 * note we already read at ict_index.
	 */
	do {
		val |= read;
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
				trans_pcie->ict_index, read);
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);

		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
					   read);
	} while (read);

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/*
	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
	 * so we use them to decide on the real state of the Rx bit.
	 * In other words, bit 15 is set if bit 18 or bit 19 is set.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

	inta = (0xff & val) | ((0xff00 & val) << 16);
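	/*
	 * The line above folds the ICT encoding back into the CSR_INT
	 * layout: the low byte keeps its position and the second byte moves
	 * to the top byte. Illustrative example: val = 0x8002 yields
	 * inta = 0x80000002.
	 */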
	IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled(sw) 0x%08x ict 0x%08x\n",
		      inta, trans_pcie->inta_mask, val);
	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "enabled(hw) 0x%08x\n",
			      iwl_read32(trans, CSR_INT_MASK));

	inta &= trans_pcie->inta_mask;
	trans_pcie->inta |= inta;

	/* iwl_pcie_tasklet() will service interrupts and re-enable them */
	if (likely(inta)) {
		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
		return IRQ_WAKE_THREAD;
	} else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
		 !trans_pcie->inta) {
		/* Re-enable interrupts only if they were disabled by this
		 * handler and no tasklet was scheduled; if a tasklet was
		 * scheduled, it will re-enable them itself.
		 */
		iwl_enable_interrupts(trans);
	}

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return IRQ_HANDLED;

 none:
	/* re-enable interrupts here since we don't have anything to service.
	 * only Re-enable if disabled by irq.
	 */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
	    !trans_pcie->inta)
		iwl_enable_interrupts(trans);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return IRQ_NONE;
}