/******************************************************************************
 *
 * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC.  These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC.  The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped)
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt.  The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish the iwl->rxq->rx_free.
 * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
 *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 *   'processed' and 'read' driver indexes as well)
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq.  The driver 'processed' index is updated.
 * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
 *   rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.  If insufficient rx_free buffers
 *                            are available, schedules iwl_pcie_rx_replenish
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 * ...
 *
 */

/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
	/* Make sure RX_QUEUE_SIZE is a power of 2 */
	BUILD_BUG_ON(RX_QUEUE_SIZE & (RX_QUEUE_SIZE - 1));

	/*
	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
	 * between empty and completely full queues.
	 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
	 * defined for negative dividends.
	 */
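	/*
	 * Illustrative arithmetic only: with RX_QUEUE_SIZE == 256,
	 * read == 5 and write == 2 give (5 - 2 - 1) & 255 == 2 free slots,
	 * while read == 2 and write == 5 wrap to (2 - 5 - 1) & 255 == 252.
	 */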
	return (rxq->read - rxq->write - 1) & (RX_QUEUE_SIZE - 1);
}

/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
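	/* The RBD holds the DMA address shifted right by 8; receive buffers
	 * are 256-byte aligned (see the alignment check in
	 * iwl_pcie_rxq_alloc_rbs), so no address bits are lost. */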
	return cpu_to_le32((u32)(dma_addr >> 8));
}

/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_rxq *rxq)
{
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&rxq->lock, flags);

	if (rxq->need_update == 0)
		goto exit_unlock;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		/* Device expects a multiple of 8 */
		rxq->write_actual = (rxq->write & ~0x7);
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
	} else {
		struct iwl_trans_pcie *trans_pcie =
			IWL_TRANS_GET_PCIE_TRANS(trans);

		/* If power-saving is in use, make sure device is awake */
		if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Rx queue requesting wakeup,"
					" GP1 = 0x%x\n", reg);
				iwl_set_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				goto exit_unlock;
			}

			rxq->write_actual = (rxq->write & ~0x7);
			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
					   rxq->write_actual);

		/* Else device is assumed to be awake */
		} else {
			/* Device expects a multiple of 8 */
			rxq->write_actual = (rxq->write & ~0x7);
			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
					   rxq->write_actual);
		}
	}
	rxq->need_update = 0;

 exit_unlock:
	spin_unlock_irqrestore(&rxq->lock, flags);
}

/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
		return;

	spin_lock_irqsave(&rxq->lock, flags);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		schedule_work(&trans_pcie->rx_replenish);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
	}
}

/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 */
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (trans_pcie->rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
		if (!page) {
			if (net_ratelimit())
				IWL_DEBUG_INFO(trans, "alloc_pages failed, "
					   "order: %d\n",
					   trans_pcie->rx_page_order);

			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
				IWL_CRIT(trans, "Failed to alloc_pages with %s."
					 "Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ?
					 "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock_irqrestore(&rxq->lock, flags);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock_irqsave(&rxq->lock, flags);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}

static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	int i;

	lockdep_assert_held(&rxq->lock);

	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		if (!rxq->pool[i].page)
			continue;
		dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
			       PAGE_SIZE << trans_pcie->rx_page_order,
			       DMA_FROM_DEVICE);
		__free_pages(rxq->pool[i].page, trans_pcie->rx_page_order);
		rxq->pool[i].page = NULL;
	}
}

/*
 * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free
 *
 * When moving to rx_free a page is allocated for the slot.
 *
 * Also restock the Rx queue via iwl_pcie_rxq_restock.
 * This is called as a scheduled work item (except during initialization)
 */
static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_pcie_rxq_restock(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
{
	iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);

	iwl_pcie_rxq_restock(trans);
}

static void iwl_pcie_rx_replenish_work(struct work_struct *data)
{
	struct iwl_trans_pcie *trans_pcie =
	    container_of(data, struct iwl_trans_pcie, rx_replenish);

	iwl_pcie_rx_replenish(trans_pcie->trans);
}

static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct device *dev = trans->dev;

	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

	spin_lock_init(&rxq->lock);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				      &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
					   &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}

static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	if (trans_pcie->rx_buf_size_8k)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size|
			   (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (trans->cfg->host_interrupt_operation_mode)
		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}

static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
	int i;

	lockdep_assert_held(&rxq->lock);

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	rxq->free_count = 0;

	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
		list_add(&rxq->pool[i].list, &rxq->rx_used);
}

int iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	int i, err;
	unsigned long flags;

	if (!rxq->bd) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}

	spin_lock_irqsave(&rxq->lock, flags);

	INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_rxq_free_rbs(trans);
	iwl_pcie_rx_init_rxb_lists(rxq);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
	spin_unlock_irqrestore(&rxq->lock, flags);

	iwl_pcie_rx_replenish(trans);

	iwl_pcie_rx_hw_init(trans, rxq);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	rxq->need_update = 1;
	iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	return 0;
}

void iwl_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	unsigned long flags;

	/* if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	cancel_work_sync(&trans_pcie->rx_replenish);

	spin_lock_irqsave(&rxq->lock, flags);
	iwl_pcie_rxq_free_rbs(trans);
	spin_unlock_irqrestore(&rxq->lock, flags);

	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(trans->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
	rxq->rb_stts_dma = 0;
	rxq->rb_stts = NULL;
}

static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	unsigned long flags;
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

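	/* A single RB may hold several packets; walk them until we run past
	 * the end of the buffer or hit an invalid frame marker. */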
	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		struct iwl_device_cmd *cmd;
		u16 sequence;
		bool reclaim;
		int index, cmd_index, err, len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = offset,
			._rx_page_order = trans_pcie->rx_page_order,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		pkt = rxb_addr(&rxcb);

		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
			break;

		IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
			rxcb._offset, get_cmd_string(trans_pcie, pkt->hdr.cmd),
			pkt->hdr.cmd);

		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim) {
			int i;

			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
							pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = get_cmd_index(&txq->q, index);

		if (reclaim)
			cmd = txq->entries[cmd_index].cmd;
		else
			cmd = NULL;

		err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);

		if (reclaim) {
			kfree(txq->entries[cmd_index].free_buf);
			txq->entries[cmd_index].free_buf = NULL;
		}

		/*
		 * After here, we should always check rxcb._page_stolen,
		 * if it is true then one of the handlers took the page.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (!rxcb._page_stolen)
				iwl_pcie_hcmd_complete(trans, &rxcb, err);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

	/* page was stolen from us -- free our reference */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/* Reuse the page if possible. For notification packets and
	 * SKBs that fail to Rx correctly, add them back into the
	 * rx_free list for reuse later. */
	spin_lock_irqsave(&rxq->lock, flags);
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			/*
			 * free the page(s) as well to not break
			 * the invariant that the items on the used
			 * list have no page(s)
			 */
			__free_pages(rxb->page, trans_pcie->rx_page_order);
			rxb->page = NULL;
			list_add_tail(&rxb->list, &rxq->rx_used);
		} else {
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		}
	} else
		list_add_tail(&rxb->list, &rxq->rx_used);
	spin_unlock_irqrestore(&rxq->lock, flags);
}

/*
 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 */
static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	u32 r, i;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;

	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);

	/* calculate how many frames need to be restocked after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		struct iwl_rx_mem_buffer *rxb;

		rxb = rxq->queue[i];
		rxq->queue[i] = NULL;

		IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
			     r, i, rxb);
		iwl_pcie_rx_handle_rb(trans, rxb);

		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
		 * restock the Rx queue so the ucode won't assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				iwl_pcie_rx_replenish_now(trans);
				count = 0;
			}
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	if (fill_rx)
		iwl_pcie_rx_replenish_now(trans);
	else
		iwl_pcie_rxq_restock(trans);
}

/*
 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
	if (trans->cfg->internal_wimax_coex &&
	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
			    APMG_PS_CTRL_VAL_RESET_REQ))) {
		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		iwl_op_mode_wimax_active(trans->op_mode);
		wake_up(&trans_pcie->wait_command_queue);
		return;
	}

	iwl_pcie_dump_csr(trans);
	iwl_dump_fh(trans, NULL);

	/* set the ERROR bit before we wake up the caller */
	set_bit(STATUS_FW_ERROR, &trans_pcie->status);
	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
	wake_up(&trans_pcie->wait_command_queue);

	local_bh_disable();
	iwl_nic_error(trans);
	local_bh_enable();
}

irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
	struct iwl_trans *trans = dev_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta = 0;
	u32 handled = 0;
	unsigned long flags;
	u32 i;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 */
	/* There is a hardware bug in the interrupt mask function that some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore the
	 * ICT interrupt handling mechanism has another bug that might cause
	 * these unmasked interrupts fail to be detected. We workaround the
	 * hardware bugs here by ACKing all the possible interrupts so that
	 * interrupt coalescing can still be achieved.
	 */
	iwl_write32(trans, CSR_INT,
		    trans_pcie->inta | ~trans_pcie->inta_mask);

	inta = trans_pcie->inta;

	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
			      inta, iwl_read32(trans, CSR_INT_MASK));

	/* interrupt saved in the inta variable, now we can reset trans_pcie->inta */
	trans_pcie->inta = 0;

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected.  Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_pcie_irq_handle_error(trans);

		handled |= CSR_INT_BIT_HW_ERR;

		goto out;
	}

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(trans,
				      "Scheduler finished to transmit the frame/frames.\n");
			isr_stats->sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
			isr_stats->alive++;
		}
	}

	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		bool hw_rfkill;

		hw_rfkill = iwl_is_rfkill_set(trans);
		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
			 hw_rfkill ? "disable radio" : "enable radio");

		isr_stats->rfkill++;

		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
		if (hw_rfkill) {
			set_bit(STATUS_RFKILL, &trans_pcie->status);
			if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
					       &trans_pcie->status))
				IWL_DEBUG_RF_KILL(trans,
						  "Rfkill while SYNC HCMD in flight\n");
			wake_up(&trans_pcie->wait_command_queue);
		} else {
			clear_bit(STATUS_RFKILL, &trans_pcie->status);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans, "Microcode SW error detected. "
			" Restarting 0x%X.\n", inta);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq);
		for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
			iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]);

		isr_stats->wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here */
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
		    CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(trans, CSR_FH_INT_STATUS,
					CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(trans,
				CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}
		/* Sending an RX interrupt requires many steps to be done in
		 * the device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to an RX race: the driver could receive an RX
		 * interrupt before the shared data reflects it; the periodic
		 * interrupt will detect any dangling Rx activity.
		 */

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(trans, CSR_INT_PERIODIC_REG,
			    CSR_INT_PERIODIC_DIS);

		iwl_pcie_rx_handle(trans);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt.  If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(trans, CSR_INT_PERIODIC_REG,
				   CSR_INT_PERIODIC_ENA);

		isr_stats->rx++;
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status))
		iwl_enable_interrupts(trans);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(trans);

out:
	lock_map_release(&trans->sync_cmd_lockdep_map);
	return IRQ_HANDLED;
}

/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/

/* a device (PCI-E) page is 4096 bytes long */
#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))

/* Free dram table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->ict_tbl) {
		dma_free_coherent(trans->dev, ICT_SIZE,
				  trans_pcie->ict_tbl,
				  trans_pcie->ict_tbl_dma);
		trans_pcie->ict_tbl = NULL;
		trans_pcie->ict_tbl_dma = 0;
	}
}

/*
 * allocate dram shared table, it is an aligned memory
 * block of ICT_SIZE.
 * also reset all data related to ICT table interrupt.
 */
int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->ict_tbl =
		dma_alloc_coherent(trans->dev, ICT_SIZE,
				   &trans_pcie->ict_tbl_dma,
				   GFP_KERNEL);
	if (!trans_pcie->ict_tbl)
		return -ENOMEM;

	/* just an API sanity check ... it is guaranteed to be aligned */
	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
		iwl_pcie_free_ict(trans);
		return -EINVAL;
	}

	IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
		      (unsigned long long)trans_pcie->ict_tbl_dma);

	IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);

	/* reset table and index to all 0 */
	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
	trans_pcie->ict_index = 0;

	/* add periodic RX interrupt */
	trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
	return 0;
}

/* Device is going up: inform it that it is using the ICT interrupt table,
 * and tell the driver to start using the ICT interrupt.
 */
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;
	unsigned long flags;

	if (!trans_pcie->ict_tbl)
		return;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

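	/* The ICT table is ICT_SIZE (4 KB) aligned, so only its page number
	 * needs to be programmed into the device. */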
	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE;
	val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	iwl_enable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

/* Device is going down, disable ICT interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	trans_pcie->use_ict = false;
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

/* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */
static irqreturn_t iwl_pcie_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 inta, inta_mask;
	irqreturn_t ret = IRQ_NONE;

	lockdep_assert_held(&trans_pcie->irq_lock);

	trace_iwlwifi_dev_irq(trans->dev);

	/* Disable (but don't clear!) interrupts here to avoid
	 *    back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the irq thread will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = iwl_read32(trans, CSR_INT_MASK);
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(trans, CSR_INT);

	if (inta & (~inta_mask)) {
		IWL_DEBUG_ISR(trans,
			      "We got a masked interrupt (0x%08x)...Ack and ignore\n",
			      inta & (~inta_mask));
		iwl_write32(trans, CSR_INT, inta & (~inta_mask));
		inta &= inta_mask;
	}

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		return IRQ_HANDLED;
	}

	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans,
			      "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
			      inta, inta_mask,
			      iwl_read32(trans, CSR_FH_INT_STATUS));

	trans_pcie->inta |= inta;
	/* the thread will service interrupts and re-enable them */
	if (likely(inta))
		return IRQ_WAKE_THREAD;

	ret = IRQ_HANDLED;

none:
	/* re-enable interrupts here since we don't have anything to service. */
	/* only re-enable if disabled by irq and no tasklet is scheduled. */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
	    !trans_pcie->inta)
		iwl_enable_interrupts(trans);

	return ret;
}

/* Interrupt handler using the ICT table. With this handler the driver stops
 * using the INTA register to read the device's interrupts, since reading that
 * register is expensive. Instead, the device writes interrupts into the ICT
 * DRAM table, increments its index and fires an interrupt to the driver. The
 * driver ORs all ICT table entries from the current index up to the first
 * entry with a 0 value; the result is the interrupt we need to service. The
 * driver then sets the entries back to 0 and updates the index.
 */
irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie;
	u32 inta;
	u32 val = 0;
	u32 read;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	if (!trans)
		return IRQ_NONE;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* dram interrupt table not set yet,
	 * use legacy interrupt.
	 */
	if (unlikely(!trans_pcie->use_ict)) {
		ret = iwl_pcie_isr(irq, data);
		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
		return ret;
	}

	trace_iwlwifi_dev_irq(trans->dev);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the irq thread will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
	if (!read) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	/*
	 * Collect all entries up to the first 0, starting from ict_index;
	 * note we already read at ict_index.
	 */
	do {
		val |= read;
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
				trans_pcie->ict_index, read);
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);

		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
					   read);
	} while (read);

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/*
	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
	 * so we use them to decide on the real state of the Rx bit.
	 * In other words, bit 15 is set if bit 18 or bit 19 is set.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

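	/* Expand the packed ICT value back into CSR_INT layout: the low byte
	 * supplies bits 0-7 and the second byte supplies bits 24-31. */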
	inta = (0xff & val) | ((0xff00 & val) << 16);
	IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled(sw) 0x%08x ict 0x%08x\n",
		      inta, trans_pcie->inta_mask, val);
	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "enabled(hw) 0x%08x\n",
			      iwl_read32(trans, CSR_INT_MASK));

	inta &= trans_pcie->inta_mask;
	trans_pcie->inta |= inta;

	/* the irq thread will service interrupts and re-enable them */
	if (likely(inta)) {
		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
		return IRQ_WAKE_THREAD;
	}

	ret = IRQ_HANDLED;

 none:
	/* re-enable interrupts here since we don't have anything to service.
	 * only Re-enable if disabled by irq.
	 */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
	    !trans_pcie->inta)
		iwl_enable_interrupts(trans);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return ret;
}