/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include "iwl-trans.h"
#include "iwl-trans-pcie-int.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-shared.h"
#include "iwl-eeprom.h"
#include "iwl-agn-hw.h"
#include "iwl-core.h"
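
/**
 * iwl_trans_rx_alloc - allocate the coherent DMA areas shared with the
 * device for Rx
 *
 * This covers the circular buffer of 256 RBDs (one __le32 each) and the
 * Rx status area the device writes back into; the receive pages
 * themselves are attached later, by iwlagn_rx_replenish().
 */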
static int iwl_trans_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct device *dev = trans->dev;

	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

	spin_lock_init(&rxq->lock);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				      &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
					   &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}

static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	int i;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
				PAGE_SIZE << hw_params(trans).rx_page_order,
				DMA_FROM_DEVICE);
			__free_pages(rxq->pool[i].page,
				     hw_params(trans).rx_page_order);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}
}

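/**
 * iwl_trans_rx_hw_init - program the flow handler's Rx DMA channel 0
 *
 * Points the device at the RBD ring (base address >> 8) and at the Rx
 * status write-back area (>> 4), then enables the channel with the
 * chosen buffer size, RB timeout and ring size.
 */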
static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
				 struct iwl_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */

	if (iwlagn_mod_params.amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
			   rb_size |
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}

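/**
 * iwl_rx_init - (re)initialize the Rx queue
 *
 * Allocates the queue on first use, returns every buffer to rx_used,
 * replenishes fresh pages, programs the Rx DMA hardware and finally
 * publishes the initial write pointer to the device.
 */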
static int iwl_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;

	int i, err;
	unsigned long flags;

	if (!rxq->bd) {
		err = iwl_trans_rx_alloc(trans);
		if (err)
			return err;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	iwl_trans_rxq_free_rx_bufs(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);

	iwlagn_rx_replenish(trans);

	iwl_trans_rx_hw_init(trans, rxq);

	spin_lock_irqsave(&trans->shrd->lock, flags);
	rxq->need_update = 1;
	iwl_rx_queue_update_write_ptr(trans, rxq);
	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	return 0;
}

static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;

	unsigned long flags;

	/* If rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	iwl_trans_rxq_free_rx_bufs(trans);
	spin_unlock_irqrestore(&rxq->lock, flags);

	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(trans->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
	memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
	rxq->rb_stts = NULL;
}

static int iwl_trans_rx_stop(struct iwl_trans *trans)
{
	/* stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
			    FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

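/*
 * Small helpers around dma_alloc_coherent()/dma_free_coherent() for the
 * driver's iwl_dma_ptr triplets (CPU address, bus address, size), used
 * below for the scheduler byte-count tables and the keep-warm buffer.
 */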
static inline int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
				    struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans,
				    struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

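/**
 * iwl_trans_txq_alloc - allocate one Tx queue's host-side state
 *
 * Every queue gets meta/cmd arrays sized to its window and a coherent
 * TFD ring of TFD_QUEUE_SIZE_MAX entries. Only the command queue gets
 * per-slot iwl_device_cmd buffers; only data queues get the skbs array.
 */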
static int iwl_trans_txq_alloc(struct iwl_trans *trans,
				struct iwl_tx_queue *txq, int slots_num,
				u32 txq_id)
{
	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
	int i;

	if (WARN_ON(txq->meta || txq->cmd || txq->skbs || txq->tfds))
		return -EINVAL;

	txq->q.n_window = slots_num;

	txq->meta = kcalloc(slots_num, sizeof(txq->meta[0]), GFP_KERNEL);
	txq->cmd = kcalloc(slots_num, sizeof(txq->cmd[0]), GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto error;

	if (txq_id == trans->shrd->cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
						GFP_KERNEL);
			if (!txq->cmd[i])
				goto error;
		}

	/* Alloc driver data array and TFD circular buffer */
	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (txq_id != trans->shrd->cmd_queue) {
		txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(txq->skbs[0]),
				    GFP_KERNEL);
		if (!txq->skbs) {
			IWL_ERR(trans, "kcalloc for auxiliary BD "
				  "structures failed\n");
			goto error;
		}
	} else {
		txq->skbs = NULL;
	}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->q.dma_addr, GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = txq_id;

	return 0;
error:
	kfree(txq->skbs);
	txq->skbs = NULL;
	/* since txq->cmd has been zeroed,
	 * all non allocated cmd[i] will be NULL */
	if (txq->cmd && txq_id == trans->shrd->cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->cmd[i]);
	kfree(txq->meta);
	kfree(txq->cmd);
	txq->meta = NULL;
	txq->cmd = NULL;

	return -ENOMEM;
}

static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
		      int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = 0;
	memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);

	/*
	 * For the default queues 0-3, set up the swq_id
	 * already -- all others need to get one later
	 * (if they need one at all).
	 */
	if (txq_id < 4)
		iwl_set_swq_id(txq, txq_id, txq_id);

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
			txq_id);
	if (ret)
		return ret;

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
			     txq->q.dma_addr >> 8);

	return 0;
}

/**
 * iwl_tx_queue_unmap -  Unmap any remaining DMA mappings and free skb's
 */
static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	enum dma_data_direction dma_dir;
	unsigned long flags;
	spinlock_t *lock;

	if (!q->n_bd)
		return;

	/* In the command queue, all the TBs are mapped as BIDI
	 * so unmap them as such.
	 */
	if (txq_id == trans->shrd->cmd_queue) {
		dma_dir = DMA_BIDIRECTIONAL;
		lock = &trans->hcmd_lock;
	} else {
		dma_dir = DMA_TO_DEVICE;
		lock = &trans->shrd->sta_lock;
	}

	spin_lock_irqsave(lock, flags);
	while (q->write_ptr != q->read_ptr) {
		/* The read_ptr needs to be bounded by q->n_window */
		iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr),
				    dma_dir);
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
	spin_unlock_irqrestore(lock, flags);
}

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_tx_queue_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans->shrd->cmd_queue)
		for (i = 0; i < txq->q.n_window; i++)
			kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd) {
		dma_free_coherent(dev, sizeof(struct iwl_tfd) *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
		memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
	}

	/* De-alloc array of per-TFD driver data */
	kfree(txq->skbs);
	txq->skbs = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/**
 * iwl_trans_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Tx queues */
	if (trans_pcie->txq) {
		for (txq_id = 0;
		     txq_id < hw_params(trans).max_txq_num; txq_id++)
			iwl_tx_queue_free(trans, txq_id);
	}

	kfree(trans_pcie->txq);
	trans_pcie->txq = NULL;

	iwlagn_free_dma_ptr(trans, &trans_pcie->kw);

	iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}

/**
 * iwl_trans_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 *
 * @trans: the transport layer
 *
 * Return: error code
 */
static int iwl_trans_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	u16 scd_bc_tbls_size = hw_params(trans).max_txq_num *
			sizeof(struct iwlagn_scd_bc_tbl);

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
				   scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq = kcalloc(hw_params(trans).max_txq_num,
				  sizeof(struct iwl_tx_queue), GFP_KERNEL);
	if (!trans_pcie->txq) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
		slots_num = (txq_id == trans->shrd->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
					  slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	iwl_trans_pcie_tx_free(trans);

	return ret;
}
static int iwl_tx_init(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	unsigned long flags;
	bool alloc = false;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->txq) {
		ret = iwl_trans_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_irqsave(&trans->shrd->lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl_write_prph(trans, SCD_TXFACT, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
		slots_num = (txq_id == trans->shrd->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
					 slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_trans_pcie_tx_free(trans);
	return ret;
}

static void iwl_set_pwr_vmain(struct iwl_trans *trans)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

		if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
			iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					       ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
			       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041
#define PCI_CFG_LINK_CTRL_VAL_L0S_EN	0x01
#define PCI_CFG_LINK_CTRL_VAL_L1_EN	0x02

static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans)
{
	int pos;
	u16 pci_lnk_ctl;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	struct pci_dev *pci_dev = trans_pcie->pci_dev;

	pos = pci_pcie_cap(pci_dev);
	pci_read_config_word(pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
	return pci_lnk_ctl;
}

static void iwl_apm_config(struct iwl_trans *trans)
{
	/*
	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 *    costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	u16 lctl = iwl_pciexp_link_ctrl(trans);

	if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
				PCI_CFG_LINK_CTRL_VAL_L1_EN) {
		/* L1-ASPM enabled; disable(!) L0S */
		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_printk(KERN_INFO, trans->dev,
			   "L1 Enabled; Disabling L0S\n");
	} else {
		/* L1-ASPM disabled; enable(!) L0S */
		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_printk(KERN_INFO, trans->dev,
			   "L1 Disabled; Enabling L0S\n");
	}
}

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int iwl_apm_init(struct iwl_trans *trans)
{
	int ret = 0;
	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
			  CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 *  don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
			  CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
				    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (cfg(trans)->base_params->pll_cfg_val)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG,
			    cfg(trans)->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks.  This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active */
	iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	set_bit(STATUS_DEVICE_ENABLED, &trans->shrd->status);

out:
	return ret;
}

static int iwl_apm_stop_master(struct iwl_trans *trans)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(trans, CSR_RESET,
			CSR_RESET_REG_FLAG_MASTER_DISABLED,
			CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");

	return ret;
}

static void iwl_apm_stop(struct iwl_trans *trans)
{
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	clear_bit(STATUS_DEVICE_ENABLED, &trans->shrd->status);

	/* Stop device's DMA activity */
	iwl_apm_stop_master(trans);

	/* Reset the entire device */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}

static int iwl_nic_init(struct iwl_trans *trans)
{
	unsigned long flags;

	/* nic_init */
	spin_lock_irqsave(&trans->shrd->lock, flags);
	iwl_apm_init(trans);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING,
		IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	iwl_set_pwr_vmain(trans);

	iwl_nic_config(priv(trans));

#ifndef CONFIG_IWLWIFI_IDI
	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_rx_init(trans);
#endif

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_tx_init(trans))
		return -ENOMEM;

	if (hw_params(trans).shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
			0x800FFFFF);
	}

	set_bit(STATUS_INIT, &trans->shrd->status);

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
				CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
				CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
				HW_READY_TIMEOUT);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
static int iwl_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	/* If HW is not ready, prepare the conditions to check again */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			CSR_HW_IF_CONFIG_REG_PREPARE);

	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
			CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);

	if (ret < 0)
		return ret;

	/* HW should be ready by now, check again. */
	ret = iwl_set_hw_ready(trans);
	if (ret >= 0)
		return 0;
	return ret;
}

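/*
 * Static mappings from the driver's Tx queues to hardware FIFOs and
 * mac80211 access categories. The first four queues serve the BSS
 * context's four ACs (queues 0-3); the IPAN table adds the PAN context
 * queues (7-4) and the auxiliary FIFO. IWL_AC_UNSET marks queues, such
 * as the command queue, that carry no access category.
 */
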
#define IWL_AC_UNSET -1

struct queue_to_fifo_ac {
	s8 fifo, ac;
};

static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
};

static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_BE_IPAN, 2, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_AUX, IWL_AC_UNSET, },
};

static const u8 iwlagn_bss_ac_to_fifo[] = {
	IWL_TX_FIFO_VO,
	IWL_TX_FIFO_VI,
	IWL_TX_FIFO_BE,
	IWL_TX_FIFO_BK,
};
static const u8 iwlagn_bss_ac_to_queue[] = {
	0, 1, 2, 3,
};
static const u8 iwlagn_pan_ac_to_fifo[] = {
	IWL_TX_FIFO_VO_IPAN,
	IWL_TX_FIFO_VI_IPAN,
	IWL_TX_FIFO_BE_IPAN,
	IWL_TX_FIFO_BK_IPAN,
};
static const u8 iwlagn_pan_ac_to_queue[] = {
	7, 6, 5, 4,
};

/*
 * ucode
 */
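/*
 * iwl_load_section - DMA one ucode section into the device's SRAM
 *
 * The section is pushed through the FH service channel: the host
 * programs the source DRAM address, destination SRAM address and byte
 * count, kicks the channel, and then sleeps until ucode_write_complete
 * is set from the interrupt path (or the 5 second timeout expires).
 */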
static int iwl_load_section(struct iwl_trans *trans, const char *name,
				struct fw_desc *image, u32 dst_addr)
{
	dma_addr_t phy_addr = image->p_addr;
	u32 byte_cnt = image->len;
	int ret;

	trans->ucode_write_complete = 0;

	iwl_write_direct32(trans,
		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write_direct32(trans,
		FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);

	iwl_write_direct32(trans,
		FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
		phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write_direct32(trans,
		FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
		(iwl_get_dma_hi_addr(phy_addr)
			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write_direct32(trans,
		FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
		1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
		1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
		FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write_direct32(trans,
		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE	|
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE	|
		FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	IWL_DEBUG_FW(trans, "%s uCode section being loaded...\n", name);
	ret = wait_event_timeout(trans->shrd->wait_command_queue,
				 trans->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Could not load the %s uCode section\n",
			name);
		return -ETIMEDOUT;
	}

	return 0;
}

static int iwl_load_given_ucode(struct iwl_trans *trans, struct fw_img *image)
{
	int ret = 0;

	ret = iwl_load_section(trans, "INST", &image->code,
				   IWLAGN_RTC_INST_LOWER_BOUND);
	if (ret)
		return ret;

	ret = iwl_load_section(trans, "DATA", &image->data,
				    IWLAGN_RTC_DATA_LOWER_BOUND);
	if (ret)
		return ret;

	/* Remove all resets to allow NIC to operate */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}

static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, struct fw_img *fw)
{
	int ret;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	trans->shrd->ucode_owner = IWL_OWNERSHIP_DRIVER;
	trans_pcie->ac_to_queue[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_queue;
	trans_pcie->ac_to_queue[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_queue;

	trans_pcie->ac_to_fifo[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_fifo;
	trans_pcie->ac_to_fifo[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_fifo;

	trans_pcie->mcast_queue[IWL_RXON_CTX_BSS] = 0;
	trans_pcie->mcast_queue[IWL_RXON_CTX_PAN] = IWL_IPAN_MCAST_QUEUE;

	if ((hw_params(trans).sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
	     iwl_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (iwl_read32(trans, CSR_GP_CNTRL) &
			CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
	else
		set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);

	if (iwl_is_rfkill(trans->shrd)) {
		iwl_set_hw_rfkill_state(priv(trans), true);
		iwl_enable_interrupts(trans);
		return -ERFKILL;
	}

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	iwl_load_given_ucode(trans, fw);

	return 0;
}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the tx fifos mask
 * must be called under priv->shrd->lock and mac access
 */
static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
{
	iwl_write_prph(trans, SCD_TXFACT, mask);
}

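/*
 * iwl_tx_start - bring up the Tx scheduler after firmware load
 *
 * Clears the scheduler's context/status SRAM, points it at the DRAM
 * byte-count tables, enables the FH DMA channels, initializes every
 * queue's context, and finally activates the pre-AMPDU queues with
 * their FIFO mapping.
 */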
static void iwl_tx_start(struct iwl_trans *trans)
{
	const struct queue_to_fifo_ac *queue_to_fifo;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&trans->shrd->lock, flags);

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
	a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
	/* reset context data memory */
	for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(trans, a, 0);
	/* reset tx status memory */
	for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(trans, a, 0);
	for (; a < trans_pcie->scd_base_addr +
	       SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(trans).max_txq_num);
	       a += 4)
		iwl_write_targ_mem(trans, a, 0);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwl_write_prph(trans, SCD_QUEUECHAIN_SEL,
		SCD_QUEUECHAIN_SEL_ALL(trans));
	iwl_write_prph(trans, SCD_AGGR_SEL, 0);

	/* initiate the queues */
	for (i = 0; i < hw_params(trans).max_txq_num; i++) {
		iwl_write_prph(trans, SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(trans, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				((SCD_WIN_SIZE <<
				SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
				((SCD_FRAME_LIMIT <<
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwl_write_prph(trans, SCD_INTERRUPT_MASK,
			IWL_MASK(0, hw_params(trans).max_txq_num));

	/* Activate all Tx DMA/FIFO channels */
	iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));

	/* map queues to FIFOs */
	if (trans->shrd->valid_contexts != BIT(IWL_RXON_CTX_BSS))
		queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
	else
		queue_to_fifo = iwlagn_default_queue_to_tx_fifo;

	iwl_trans_set_wr_ptrs(trans, trans->shrd->cmd_queue, 0);

	/* make sure all queues are not stopped */
	memset(&trans_pcie->queue_stopped[0], 0,
		sizeof(trans_pcie->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&trans_pcie->queue_stop_count[i], 0);

	/* reset to 0 to enable all the queues first */
	trans_pcie->txq_ctx_active_msk = 0;

	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) <
						IWLAGN_FIRST_AMPDU_QUEUE);
	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) <
						IWLAGN_FIRST_AMPDU_QUEUE);

	for (i = 0; i < IWLAGN_FIRST_AMPDU_QUEUE; i++) {
		int fifo = queue_to_fifo[i].fifo;
		int ac = queue_to_fifo[i].ac;

		iwl_txq_ctx_activate(trans_pcie, i);

		if (fifo == IWL_TX_FIFO_UNUSED)
			continue;

		if (ac != IWL_AC_UNSET)
			iwl_set_swq_id(&trans_pcie->txq[i], ac, i);
		iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i],
					      fifo, 0);
	}

	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	/* Enable L1-Active */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
{
	iwl_reset_ict(trans);
	iwl_tx_start(trans);
}

/**
 * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
 */
static int iwl_trans_tx_stop(struct iwl_trans *trans)
{
	int ch, txq_id;
	unsigned long flags;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&trans->shrd->lock, flags);

	iwl_trans_txq_set_sched(trans, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(trans,
				   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		if (iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				    1000))
			IWL_ERR(trans, "Failing on timeout while stopping"
			    " DMA channel %d [0x%08x]", ch,
			    iwl_read_direct32(trans,
					      FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	if (!trans_pcie->txq) {
		IWL_WARN(trans, "Stopping tx queues that aren't allocated...");
		return 0;
	}

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
		iwl_tx_queue_unmap(trans, txq_id);

	return 0;
}

static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	unsigned long flags;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&trans->shrd->lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	/* device going down, Stop using ICT table */
	iwl_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans->shrd->status)) {
		iwl_trans_tx_stop(trans);
#ifndef CONFIG_IWLWIFI_IDI
		iwl_trans_rx_stop(trans);
#endif
		/* Power-down device's busmaster DMA clocks */
		iwl_write_prph(trans, APMG_CLK_DIS_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
			CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_apm_stop(trans);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	spin_lock_irqsave(&trans->shrd->lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	/* wait to make sure we flush pending tasklet */
	synchronize_irq(trans->irq);
	tasklet_kill(&trans_pcie->irq_tasklet);

	/* stop and reset the on-board processor */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
}

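/*
 * iwl_trans_pcie_tx - enqueue one frame on the appropriate Tx queue
 *
 * The queue is picked from the frame's context and access category (or
 * the multicast/aux/aggregation queue when applicable); the Tx command
 * plus MAC header are mapped as one DMA chunk and the payload, if any,
 * as a second, and both are attached to the next free TFD.
 */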
static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
		u8 sta_id, u8 tid)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;

	dma_addr_t phys_addr = 0;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	u8 wait_write_ptr = 0;
	u8 txq_id;
	bool is_agg = false;
	__le16 fc = hdr->frame_control;
	u8 hdr_len = ieee80211_hdrlen(fc);
	u16 __maybe_unused wifi_seq;

	/*
	 * Send this frame after DTIM -- there's a special queue
	 * reserved for this for contexts that support AP mode.
	 */
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		txq_id = trans_pcie->mcast_queue[ctx];

		/*
		 * The microcode will clear the more data
		 * bit in the last frame it transmits.
		 */
		hdr->frame_control |=
			cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
		txq_id = IWL_AUX_QUEUE;
	else
		txq_id =
		    trans_pcie->ac_to_queue[ctx][skb_get_queue_mapping(skb)];

	/* aggregation is on for this <sta,tid> */
	if (info->flags & IEEE80211_TX_CTL_AMPDU) {
		WARN_ON(tid >= IWL_MAX_TID_COUNT);
		txq_id = trans_pcie->agg_txq[sta_id][tid];
		is_agg = true;
	}

	txq = &trans_pcie->txq[txq_id];
	q = &txq->q;

	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirement to help the SCD to parse
	 * the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
#ifdef CONFIG_IWLWIFI_DEBUG
	wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE(is_agg && ((wifi_seq & 0xff) != q->write_ptr),
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, q->write_ptr);
#endif

	/* Set up driver data for this TFD */
	txq->skbs[q->write_ptr] = skb;
	txq->cmd[q->write_ptr] = dev_cmd;

	dev_cmd->hdr.cmd = REPLY_TX;
	dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->meta[q->write_ptr];

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (firstlen != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
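
	/*
	 * Worked example (struct sizes here are illustrative, not taken
	 * from the headers): a QoS data frame has a 26-byte MAC header,
	 * so with e.g. len = 166 the rounding gives
	 * firstlen = (166 + 3) & ~3 = 168, and the two pad bytes are
	 * flagged to the device via TX_CMD_FLG_MH_PAD_MSK.
	 */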

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = dma_map_single(trans->dev,
				    &dev_cmd->hdr, firstlen,
				    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
		return -1;
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);

	if (!ieee80211_has_morefrags(fc)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
					   secondlen, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
			dma_unmap_single(trans->dev,
					 dma_unmap_addr(out_meta, mapping),
					 dma_unmap_len(out_meta, len),
					 DMA_BIDIRECTIONAL);
			return -1;
		}
	}

	/* Attach buffers to TFD */
	iwlagn_txq_attach_buf_to_tfd(trans, txq, txcmd_phys, firstlen, 1);
	if (secondlen > 0)
		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     secondlen, 0);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
			DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
		     le16_to_cpu(dev_cmd->hdr.sequence));
	IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
	iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));

	dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
			DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(priv(trans),
			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */
	if (iwl_queue_space(q) < q->high_mark) {
		if (wait_write_ptr) {
			txq->need_update = 1;
			iwl_txq_update_write_ptr(trans, txq);
		} else {
			iwl_stop_queue(trans, txq, "Queue is full");
		}
	}
	return 0;
}

static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	int err;

	trans_pcie->inta_mask = CSR_INI_SET_MASK;

	if (!trans_pcie->irq_requested) {
		tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
			iwl_irq_tasklet, (unsigned long)trans);

		iwl_alloc_isr_ict(trans);

		err = request_irq(trans->irq, iwl_isr_ict, IRQF_SHARED,
			DRV_NAME, trans);
		if (err) {
			IWL_ERR(trans, "Error allocating IRQ %d\n",
				trans->irq);
			goto error;
		}

		INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
		trans_pcie->irq_requested = true;
	}

	err = iwl_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d", err);
		goto error;
	}

	iwl_apm_init(trans);

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (iwl_read32(trans,
			CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
	else
		set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);

	iwl_set_hw_rfkill_state(priv(trans),
				test_bit(STATUS_RF_KILL_HW,
					 &trans->shrd->status));

	return err;

error:
	iwl_free_isr_ict(trans);
	tasklet_kill(&trans_pcie->irq_tasklet);
	return err;
}

static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans)
{
	iwl_apm_stop(trans);

	/* Even if we stop the HW, we still want the RF kill interrupt */
	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	iwl_write32(trans, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
}

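/*
 * iwl_trans_pcie_reclaim - free TFDs up to the given ssn
 *
 * Called on Tx status / BlockAck; hands the reclaimed skbs back to the
 * caller through @skbs and wakes the queue once enough space is free.
 */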
static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
		      int txq_id, int ssn, u32 status,
		      struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	/* n_bd is usually 256 => n_bd - 1 = 0xff */
	int tfd_num = ssn & (txq->q.n_bd - 1);
	int freed = 0;

	txq->time_stamp = jiffies;

	if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE &&
		     txq_id != trans_pcie->agg_txq[sta_id][tid])) {
		/*
		 * FIXME: this is a uCode bug which needs to be addressed,
		 * log the information and return for now.
		 * Since it can possibly happen very often and in order
		 * not to fill the syslog, don't use IWL_ERR or IWL_WARN
		 */
		IWL_DEBUG_TX_QUEUES(trans, "Bad queue mapping txq_id %d, "
			"agg_txq[sta_id][tid] %d", txq_id,
			trans_pcie->agg_txq[sta_id][tid]);
		return 1;
	}

	if (txq->q.read_ptr != tfd_num) {
		IWL_DEBUG_TX_REPLY(trans, "[Q %d | AC %d] %d -> %d (%d)\n",
				txq_id, iwl_get_queue_ac(txq), txq->q.read_ptr,
				tfd_num, ssn);
		freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
		if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
		   (!txq->sched_retry ||
		   status != TX_STATUS_FAIL_PASSIVE_NO_RX))
			iwl_wake_queue(trans, txq, "Packets reclaimed");
	}
	return 0;
}

static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	iowrite8(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	iowrite32(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
	u32 val = ioread32(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
	return val;
}

static void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_calib_free_results(trans);
	iwl_trans_pcie_tx_free(trans);
#ifndef CONFIG_IWLWIFI_IDI
	iwl_trans_pcie_rx_free(trans);
#endif
	if (trans_pcie->irq_requested) {
		free_irq(trans->irq, trans);
		iwl_free_isr_ict(trans);
	}

	pci_disable_msi(trans_pcie->pci_dev);
	pci_iounmap(trans_pcie->pci_dev, trans_pcie->hw_base);
	pci_release_regions(trans_pcie->pci_dev);
	pci_disable_device(trans_pcie->pci_dev);

	trans->shrd->trans = NULL;
	kfree(trans);
}

#ifdef CONFIG_PM_SLEEP
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
	/*
	 * This function is called when the system goes into suspend state.
	 * mac80211 will call iwlagn_mac_stop() from the mac80211 suspend
	 * function first, but since iwlagn_mac_stop() has no knowledge of
	 * who the caller is, it will not call apm_ops.stop() to stop the
	 * DMA operation. Calling apm_ops.stop here to make sure we stop
	 * the DMA.
	 *
	 * But of course ... if we have configured WoWLAN then we did other
	 * things already :-)
	 */
	if (!trans->shrd->wowlan) {
		iwl_apm_stop(trans);
	} else {
		iwl_disable_interrupts(trans);
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	}

	return 0;
}

static int iwl_trans_pcie_resume(struct iwl_trans *trans)
{
	bool hw_rfkill = false;

	iwl_enable_interrupts(trans);

	if (!(iwl_read32(trans, CSR_GP_CNTRL) &
				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
		hw_rfkill = true;

	if (hw_rfkill)
		set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
	else
		clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);

	iwl_set_hw_rfkill_state(priv(trans), hw_rfkill);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static void iwl_trans_pcie_wake_any_queue(struct iwl_trans *trans,
					  enum iwl_rxon_context_id ctx,
					  const char *msg)
{
	u8 ac, txq_id;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	for (ac = 0; ac < AC_NUM; ac++) {
		txq_id = trans_pcie->ac_to_queue[ctx][ac];
		IWL_DEBUG_TX_QUEUES(trans, "Queue Status: Q[%d] %s\n",
			ac,
			(atomic_read(&trans_pcie->queue_stop_count[ac]) > 0)
			      ? "stopped" : "awake");
		iwl_wake_queue(trans, &trans_pcie->txq[txq_id], msg);
	}
}

static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id,
				      const char *msg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_stop_queue(trans, &trans_pcie->txq[txq_id], msg);
}

#define IWL_FLUSH_WAIT_MS	2000

static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	int cnt;
	unsigned long now = jiffies;
	int ret = 0;

	/* waiting for all the tx frames complete might take a while */
	for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
		if (cnt == trans->shrd->cmd_queue)
			continue;
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		while (q->read_ptr != q->write_ptr && !time_after(jiffies,
		       now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
			msleep(1);

		if (q->read_ptr != q->write_ptr) {
			IWL_ERR(trans, "fail to flush all tx fifo queues\n");
			ret = -ETIMEDOUT;
			break;
		}
	}
	return ret;
}

/*
 * On every watchdog tick we check the (latest) time stamp. If it does not
 * change during the timeout period and the queue is not empty, we reset
 * the firmware.
 */
static int iwl_trans_pcie_check_stuck_queue(struct iwl_trans *trans, int cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[cnt];
	struct iwl_queue *q = &txq->q;
	unsigned long timeout;

	if (q->read_ptr == q->write_ptr) {
		txq->time_stamp = jiffies;
		return 0;
	}

	timeout = txq->time_stamp +
		  msecs_to_jiffies(hw_params(trans).wd_timeout);

	if (time_after(jiffies, timeout)) {
		IWL_ERR(trans, "Queue %d stuck for %u ms.\n", q->id,
			hw_params(trans).wd_timeout);
		IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
			q->read_ptr, q->write_ptr);
		IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n",
			iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt))
				& (TFD_QUEUE_SIZE_MAX - 1),
			iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
		return 1;
	}

	return 0;
}

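/*
 * IWL_CMD() below is assumed to follow the usual iwlwifi stringification
 * pattern, roughly:
 *
 *	#define IWL_CMD(x) case x: return #x
 *
 * so the get_*_string() helpers simply map a register address to its
 * symbolic name for the dumps that follow.
 */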
static const char *get_fh_string(int cmd)
{
	switch (cmd) {
	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
	IWL_CMD(FH_RSCSR_CHNL0_WPTR);
	IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
	IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
	IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
	IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
	IWL_CMD(FH_TSSR_TX_STATUS_REG);
	IWL_CMD(FH_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
}

int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
{
	int i;
#ifdef CONFIG_IWLWIFI_DEBUG
	int pos = 0;
	size_t bufsz = 0;
#endif
	static const u32 fh_tbl[] = {
		FH_RSCSR_CHNL0_STTS_WPTR_REG,
		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH_RSCSR_CHNL0_WPTR,
		FH_MEM_RCSR_CHNL0_CONFIG_REG,
		FH_MEM_RSSR_SHARED_CTRL_REG,
		FH_MEM_RSSR_RX_STATUS_REG,
		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH_TSSR_TX_STATUS_REG,
		FH_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLWIFI_DEBUG
	if (display) {
		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
		pos += scnprintf(*buf + pos, bufsz - pos,
				"FH register values:\n");
		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
			pos += scnprintf(*buf + pos, bufsz - pos,
				"  %34s: 0X%08x\n",
				get_fh_string(fh_tbl[i]),
				iwl_read_direct32(trans, fh_tbl[i]));
		}
		return pos;
	}
#endif
	IWL_ERR(trans, "FH register values:\n");
	for (i = 0; i <  ARRAY_SIZE(fh_tbl); i++) {
		IWL_ERR(trans, "  %34s: 0X%08x\n",
			get_fh_string(fh_tbl[i]),
			iwl_read_direct32(trans, fh_tbl[i]));
	}
	return 0;
}

static const char *get_csr_string(int cmd)
{
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
}

void iwl_dump_csr(struct iwl_trans *trans)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};
	IWL_ERR(trans, "CSR values:\n");
	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i <  ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(trans, "  %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(trans, csr_tbl[i]));
	}
}

#ifdef CONFIG_IWLWIFI_DEBUGFS
/* creation and removal of debugfs files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, trans,		\
				 &iwl_dbgfs_##name##_ops))		\
		return -ENOMEM;						\
} while (0)
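/*
 * Note that the "return -ENOMEM" above returns from the function that
 * *uses* DEBUGFS_ADD_FILE(), so the macro is only safe inside functions
 * returning int (see iwl_trans_pcie_dbgfs_register() below).
 */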

/* file operation */
#define DEBUGFS_READ_FUNC(name)                                         \
static ssize_t iwl_dbgfs_##name##_read(struct file *file,               \
					char __user *user_buf,          \
					size_t count, loff_t *ppos);

#define DEBUGFS_WRITE_FUNC(name)                                        \
static ssize_t iwl_dbgfs_##name##_write(struct file *file,              \
					const char __user *user_buf,    \
					size_t count, loff_t *ppos);


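/*
 * debugfs_create_file() stores the iwl_trans pointer passed by
 * DEBUGFS_ADD_FILE() in inode->i_private; stash it in the file so that
 * the read/write handlers can fetch it from file->private_data.
 */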
static int iwl_dbgfs_open_file_generic(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

#define DEBUGFS_READ_FILE_OPS(name)					\
	DEBUGFS_READ_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = iwl_dbgfs_open_file_generic,				\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)                                    \
	DEBUGFS_WRITE_FUNC(name);                                       \
static const struct file_operations iwl_dbgfs_##name##_ops = {          \
	.write = iwl_dbgfs_##name##_write,                              \
	.open = iwl_dbgfs_open_file_generic,				\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
	DEBUGFS_READ_FUNC(name);					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = iwl_dbgfs_open_file_generic,				\
	.llseek = generic_file_llseek,					\
};
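/*
 * For illustration, DEBUGFS_READ_WRITE_FILE_OPS(log_event) expands to the
 * forward declarations of iwl_dbgfs_log_event_read()/_write() plus:
 *
 *	static const struct file_operations iwl_dbgfs_log_event_ops = {
 *		.write = iwl_dbgfs_log_event_write,
 *		.read = iwl_dbgfs_log_event_read,
 *		.open = iwl_dbgfs_open_file_generic,
 *		.llseek = generic_file_llseek,
 *	};
 */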

static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
						char __user *user_buf,
						size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	char *buf;
	int pos = 0;
	int cnt;
	int ret;
	const size_t bufsz = sizeof(char) * 64 * hw_params(trans).max_txq_num;

	if (!trans_pcie->txq) {
		IWL_ERR(trans, "txq not ready\n");
		return -EAGAIN;
	}
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
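		/* low 2 bits of swq_id are the AC, the next 5 the HW queue */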
		pos += scnprintf(buf + pos, bufsz - pos,
				"hwq %.2d: read=%u write=%u stop=%d"
				" swq_id=%#.2x (ac %d/hwq %d)\n",
				cnt, q->read_ptr, q->write_ptr,
				!!test_bit(cnt, trans_pcie->queue_stopped),
				txq->swq_id, txq->swq_id & 3,
				(txq->swq_id >> 2) & 0x1f);
		if (cnt >= 4)
			continue;
		/* for the ACs, display the stop count too */
		pos += scnprintf(buf + pos, bufsz - pos,
			"        stop-count: %d\n",
			atomic_read(&trans_pcie->queue_stop_count[cnt]));
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
						char __user *user_buf,
						size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	char buf[256];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
						rxq->read);
	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
						rxq->write);
	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
						rxq->free_count);
	if (rxq->rb_stts) {
		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
			 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
	} else {
		pos += scnprintf(buf + pos, bufsz - pos,
					"closed_rb_num: Not Allocated\n");
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}

static ssize_t iwl_dbgfs_log_event_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf = NULL;
	int pos = 0;
	ssize_t ret = -ENOMEM;

	ret = pos = iwl_dump_nic_event_log(trans, true, &buf, true);
	if (buf) {
		ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
		kfree(buf);
	}
	return ret;
}

static ssize_t iwl_dbgfs_log_event_write(struct file *file,
					const char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	u32 event_log_flag;
	char buf[8];
	int buf_size;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%u", &event_log_flag) != 1)
		return -EFAULT;
	if (event_log_flag == 1)
		iwl_dump_nic_event_log(trans, true, NULL, false);

	return count;
}

static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IWL_ERR(trans, "Cannot allocate buffer\n");
		return -ENOMEM;
	}

	pos += scnprintf(buf + pos, bufsz - pos,
			"Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
		isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
		isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
			"\tLast Restarting Code:  0x%X\n",
			isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
		isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
		isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
		"HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
		isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
		isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
		"Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
		isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
		isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	char buf[8];
	int buf_size;
	u32 reset_flag;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%x", &reset_flag) != 1)
		return -EFAULT;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}

static ssize_t iwl_dbgfs_csr_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int csr;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &csr) != 1)
		return -EFAULT;

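	/* the parsed value only validates the input; any number triggers
	 * a full CSR dump to the kernel log */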
	iwl_dump_csr(trans);

	return count;
}

static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf = NULL;	/* iwl_dump_fh() may not touch buf on failure */
	int pos = 0;
	ssize_t ret = -EFAULT;

	ret = pos = iwl_dump_fh(trans, &buf, true);
	if (buf) {
		ret = simple_read_from_buffer(user_buf,
					      count, ppos, buf, pos);
		kfree(buf);
	}

	return ret;
}

DEBUGFS_READ_WRITE_FILE_OPS(log_event);
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);

/*
 * Create the debugfs files and directories
 */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					struct dentry *dir)
{
	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(log_event, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
	return 0;
}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					struct dentry *dir)
{ return 0; }

#endif /* CONFIG_IWLWIFI_DEBUGFS */

const struct iwl_trans_ops trans_ops_pcie = {
	.start_hw = iwl_trans_pcie_start_hw,
	.stop_hw = iwl_trans_pcie_stop_hw,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	.wake_any_queue = iwl_trans_pcie_wake_any_queue,

	.send_cmd = iwl_trans_pcie_send_cmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.tx_agg_disable = iwl_trans_pcie_tx_agg_disable,
	.tx_agg_alloc = iwl_trans_pcie_tx_agg_alloc,
	.tx_agg_setup = iwl_trans_pcie_tx_agg_setup,

	.free = iwl_trans_pcie_free,
	.stop_queue = iwl_trans_pcie_stop_queue,

	.dbgfs_register = iwl_trans_pcie_dbgfs_register,

	.wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty,
	.check_stuck_queue = iwl_trans_pcie_check_stuck_queue,

#ifdef CONFIG_PM_SLEEP
	.suspend = iwl_trans_pcie_suspend,
	.resume = iwl_trans_pcie_resume,
#endif

	.write8 = iwl_trans_pcie_write8,
	.write32 = iwl_trans_pcie_write32,
	.read32 = iwl_trans_pcie_read32,
};
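/*
 * Transport users do not call these handlers directly; they go through
 * the iwl_trans wrappers, which dispatch through this ops table.  A
 * minimal sketch (assuming the usual iwl-trans.h wrappers of this era):
 *
 *	static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
 *					     struct iwl_host_cmd *cmd)
 *	{
 *		return trans->ops->send_cmd(trans, cmd);
 *	}
 */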

struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd,
				       struct pci_dev *pdev,
				       const struct pci_device_id *ent)
{
	struct iwl_trans_pcie *trans_pcie;
	struct iwl_trans *trans;
	u16 pci_cmd;
	int err;

	trans = kzalloc(sizeof(struct iwl_trans) +
			     sizeof(struct iwl_trans_pcie), GFP_KERNEL);

	if (WARN_ON(!trans))
		return NULL;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans->ops = &trans_ops_pcie;
	trans->shrd = shrd;
	trans_pcie->trans = trans;
	spin_lock_init(&trans->hcmd_lock);

	/* W/A - seems to solve weird behavior. We need to remove this if we
	 * don't want to stay in L1 all the time. This wastes a lot of power */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
				PCIE_LINK_STATE_CLKPM);

	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_no_pci;
	}

	pci_set_master(pdev);

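	/*
	 * Prefer a 36-bit DMA mask and fall back to 32-bit if the
	 * platform cannot provide it.
	 */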
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (err) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "No suitable DMA available.\n");
			goto out_pci_disable_device;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "pci_request_regions failed\n");
		goto out_pci_disable_device;
	}

	trans_pcie->hw_base = pci_iomap(pdev, 0, 0);
	if (!trans_pcie->hw_base) {
		dev_printk(KERN_ERR, &pdev->dev, "pci_iomap failed\n");
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	dev_printk(KERN_INFO, &pdev->dev,
		"pci_resource_len = 0x%08llx\n",
		(unsigned long long) pci_resource_len(pdev, 0));
	dev_printk(KERN_INFO, &pdev->dev,
		"pci_resource_base = %p\n", trans_pcie->hw_base);

	dev_printk(KERN_INFO, &pdev->dev,
		"HW Revision ID = 0x%X\n", pdev->revision);

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	err = pci_enable_msi(pdev);
	if (err)
		dev_printk(KERN_ERR, &pdev->dev,
			"pci_enable_msi failed(0X%x)", err);

	trans->dev = &pdev->dev;
	trans->irq = pdev->irq;
	trans_pcie->pci_dev = pdev;

	/* TODO: Move this away, not needed if not MSI */
	/* enable rfkill interrupt: hw bug w/a */
	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
		pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
	}

	return trans;

out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_no_pci:
	kfree(trans);
	return NULL;
}