/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include "iwl-trans.h"
#include "iwl-trans-pcie-int.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-shared.h"
#include "iwl-eeprom.h"
#include "iwl-agn-hw.h"

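/*
 * iwl_trans_rx_alloc - allocate the Rx queue DMA resources
 *
 * Allocates the circular buffer of Read Buffer Descriptors (RBDs) and the
 * receive buffer status area; both are DMA-coherent and shared with the
 * device.
 */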
static int iwl_trans_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct device *dev = bus(trans)->dev;

	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

	spin_lock_init(&rxq->lock);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				     &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;
	memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE);

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts),
					  &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;
	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}

static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	int i;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			dma_unmap_page(bus(trans)->dev, rxq->pool[i].page_dma,
				PAGE_SIZE << hw_params(trans).rx_page_order,
				DMA_FROM_DEVICE);
			__free_pages(rxq->pool[i].page,
				     hw_params(trans).rx_page_order);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}
}

static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
				 struct iwl_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */

	if (iwlagn_mod_params.amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to host
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
			   rb_size |
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(bus(trans), CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}

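/*
 * iwl_rx_init - initialize (or reset) the Rx queue
 *
 * Allocates the Rx queue on first use, returns all buffers to the rx_used
 * list, resets the read/write indices and programs the Rx DMA hardware.
 */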
static int iwl_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;

	int i, err;
	unsigned long flags;

	if (!rxq->bd) {
		err = iwl_trans_rx_alloc(trans);
		if (err)
			return err;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	iwl_trans_rxq_free_rx_bufs(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);

	iwlagn_rx_replenish(trans);

	iwl_trans_rx_hw_init(trans, rxq);

	spin_lock_irqsave(&trans->shrd->lock, flags);
	rxq->need_update = 1;
	iwl_rx_queue_update_write_ptr(trans, rxq);
	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	return 0;
}

static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;

	unsigned long flags;

	/* if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	iwl_trans_rxq_free_rx_bufs(trans);
	spin_unlock_irqrestore(&rxq->lock, flags);

	dma_free_coherent(bus(trans)->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(bus(trans)->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
	memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
	rxq->rb_stts = NULL;
}

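/* Stop Rx DMA and wait for the channel to become idle */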
static int iwl_trans_rx_stop(struct iwl_trans *trans)
{

	/* stop Rx DMA */
	iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(bus(trans), FH_MEM_RSSR_RX_STATUS_REG,
			    FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

static inline int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
				    struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(bus(trans)->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans,
				    struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(bus(trans)->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

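/*
 * iwl_trans_txq_alloc - allocate a single Tx queue
 *
 * Allocates the per-slot meta/cmd arrays, the skb pointer array (data queues
 * only) and the DMA-coherent TFD circular buffer shared with the device.
 */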
static int iwl_trans_txq_alloc(struct iwl_trans *trans,
				struct iwl_tx_queue *txq, int slots_num,
				u32 txq_id)
{
	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
	int i;

	if (WARN_ON(txq->meta || txq->cmd || txq->skbs || txq->tfds))
		return -EINVAL;

	txq->q.n_window = slots_num;

	txq->meta = kcalloc(slots_num, sizeof(txq->meta[0]), GFP_KERNEL);
	txq->cmd = kcalloc(slots_num, sizeof(txq->cmd[0]), GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto error;

	if (txq_id == trans->shrd->cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
						GFP_KERNEL);
			if (!txq->cmd[i])
				goto error;
		}

	/* Alloc driver data array and TFD circular buffer */
	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (txq_id != trans->shrd->cmd_queue) {
		txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(txq->skbs[0]),
				    GFP_KERNEL);
		if (!txq->skbs) {
			IWL_ERR(trans, "kmalloc for auxiliary BD "
				  "structures failed\n");
			goto error;
		}
	} else {
		txq->skbs = NULL;
	}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(bus(trans)->dev, tfd_sz,
				       &txq->q.dma_addr, GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = txq_id;

	return 0;
error:
	kfree(txq->skbs);
	txq->skbs = NULL;
	/* since txq->cmd has been zeroed,
	 * all non allocated cmd[i] will be NULL */
	if (txq->cmd && txq_id == trans->shrd->cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->cmd[i]);
	kfree(txq->meta);
	kfree(txq->cmd);
	txq->meta = NULL;
	txq->cmd = NULL;

	return -ENOMEM;

}

static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
		      int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = 0;
	memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);

	/*
	 * For the default queues 0-3, set up the swq_id
	 * already -- all others need to get one later
	 * (if they need one at all).
	 */
	if (txq_id < 4)
		iwl_set_swq_id(txq, txq_id, txq_id);

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
			txq_id);
	if (ret)
		return ret;

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(bus(trans), FH_MEM_CBBC_QUEUE(txq_id),
			     txq->q.dma_addr >> 8);

	return 0;
}

/**
 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	enum dma_data_direction dma_dir;
	unsigned long flags;

	if (!q->n_bd)
		return;

	/* In the command queue, all the TBs are mapped as BIDI
	 * so unmap them as such.
	 */
	if (txq_id == trans->shrd->cmd_queue)
		dma_dir = DMA_BIDIRECTIONAL;
	else
		dma_dir = DMA_TO_DEVICE;

	spin_lock_irqsave(&trans->shrd->sta_lock, flags);
	while (q->write_ptr != q->read_ptr) {
		/* The read_ptr needs to be bound by q->n_window */
		iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr),
				    dma_dir);
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
	spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
}

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct device *dev = bus(trans)->dev;
	int i;
	if (WARN_ON(!txq))
		return;

	iwl_tx_queue_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */

	if (txq_id == trans->shrd->cmd_queue)
		for (i = 0; i < txq->q.n_window; i++)
			kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd) {
		dma_free_coherent(dev, sizeof(struct iwl_tfd) *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
		memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
	}

	/* De-alloc array of per-TFD driver data */
	kfree(txq->skbs);
	txq->skbs = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/**
 * iwl_trans_pcie_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Tx queues */
	if (trans_pcie->txq) {
		for (txq_id = 0;
		     txq_id < hw_params(trans).max_txq_num; txq_id++)
			iwl_tx_queue_free(trans, txq_id);
	}

	kfree(trans_pcie->txq);
	trans_pcie->txq = NULL;

	iwlagn_free_dma_ptr(trans, &trans_pcie->kw);

	iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}

/**
 * iwl_trans_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 *
 * @param priv
 * @return error code
 */
static int iwl_trans_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	u16 scd_bc_tbls_size = hw_params(trans).max_txq_num *
			sizeof(struct iwlagn_scd_bc_tbl);

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
				   scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq = kcalloc(hw_params(trans).max_txq_num,
				  sizeof(struct iwl_tx_queue), GFP_KERNEL);
	if (!trans_pcie->txq) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
		slots_num = (txq_id == trans->shrd->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
					  slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	iwl_trans_pcie_tx_free(trans);

	return ret;
}

static int iwl_tx_init(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	unsigned long flags;
	bool alloc = false;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->txq) {
		ret = iwl_trans_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_irqsave(&trans->shrd->lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl_write_prph(bus(trans), SCD_TXFACT, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(bus(trans), FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
		slots_num = (txq_id == trans->shrd->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
					 slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_trans_pcie_tx_free(trans);
	return ret;
}

static void iwl_set_pwr_vmain(struct iwl_trans *trans)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

		if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
			iwl_set_bits_mask_prph(bus(trans), APMG_PS_CTRL_REG,
					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					       ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	iwl_set_bits_mask_prph(bus(trans), APMG_PS_CTRL_REG,
			       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

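/*
 * iwl_nic_init - low level NIC initialization
 *
 * Starts the APM, selects the VMAIN power source, applies the NIC
 * configuration and then initializes the Rx and Tx/command queues.
 */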
static int iwl_nic_init(struct iwl_trans *trans)
{
	unsigned long flags;

	/* nic_init */
	spin_lock_irqsave(&trans->shrd->lock, flags);
	iwl_apm_init(priv(trans));

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(bus(trans), CSR_INT_COALESCING,
		IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	iwl_set_pwr_vmain(trans);

	iwl_nic_config(priv(trans));

	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_rx_init(trans);

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_tx_init(trans))
		return -ENOMEM;

	if (hw_params(trans).shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(bus(trans), CSR_MAC_SHADOW_REG_CTRL,
			0x800FFFFF);
	}

	set_bit(STATUS_INIT, &trans->shrd->status);

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
		CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
				CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
				CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
				HW_READY_TIMEOUT);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
static int iwl_trans_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_set_hw_ready(trans);
	if (ret >= 0)
		return 0;

	/* If HW is not ready, prepare the conditions to check again */
	iwl_set_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
			CSR_HW_IF_CONFIG_REG_PREPARE);

	ret = iwl_poll_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
			~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
			CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);

	if (ret < 0)
		return ret;

	/* HW should be ready by now, check again. */
	ret = iwl_set_hw_ready(trans);
	if (ret >= 0)
		return 0;
	return ret;
}

#define IWL_AC_UNSET -1

struct queue_to_fifo_ac {
	s8 fifo, ac;
};

static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
};

static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_BE_IPAN, 2, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_AUX, IWL_AC_UNSET, },
};

static const u8 iwlagn_bss_ac_to_fifo[] = {
	IWL_TX_FIFO_VO,
	IWL_TX_FIFO_VI,
	IWL_TX_FIFO_BE,
	IWL_TX_FIFO_BK,
};
static const u8 iwlagn_bss_ac_to_queue[] = {
	0, 1, 2, 3,
};
static const u8 iwlagn_pan_ac_to_fifo[] = {
	IWL_TX_FIFO_VO_IPAN,
	IWL_TX_FIFO_VI_IPAN,
	IWL_TX_FIFO_BE_IPAN,
	IWL_TX_FIFO_BK_IPAN,
};
static const u8 iwlagn_pan_ac_to_queue[] = {
	7, 6, 5, 4,
};

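/*
 * iwl_trans_pcie_start_device - prepare the HW and bring the device up
 *
 * Sets up the context to queue/FIFO mappings, honours the RF-kill switch,
 * runs the NIC init and enables host interrupts.
 */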
static int iwl_trans_pcie_start_device(struct iwl_trans *trans)
{
	int ret;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	trans->shrd->ucode_owner = IWL_OWNERSHIP_DRIVER;
	trans_pcie->ac_to_queue[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_queue;
	trans_pcie->ac_to_queue[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_queue;

	trans_pcie->ac_to_fifo[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_fifo;
	trans_pcie->ac_to_fifo[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_fifo;

	trans_pcie->mcast_queue[IWL_RXON_CTX_BSS] = 0;
	trans_pcie->mcast_queue[IWL_RXON_CTX_PAN] = IWL_IPAN_MCAST_QUEUE;

	if ((hw_params(trans).sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
	     iwl_trans_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (iwl_read32(bus(trans), CSR_GP_CNTRL) &
			CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
	else
		set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);

	if (iwl_is_rfkill(trans->shrd)) {
		iwl_set_hw_rfkill_state(priv(trans), true);
		iwl_enable_interrupts(trans);
		return -ERFKILL;
	}

	iwl_write32(bus(trans), CSR_INT, 0xFFFFFFFF);

	ret = iwl_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(bus(trans), CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	return 0;
}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the tx fifos mask
 * must be called under priv->shrd->lock and mac access
 */
static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
{
	iwl_write_prph(bus(trans), SCD_TXFACT, mask);
}

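/*
 * iwl_trans_pcie_tx_start - program the scheduler and start Tx
 *
 * Clears the scheduler context in SRAM, enables the Tx DMA channels,
 * initializes every Tx queue and maps the default queues to their FIFOs.
 */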
static void iwl_trans_pcie_tx_start(struct iwl_trans *trans)
{
	const struct queue_to_fifo_ac *queue_to_fifo;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&trans->shrd->lock, flags);

	trans_pcie->scd_base_addr =
		iwl_read_prph(bus(trans), SCD_SRAM_BASE_ADDR);
	a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
	/* reset context data memory */
	for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(bus(trans), a, 0);
	/* reset tx status memory */
	for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(bus(trans), a, 0);
	for (; a < trans_pcie->scd_base_addr +
	       SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(trans).max_txq_num);
	       a += 4)
		iwl_write_targ_mem(bus(trans), a, 0);

	iwl_write_prph(bus(trans), SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
		iwl_write_direct32(bus(trans), FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(bus(trans), FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(bus(trans), FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwl_write_prph(bus(trans), SCD_QUEUECHAIN_SEL,
		SCD_QUEUECHAIN_SEL_ALL(trans));
	iwl_write_prph(bus(trans), SCD_AGGR_SEL, 0);

	/* initiate the queues */
	for (i = 0; i < hw_params(trans).max_txq_num; i++) {
		iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR, 0 | (i << 8));
		iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				((SCD_WIN_SIZE <<
				SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
				((SCD_FRAME_LIMIT <<
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwl_write_prph(bus(trans), SCD_INTERRUPT_MASK,
			IWL_MASK(0, hw_params(trans).max_txq_num));

	/* Activate all Tx DMA/FIFO channels */
	iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));

	/* map queues to FIFOs */
	if (trans->shrd->valid_contexts != BIT(IWL_RXON_CTX_BSS))
		queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
	else
		queue_to_fifo = iwlagn_default_queue_to_tx_fifo;

	iwl_trans_set_wr_ptrs(trans, trans->shrd->cmd_queue, 0);

	/* make sure all queues are not stopped */
	memset(&trans_pcie->queue_stopped[0], 0,
		sizeof(trans_pcie->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&trans_pcie->queue_stop_count[i], 0);

	/* reset to 0 to enable all the queues first */
	trans_pcie->txq_ctx_active_msk = 0;

	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) <
						IWLAGN_FIRST_AMPDU_QUEUE);
	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) <
						IWLAGN_FIRST_AMPDU_QUEUE);

	for (i = 0; i < IWLAGN_FIRST_AMPDU_QUEUE; i++) {
		int fifo = queue_to_fifo[i].fifo;
		int ac = queue_to_fifo[i].ac;

		iwl_txq_ctx_activate(trans_pcie, i);

		if (fifo == IWL_TX_FIFO_UNUSED)
			continue;

		if (ac != IWL_AC_UNSET)
			iwl_set_swq_id(&trans_pcie->txq[i], ac, i);
		iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i],
					      fifo, 0);
	}

	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	/* Enable L1-Active */
	iwl_clear_bits_prph(bus(trans), APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

/**
 * iwl_trans_tx_stop - Stop all Tx DMA channels
 */
static int iwl_trans_tx_stop(struct iwl_trans *trans)
{
	int ch, txq_id;
	unsigned long flags;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&trans->shrd->lock, flags);

	iwl_trans_txq_set_sched(trans, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(bus(trans),
				   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		if (iwl_poll_direct_bit(bus(trans), FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				    1000))
			IWL_ERR(trans, "Failing on timeout while stopping"
			    " DMA channel %d [0x%08x]", ch,
			    iwl_read_direct32(bus(trans),
					      FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	if (!trans_pcie->txq) {
		IWL_WARN(trans, "Stopping tx queues that aren't allocated...");
		return 0;
	}

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
		iwl_tx_queue_unmap(trans, txq_id);

	return 0;
}

static void iwl_trans_pcie_disable_sync_irq(struct iwl_trans *trans)
{
	unsigned long flags;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans->shrd->lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	/* wait to make sure we flush pending tasklet*/
	synchronize_irq(bus(trans)->irq);
	tasklet_kill(&trans_pcie->irq_tasklet);
}

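/*
 * iwl_trans_pcie_stop_device - stop the HW and put the device in low power
 */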
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	/* stop and reset the on-board processor */
	iwl_write32(bus(trans), CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	iwl_trans_pcie_disable_sync_irq(trans);

	/* device going down, Stop using ICT table */
	iwl_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans->shrd->status)) {
		iwl_trans_tx_stop(trans);
		iwl_trans_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		iwl_write_prph(bus(trans), APMG_CLK_DIS_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(bus(trans), CSR_GP_CNTRL,
			CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_apm_stop(priv(trans));
}

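/*
 * iwl_trans_pcie_tx - queue an skb for transmission
 *
 * Selects the Tx queue from the context/AC (or aggregation) mapping,
 * attaches the Tx command and the skb payload to a TFD and bumps the queue
 * write pointer; stops the queue when it runs low on free TFDs.
 */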
static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
		u8 sta_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;

	dma_addr_t phys_addr = 0;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	u16 seq_number = 0;
	u8 wait_write_ptr = 0;
	u8 txq_id;
	u8 tid = 0;
	bool is_agg = false;
	__le16 fc = hdr->frame_control;
	u8 hdr_len = ieee80211_hdrlen(fc);

	/*
	 * Send this frame after DTIM -- there's a special queue
	 * reserved for this for contexts that support AP mode.
	 */
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		txq_id = trans_pcie->mcast_queue[ctx];

		/*
		 * The microcode will clear the more data
		 * bit in the last frame it transmits.
		 */
		hdr->frame_control |=
			cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
		txq_id = IWL_AUX_QUEUE;
	else
		txq_id =
		    trans_pcie->ac_to_queue[ctx][skb_get_queue_mapping(skb)];

	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
		u8 *qc = NULL;
		struct iwl_tid_data *tid_data;
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		tid_data = &trans->shrd->tid_data[sta_id][tid];

		if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
			return -1;

		seq_number = tid_data->seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl = hdr->seq_ctrl &
				cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;
		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU) {
			WARN_ON(tid_data->agg.state != IWL_AGG_ON);
			txq_id = tid_data->agg.txq_id;
			is_agg = true;
		}
	}

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	txq = &trans_pcie->txq[txq_id];
	q = &txq->q;

	/* Set up driver data for this TFD */
	txq->skbs[q->write_ptr] = skb;
	txq->cmd[q->write_ptr] = dev_cmd;

	dev_cmd->hdr.cmd = REPLY_TX;
	dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->meta[q->write_ptr];

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (firstlen != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = dma_map_single(bus(trans)->dev,
				    &dev_cmd->hdr, firstlen,
				    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(bus(trans)->dev, txcmd_phys)))
		return -1;
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);

	if (!ieee80211_has_morefrags(fc)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr = dma_map_single(bus(trans)->dev, skb->data + hdr_len,
					   secondlen, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(bus(trans)->dev, phys_addr))) {
			dma_unmap_single(bus(trans)->dev,
					 dma_unmap_addr(out_meta, mapping),
					 dma_unmap_len(out_meta, len),
					 DMA_BIDIRECTIONAL);
			return -1;
		}
	}

	/* Attach buffers to TFD */
	iwlagn_txq_attach_buf_to_tfd(trans, txq, txcmd_phys, firstlen, 1);
	if (secondlen > 0)
		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     secondlen, 0);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	dma_sync_single_for_cpu(bus(trans)->dev, txcmd_phys, firstlen,
			DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
		     le16_to_cpu(dev_cmd->hdr.sequence));
	IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
	iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	if (is_agg)
		iwl_trans_txq_update_byte_cnt_tbl(trans, txq,
					       le16_to_cpu(tx_cmd->len));

	dma_sync_single_for_device(bus(trans)->dev, txcmd_phys, firstlen,
			DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(priv(trans),
			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
		trans->shrd->tid_data[sta_id][tid].tfds_in_queue++;
		if (!ieee80211_has_morefrags(fc))
			trans->shrd->tid_data[sta_id][tid].seq_number =
				seq_number;
	}

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */
	if (iwl_queue_space(q) < q->high_mark) {
		if (wait_write_ptr) {
			txq->need_update = 1;
			iwl_txq_update_write_ptr(trans, txq);
		} else {
			iwl_stop_queue(trans, txq);
		}
	}
	return 0;
}

static void iwl_trans_pcie_kick_nic(struct iwl_trans *trans)
{
	/* Remove all resets to allow NIC to operate */
	iwl_write32(bus(trans), CSR_RESET, 0);
}

static int iwl_trans_pcie_request_irq(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	int err;

	trans_pcie->inta_mask = CSR_INI_SET_MASK;

	tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
		iwl_irq_tasklet, (unsigned long)trans);

	iwl_alloc_isr_ict(trans);

	err = request_irq(bus(trans)->irq, iwl_isr_ict, IRQF_SHARED,
		DRV_NAME, trans);
	if (err) {
		IWL_ERR(trans, "Error allocating IRQ %d\n", bus(trans)->irq);
		iwl_free_isr_ict(trans);
		return err;
	}

	INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
	return 0;
}

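/*
 * iwlagn_txq_check_empty - continue a pending ADDBA/DELBA flow
 *
 * Called under sta_lock while reclaiming frames: once the aggregation queue
 * drains, the <sta,tid> aggregation state machine is moved forward.
 */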
static int iwlagn_txq_check_empty(struct iwl_trans *trans,
			   int sta_id, u8 tid, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_queue *q = &trans_pcie->txq[txq_id].q;
	struct iwl_tid_data *tid_data = &trans->shrd->tid_data[sta_id][tid];

	lockdep_assert_held(&trans->shrd->sta_lock);

	switch (trans->shrd->tid_data[sta_id][tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the */
		/* aggregated HW queue */
		if ((txq_id  == tid_data->agg.txq_id) &&
		    (q->read_ptr == q->write_ptr)) {
			IWL_DEBUG_HT(trans,
				"HW queue empty: continue DELBA flow\n");
			iwl_trans_pcie_txq_agg_disable(trans, txq_id);
			tid_data->agg.state = IWL_AGG_OFF;
			iwl_stop_tx_ba_trans_ready(priv(trans),
						   NUM_IWL_RXON_CTX,
						   sta_id, tid);
			iwl_wake_queue(trans, &trans_pcie->txq[txq_id]);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			IWL_DEBUG_HT(trans,
				"HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IWL_AGG_ON;
			iwl_start_tx_ba_trans_ready(priv(trans),
						    NUM_IWL_RXON_CTX,
						    sta_id, tid);
		}
		break;
	default:
		break;
	}

	return 0;
}

static void iwl_free_tfds_in_queue(struct iwl_trans *trans,
			    int sta_id, int tid, int freed)
{
	lockdep_assert_held(&trans->shrd->sta_lock);

	if (trans->shrd->tid_data[sta_id][tid].tfds_in_queue >= freed)
		trans->shrd->tid_data[sta_id][tid].tfds_in_queue -= freed;
	else {
		IWL_DEBUG_TX(trans, "free more than tfds_in_queue (%u:%d)\n",
			trans->shrd->tid_data[sta_id][tid].tfds_in_queue,
			freed);
		trans->shrd->tid_data[sta_id][tid].tfds_in_queue = 0;
	}
}

static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
		      int txq_id, int ssn, u32 status,
		      struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	enum iwl_agg_state agg_state;
	/* n_bd is usually 256 => n_bd - 1 = 0xff */
	int tfd_num = ssn & (txq->q.n_bd - 1);
	int freed = 0;
	bool cond;

	txq->time_stamp = jiffies;

	if (txq->sched_retry) {
		agg_state =
			trans->shrd->tid_data[txq->sta_id][txq->tid].agg.state;
		cond = (agg_state != IWL_EMPTYING_HW_QUEUE_DELBA);
	} else {
		cond = (status != TX_STATUS_FAIL_PASSIVE_NO_RX);
	}

	if (txq->q.read_ptr != tfd_num) {
		IWL_DEBUG_TX_REPLY(trans, "Retry scheduler reclaim "
				"scd_ssn=%d idx=%d txq=%d swq=%d\n",
				ssn, tfd_num, txq_id, txq->swq_id);
		freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
		if (iwl_queue_space(&txq->q) > txq->q.low_mark && cond)
			iwl_wake_queue(trans, txq);
	}

	iwl_free_tfds_in_queue(trans, sta_id, tid, freed);
	iwlagn_txq_check_empty(trans, sta_id, tid, txq_id);
}

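/* Free all transport resources, including the iwl_trans structure itself */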
static void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	iwl_trans_pcie_tx_free(trans);
	iwl_trans_pcie_rx_free(trans);
	free_irq(bus(trans)->irq, trans);
	iwl_free_isr_ict(trans);
	trans->shrd->trans = NULL;
	kfree(trans);
}

#ifdef CONFIG_PM_SLEEP
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
	/*
	 * This function is called when system goes into suspend state
	 * mac80211 will call iwl_mac_stop() from the mac80211 suspend function
	 * first but since iwl_mac_stop() has no knowledge of who the caller is,
	 * it will not call apm_ops.stop() to stop the DMA operation.
	 * Calling apm_ops.stop here to make sure we stop the DMA.
	 *
	 * But of course ... if we have configured WoWLAN then we did other
	 * things already :-)
	 */
	if (!trans->shrd->wowlan) {
		iwl_apm_stop(priv(trans));
	} else {
		iwl_disable_interrupts(trans);
		iwl_clear_bit(bus(trans), CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	}

	return 0;
}

static int iwl_trans_pcie_resume(struct iwl_trans *trans)
{
	bool hw_rfkill = false;

	iwl_enable_interrupts(trans);

	if (!(iwl_read32(bus(trans), CSR_GP_CNTRL) &
				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
		hw_rfkill = true;

	if (hw_rfkill)
		set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
	else
		clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);

	iwl_set_hw_rfkill_state(priv(trans), hw_rfkill);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static void iwl_trans_pcie_wake_any_queue(struct iwl_trans *trans,
					  enum iwl_rxon_context_id ctx)
{
	u8 ac, txq_id;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	for (ac = 0; ac < AC_NUM; ac++) {
		txq_id = trans_pcie->ac_to_queue[ctx][ac];
		IWL_DEBUG_INFO(trans, "Queue Status: Q[%d] %s\n",
			ac,
			(atomic_read(&trans_pcie->queue_stop_count[ac]) > 0)
			      ? "stopped" : "awake");
		iwl_wake_queue(trans, &trans_pcie->txq[txq_id]);
	}
}

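/* Allocate the transport; the PCIe private data is embedded right after it */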
const struct iwl_trans_ops trans_ops_pcie;

static struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd)
{
	struct iwl_trans *iwl_trans = kzalloc(sizeof(struct iwl_trans) +
					      sizeof(struct iwl_trans_pcie),
					      GFP_KERNEL);
	if (iwl_trans) {
		struct iwl_trans_pcie *trans_pcie =
			IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
		iwl_trans->ops = &trans_ops_pcie;
		iwl_trans->shrd = shrd;
		trans_pcie->trans = iwl_trans;
		spin_lock_init(&iwl_trans->hcmd_lock);
	}

	return iwl_trans;
}

static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_stop_queue(trans, &trans_pcie->txq[txq_id]);
}

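/* Wait, up to IWL_FLUSH_WAIT_MS per queue, for the data queues to drain */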
#define IWL_FLUSH_WAIT_MS	2000

static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	int cnt;
	unsigned long now = jiffies;
	int ret = 0;

	/* waiting for all the tx frames complete might take a while */
	for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
		if (cnt == trans->shrd->cmd_queue)
			continue;
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		while (q->read_ptr != q->write_ptr && !time_after(jiffies,
		       now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
			msleep(1);

		if (q->read_ptr != q->write_ptr) {
			IWL_ERR(trans, "fail to flush all tx fifo queues\n");
			ret = -ETIMEDOUT;
			break;
		}
	}
	return ret;
}

/*
 * On every watchdog tick we check (latest) time stamp. If it does not
 * change during timeout period and queue is not empty we reset firmware.
 */
static int iwl_trans_pcie_check_stuck_queue(struct iwl_trans *trans, int cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[cnt];
	struct iwl_queue *q = &txq->q;
	unsigned long timeout;

	if (q->read_ptr == q->write_ptr) {
		txq->time_stamp = jiffies;
		return 0;
	}

	timeout = txq->time_stamp +
		  msecs_to_jiffies(hw_params(trans).wd_timeout);

	if (time_after(jiffies, timeout)) {
		IWL_ERR(trans, "Queue %d stuck for %u ms.\n", q->id,
			hw_params(trans).wd_timeout);
		IWL_ERR(trans, "Current read_ptr %d write_ptr %d\n",
			q->read_ptr, q->write_ptr);
		return 1;
	}

	return 0;
}

static const char *get_fh_string(int cmd)
{
	switch (cmd) {
	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
	IWL_CMD(FH_RSCSR_CHNL0_WPTR);
	IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
	IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
	IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
	IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
	IWL_CMD(FH_TSSR_TX_STATUS_REG);
	IWL_CMD(FH_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
}

int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
{
	int i;
#ifdef CONFIG_IWLWIFI_DEBUG
	int pos = 0;
	size_t bufsz = 0;
#endif
	static const u32 fh_tbl[] = {
		FH_RSCSR_CHNL0_STTS_WPTR_REG,
		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH_RSCSR_CHNL0_WPTR,
		FH_MEM_RCSR_CHNL0_CONFIG_REG,
		FH_MEM_RSSR_SHARED_CTRL_REG,
		FH_MEM_RSSR_RX_STATUS_REG,
		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH_TSSR_TX_STATUS_REG,
		FH_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLWIFI_DEBUG
	if (display) {
		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
		pos += scnprintf(*buf + pos, bufsz - pos,
				"FH register values:\n");
		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
			pos += scnprintf(*buf + pos, bufsz - pos,
				"  %34s: 0X%08x\n",
				get_fh_string(fh_tbl[i]),
				iwl_read_direct32(bus(trans), fh_tbl[i]));
		}
		return pos;
	}
#endif
	IWL_ERR(trans, "FH register values:\n");
	for (i = 0; i <  ARRAY_SIZE(fh_tbl); i++) {
		IWL_ERR(trans, "  %34s: 0X%08x\n",
			get_fh_string(fh_tbl[i]),
			iwl_read_direct32(bus(trans), fh_tbl[i]));
	}
	return 0;
}

static const char *get_csr_string(int cmd)
{
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
}

void iwl_dump_csr(struct iwl_trans *trans)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};
	IWL_ERR(trans, "CSR values:\n");
	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i <  ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(trans, "  %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(bus(trans), csr_tbl[i]));
	}
}

#ifdef CONFIG_IWLWIFI_DEBUGFS
/* creation and removal of debugfs files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, trans,		\
				 &iwl_dbgfs_##name##_ops))		\
		return -ENOMEM;						\
} while (0)

/* file operation */
#define DEBUGFS_READ_FUNC(name)                                         \
static ssize_t iwl_dbgfs_##name##_read(struct file *file,               \
					char __user *user_buf,          \
					size_t count, loff_t *ppos);

#define DEBUGFS_WRITE_FUNC(name)                                        \
static ssize_t iwl_dbgfs_##name##_write(struct file *file,              \
					const char __user *user_buf,    \
					size_t count, loff_t *ppos);


static int iwl_dbgfs_open_file_generic(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

#define DEBUGFS_READ_FILE_OPS(name)					\
	DEBUGFS_READ_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = iwl_dbgfs_open_file_generic,				\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)                                    \
	DEBUGFS_WRITE_FUNC(name);                                       \
static const struct file_operations iwl_dbgfs_##name##_ops = {          \
	.write = iwl_dbgfs_##name##_write,                              \
	.open = iwl_dbgfs_open_file_generic,				\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
	DEBUGFS_READ_FUNC(name);					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = iwl_dbgfs_open_file_generic,				\
	.llseek = generic_file_llseek,					\
};

static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
						char __user *user_buf,
						size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	char *buf;
	int pos = 0;
	int cnt;
	int ret;
	const size_t bufsz = sizeof(char) * 64 * hw_params(trans).max_txq_num;

	if (!trans_pcie->txq) {
		IWL_ERR(trans, "txq not ready\n");
		return -EAGAIN;
	}
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
				"hwq %.2d: read=%u write=%u stop=%d"
				" swq_id=%#.2x (ac %d/hwq %d)\n",
				cnt, q->read_ptr, q->write_ptr,
				!!test_bit(cnt, trans_pcie->queue_stopped),
				txq->swq_id, txq->swq_id & 3,
				(txq->swq_id >> 2) & 0x1f);
		if (cnt >= 4)
			continue;
		/* for the ACs, display the stop count too */
		pos += scnprintf(buf + pos, bufsz - pos,
			"        stop-count: %d\n",
			atomic_read(&trans_pcie->queue_stop_count[cnt]));
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

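/*
 * Report the Rx queue read/write pointers and free-buffer count; if the
 * rb status area is allocated, also report the closed_rb_num the device
 * wrote back.
 */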
static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
						char __user *user_buf,
						size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	char buf[256];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
						rxq->read);
	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
						rxq->write);
	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
						rxq->free_count);
	if (rxq->rb_stts) {
		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
			 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
	} else {
		pos += scnprintf(buf + pos, bufsz - pos,
					"closed_rb_num: Not Allocated\n");
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}

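/* Dump the firmware event log into a buffer and copy it to userspace. */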
static ssize_t iwl_dbgfs_log_event_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf;
	int pos = 0;
	ssize_t ret = -ENOMEM;

	ret = pos = iwl_dump_nic_event_log(trans, true, &buf, true);
	if (buf) {
		ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
		kfree(buf);
	}
	return ret;
}

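/* Writing "1" dumps the full firmware event log to the kernel log. */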
static ssize_t iwl_dbgfs_log_event_write(struct file *file,
					const char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	u32 event_log_flag;
	char buf[8];
	int buf_size;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%u", &event_log_flag) != 1)
		return -EFAULT;
	if (event_log_flag == 1)
		iwl_dump_nic_event_log(trans, true, NULL, false);

	return count;
}

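/* Pretty-print the accumulated interrupt statistics. */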
static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IWL_ERR(trans, "Cannot allocate buffer\n");
		return -ENOMEM;
	}

	pos += scnprintf(buf + pos, bufsz - pos,
			"Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
		isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
		isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
			"\tLast Restarting Code:  0x%X\n",
			isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
		isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
		isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
		"HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
		isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
		isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
		"Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
		isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
		isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

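/* Writing 0 clears the accumulated interrupt statistics. */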
static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	char buf[8];
	int buf_size;
	u32 reset_flag;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%x", &reset_flag) != 1)
		return -EFAULT;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}

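/*
 * Any well-formed decimal write triggers a CSR dump; the parsed value
 * only validates the input and is otherwise unused.
 */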
static ssize_t iwl_dbgfs_csr_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int csr;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &csr) != 1)
		return -EFAULT;

	iwl_dump_csr(trans);

	return count;
}

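/* Dump the FH (flow handler) registers into a buffer for userspace. */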
static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf;
	int pos = 0;
	ssize_t ret = -EFAULT;

	ret = pos = iwl_dump_fh(trans, &buf, true);
	if (buf) {
		ret = simple_read_from_buffer(user_buf,
					      count, ppos, buf, pos);
		kfree(buf);
	}

	return ret;
}

DEBUGFS_READ_WRITE_FILE_OPS(log_event);
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);

/*
 * Create the debugfs files and directories
 */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					struct dentry *dir)
{
	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(log_event, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
	return 0;
}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					struct dentry *dir)
{ return 0; }

#endif /* CONFIG_IWLWIFI_DEBUGFS */

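/*
 * The ops vtable exported by the PCIe transport; upper layers drive the
 * device through these hooks.
 */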
const struct iwl_trans_ops trans_ops_pcie = {
	.alloc = iwl_trans_pcie_alloc,
	.request_irq = iwl_trans_pcie_request_irq,
	.start_device = iwl_trans_pcie_start_device,
	.prepare_card_hw = iwl_trans_pcie_prepare_card_hw,
	.stop_device = iwl_trans_pcie_stop_device,

	.tx_start = iwl_trans_pcie_tx_start,
	.wake_any_queue = iwl_trans_pcie_wake_any_queue,

	.send_cmd = iwl_trans_pcie_send_cmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.tx_agg_disable = iwl_trans_pcie_tx_agg_disable,
	.tx_agg_alloc = iwl_trans_pcie_tx_agg_alloc,
	.tx_agg_setup = iwl_trans_pcie_tx_agg_setup,

	.kick_nic = iwl_trans_pcie_kick_nic,

	.free = iwl_trans_pcie_free,
	.stop_queue = iwl_trans_pcie_stop_queue,

	.dbgfs_register = iwl_trans_pcie_dbgfs_register,

	.wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty,
	.check_stuck_queue = iwl_trans_pcie_check_stuck_queue,

#ifdef CONFIG_PM_SLEEP
	.suspend = iwl_trans_pcie_suspend,
	.resume = iwl_trans_pcie_resume,
#endif
};