/******************************************************************************
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <linux/cpu.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"

/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3)

/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
#define FIRST_RX_QUEUE 512

struct iwl_host_cmd;

/* This file includes the declarations that are internal to the
 * trans_pcie layer */

/**
 * struct iwl_rx_mem_buffer
 * @page_dma: bus address of rxb page
 * @page: driver's pointer to the rxb page
 * @invalid: rxb is in driver ownership - not owned by HW
 * @vid: index of this rxb in the global table
 * @size: size used from the buffer
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	u16 vid;
	bool invalid;
	struct list_head list;
	u32 size;
};

/**
 * struct isr_statistics - interrupt statistics
 *
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

#define IWL_CD_STTS_OPTIMIZED_POS	0
#define IWL_CD_STTS_OPTIMIZED_MSK	0x01
#define IWL_CD_STTS_TRANSFER_STATUS_POS	1
#define IWL_CD_STTS_TRANSFER_STATUS_MSK	0x0E
#define IWL_CD_STTS_WIFI_STATUS_POS	4
#define IWL_CD_STTS_WIFI_STATUS_MSK	0xF0

/**
 * enum iwl_completion_desc_transfer_status -  transfer status (bits 1-3)
 * @IWL_CD_STTS_END_TRANSFER: successful transfer complete.
 *	In sniffer mode, when split is used, set in last CD completion. (RX)
 * @IWL_CD_STTS_OVERFLOW: In sniffer mode, when using split - used for
 *	all CD completion. (RX)
 * @IWL_CD_STTS_ABORTED: CR abort / close flow. (RX)
 */
enum iwl_completion_desc_transfer_status {
	IWL_CD_STTS_UNUSED,
	IWL_CD_STTS_UNUSED_2,
	IWL_CD_STTS_END_TRANSFER,
	IWL_CD_STTS_OVERFLOW,
	IWL_CD_STTS_ABORTED,
	IWL_CD_STTS_ERROR,
};

/**
 * enum iwl_completion_desc_wifi_status - wifi status (bits 4-7)
 * @IWL_CD_STTS_VALID: the packet is valid (RX)
 * @IWL_CD_STTS_FCS_ERR: frame check sequence error (RX)
 * @IWL_CD_STTS_SEC_KEY_ERR: error handling the security key of rx (RX)
 * @IWL_CD_STTS_DECRYPTION_ERR: error decrypting the frame (RX)
 * @IWL_CD_STTS_DUP: duplicate packet (RX)
 * @IWL_CD_STTS_ICV_MIC_ERR: MIC error (RX)
 * @IWL_CD_STTS_INTERNAL_SNAP_ERR: problems removing the snap (RX)
 * @IWL_CD_STTS_SEC_PORT_FAIL: security port fail (RX)
 * @IWL_CD_STTS_BA_OLD_SN: block ack received old SN (RX)
 * @IWL_CD_STTS_QOS_NULL: QoS null packet (RX)
 * @IWL_CD_STTS_MAC_HDR_ERR: MAC header conversion error (RX)
 * @IWL_CD_STTS_MAX_RETRANS: reached max number of retransmissions (TX)
 * @IWL_CD_STTS_EX_LIFETIME: exceeded lifetime (TX)
 * @IWL_CD_STTS_NOT_USED: completed but not used (RX)
 * @IWL_CD_STTS_REPLAY_ERR: pn check failed, replay error (RX)
 */
enum iwl_completion_desc_wifi_status {
	IWL_CD_STTS_VALID,
	IWL_CD_STTS_FCS_ERR,
	IWL_CD_STTS_SEC_KEY_ERR,
	IWL_CD_STTS_DECRYPTION_ERR,
	IWL_CD_STTS_DUP,
	IWL_CD_STTS_ICV_MIC_ERR,
	IWL_CD_STTS_INTERNAL_SNAP_ERR,
	IWL_CD_STTS_SEC_PORT_FAIL,
	IWL_CD_STTS_BA_OLD_SN,
	IWL_CD_STTS_QOS_NULL,
	IWL_CD_STTS_MAC_HDR_ERR,
	IWL_CD_STTS_MAX_RETRANS,
	IWL_CD_STTS_EX_LIFETIME,
	IWL_CD_STTS_NOT_USED,
	IWL_CD_STTS_REPLAY_ERR,
};

#define IWL_RX_TD_TYPE_MSK	0xff000000
#define IWL_RX_TD_SIZE_MSK	0x00ffffff
#define IWL_RX_TD_SIZE_2K	BIT(11)
#define IWL_RX_TD_TYPE		0

/**
 * struct iwl_rx_transfer_desc - transfer descriptor
 * @type_n_size: buffer type (bit 0: external buff valid,
 *	bit 1: optional footer valid, bit 2-7: reserved)
 *	and buffer size
 * @addr: ptr to free buffer start address
 * @rbid: unique tag of the buffer
 * @reserved: reserved
 */
struct iwl_rx_transfer_desc {
	__le32 type_n_size;
	__le64 addr;
	__le16 rbid;
	__le16 reserved;
} __packed;

#define IWL_RX_CD_SIZE		0xffffff00

/**
 * struct iwl_rx_completion_desc - completion descriptor
 * @type: buffer type (bit 0: external buff valid,
 *	bit 1: optional footer valid, bit 2-7: reserved)
 * @status: status of the completion
 * @reserved1: reserved
 * @rbid: unique tag of the received buffer
 * @size: buffer size, masked by IWL_RX_CD_SIZE
 * @reserved2: reserved
 */
struct iwl_rx_completion_desc {
	u8 type;
	u8 status;
	__le16 reserved1;
	__le16 rbid;
	__le32 size;
	u8 reserved2[22];
} __packed;

/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 *	In 22560 devices it is a pointer to a list of iwl_rx_transfer_desc's
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd)
 * @tr_tail: driver's pointer to the transmission ring tail buffer
 * @tr_tail_dma: physical address of the buffer for the transmission ring tail
 * @cr_tail: driver's pointer to the completion ring tail buffer
 * @cr_tail_dma: physical address of the buffer for the completion ring tail
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handled to allocator to use for allocation
 * @write_actual: last write pointer value actually written to the device
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the queue data
 * @queue: actual rx queue. Not used for multi-rx queue.
 *
 * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	union {
		void *used_bd;
		__le32 *bd_32;
		struct iwl_rx_completion_desc *cd;
	};
	dma_addr_t used_bd_dma;
	__le16 *tr_tail;
	dma_addr_t tr_tail_dma;
	__le16 *cr_tail;
	dma_addr_t cr_tail_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	u32 queue_size;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update;
	void *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};

/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator had not processed yet
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handled to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index -- current index
 */
static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
{
	return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1);
}
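
/*
 * Note: iwl_queue_inc_wrap() above and iwl_queue_dec_wrap() below rely on
 * max_tfd_queue_size being a power of two (e.g. 256); the mask then
 * implements a cheap modulo, so e.g. incrementing index 255 on a 256-entry
 * queue wraps back to 0.
 */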

/**
 * iwl_get_closed_rb_stts - get closed rb stts from different structs
 * @rxq - the rxq to get the rb stts from
 */
static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
					    struct iwl_rxq *rxq)
{
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
		__le16 *rb_stts = rxq->rb_stts;

		return READ_ONCE(*rb_stts);
	} else {
		struct iwl_rb_status *rb_stts = rxq->rb_stts;

		return READ_ONCE(rb_stts->closed_rb_num);
	}
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index -- current index
 */
static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
{
	return --index & (trans->cfg->base_params->max_tfd_queue_size - 1);
}

struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
	u32 tbs;
};


#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32

/*
 * The FH will write back to the first TB only, so we need to copy some data
 * into the buffer regardless of whether it should be mapped or not.
 * This indicates how big the first TB must be to include the scratch buffer
 * and the assigned PN.
 * Since the PN is 8 bytes long and sits at offset 12, that makes 20 bytes.
 * If we make it bigger then allocations will be bigger and copy slower, so
 * that's probably not useful.
 */
#define IWL_FIRST_TB_SIZE	20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)

struct iwl_pcie_txq_entry {
	struct iwl_device_cmd *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_first_tb_buf {
	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};

/**
 * struct iwl_txq - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: transmit frame descriptors (DMA memory)
 * @first_tb_bufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @first_tb_dma: DMA address for the first_tb_bufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans_pcie: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
 * @write_ptr: first empty entry (index) host_w
 * @read_ptr: last used entry (index) host_r
 * @dma_addr:  physical addr for BD's
 * @n_window: safe queue window
 * @id: queue id
 * @low_mark: low watermark, resume queue if free space more than this
 * @high_mark: high watermark, stop queue if free space less than this
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
 * there might be HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
 * This means that we end up with the following:
 *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *  SW entries:           | 0      | ... | 31          |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlayed over the HW queue.
 */
struct iwl_txq {
	void *tfds;
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
	dma_addr_t first_tb_dma;
	struct iwl_pcie_txq_entry *entries;
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans_pcie *trans_pcie;
	bool need_update;
	bool frozen;
	bool ampdu;
	int block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
	struct iwl_dma_ptr bc_tbl;

	int write_ptr;
	int read_ptr;
	dma_addr_t dma_addr;
	int n_window;
	u32 id;
	int low_mark;
	int high_mark;
};

static inline dma_addr_t
iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
	return txq->first_tb_dma +
	       sizeof(struct iwl_pcie_first_tb_buf) * idx;
}
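
/*
 * Each first-TB buffer is padded to IWL_FIRST_TB_SIZE_ALIGN (64 bytes with
 * the sizes defined above), so e.g. idx 2 resolves to first_tb_dma + 128.
 */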

struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};

/**
 * enum iwl_shared_irq_flags - level of sharing for irq
 * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
 * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
 */
enum iwl_shared_irq_flags {
	IWL_SHARED_IRQ_NON_RX		= BIT(0),
	IWL_SHARED_IRQ_FIRST_RSS	= BIT(1),
};

/**
 * enum iwl_image_response_code - image response values
 * @IWL_IMAGE_RESP_DEF: the default value of the register
 * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
 * @IWL_IMAGE_RESP_FAIL: iml reading failed
 */
enum iwl_image_response_code {
	IWL_IMAGE_RESP_DEF		= 0,
	IWL_IMAGE_RESP_SUCCESS		= 1,
	IWL_IMAGE_RESP_FAIL		= 2,
};

/**
 * struct iwl_dram_data
 * @physical: page phy pointer
 * @block: pointer to the allocated block/page
 * @size: size of the block/page
 */
struct iwl_dram_data {
	dma_addr_t physical;
	void *block;
	int size;
};

/**
 * struct iwl_self_init_dram - dram data used by self init process
 * @fw: lmac and umac dram data
 * @fw_cnt: total number of items in array
 * @paging: paging dram data
 * @paging_cnt: total number of items in array
 */
struct iwl_self_init_dram {
	struct iwl_dram_data *fw;
	int fw_cnt;
	struct iwl_dram_data *paging;
	int paging_cnt;
};

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @ctxt_info: context information for FW self init
 * @ctxt_info_gen3: context information for gen3 devices
 * @prph_info: prph info for self init
 * @prph_scratch: prph scratch for self init
 * @ctxt_info_dma_addr: dma addr of context information
 * @prph_info_dma_addr: dma addr of prph info
 * @prph_scratch_dma_addr: dma addr of prph scratch
 * @init_dram: DRAM data of firmware image (including paging).
 *	Context information addresses will be taken from here.
 *	This is driver's local copy for keeping track of size and
 *	count for allocating and freeing the memory.
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue - command queue number
 * @rx_buf_size: Rx buffer size
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
 *	frame.
 * @rx_page_order: page order for receive buffer size
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @cmd_in_flight: true when we have a host command in flight
 * @fw_mon_phys: physical address of the buffer for the firmware monitor
 * @fw_mon_page: points to the first page of the buffer for the firmware monitor
 * @fw_mon_size: size of the buffer for the firmware monitor
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if managed to enable MSI-X
 * @shared_vec_mask: the type of causes the shared vector handles
 *	(see iwl_shared_irq_flags).
 * @alloc_vecs: the number of interrupt vectors allocated by the OS
 * @def_irq: default irq for non rx causes
 * @fh_init_mask: initial unmasked fh causes
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 * @in_rescan: true if we have triggered a device rescan
 * @scheduled_for_removal: true if we have scheduled a device removal
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;
	struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
	struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
	struct iwl_rb_allocator rba;
	union {
		struct iwl_context_info *ctxt_info;
		struct iwl_context_info_gen3 *ctxt_info_gen3;
	};
	struct iwl_prph_info *prph_info;
	struct iwl_prph_scratch *prph_scratch;
	dma_addr_t ctxt_info_dma_addr;
	dma_addr_t prph_info_dma_addr;
	dma_addr_t prph_scratch_dma_addr;
	dma_addr_t iml_dma_addr;
	struct iwl_self_init_dram init_dram;
	struct iwl_trans *trans;

	struct net_device napi_dev;

	struct __percpu iwl_tso_hdr_page *tso_hdr_page;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down, opmode_down;
	bool debug_rfkill;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	struct iwl_txq *txq_memory;
	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;
	wait_queue_head_t d0i3_waitq;

	u8 page_offs, dev_cmd_offs;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
	u8 max_tbs;
	u16 tfd_size;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool sw_csum_tx;
	bool pcie_dbg_dumped_once;
	u32 rx_page_order;

	/* protect hw register access */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;
	bool ref_cmd_in_flight;

	dma_addr_t fw_mon_phys;
	struct page *fw_mon_page;
	u32 fw_mon_size;

	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
	bool msix_enabled;
	u8 shared_vec_mask;
	u32 alloc_vecs;
	u32 def_irq;
	u32 fh_init_mask;
	u32 hw_init_mask;
	u32 fh_mask;
	u32 hw_mask;
	cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
	u16 tx_cmd_queue_size;
	bool in_rescan;
	bool scheduled_for_removal;
};

static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
	return (void *)trans->trans_specific;
}

static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
				      struct msix_entry *entry)
{
	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as
	 * write 1 clear (W1C) register, meaning that it is cleared
	 * by writing 1 to the bit.
	 */
	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
}

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}

/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode);
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
				  struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);

static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
					  u8 idx)
{
	if (trans->cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;
		struct iwl_tfh_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->tb_len);
	} else {
		struct iwl_tfd *tfd = _tfd;
		struct iwl_tfd_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->hi_n_len) >> 4;
	}
}

/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* disable interrupts from uCode/NIC to host */
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);

		/* acknowledge/clear/reset any interrupts still pending
		 * from uCode or flow handler (Rx/Tx DMA) */
		iwl_write32(trans, CSR_INT, 0xffffffff);
		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	} else {
		/* disable all the interrupts we might use */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

#define IWL_NUM_OF_COMPLETION_RINGS	31
#define IWL_NUM_OF_TRANSFER_RINGS	527

static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
					    int start)
{
	int i = 0;

	while (start < fw->num_sec &&
	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}
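
/*
 * Example: for an image laid out as { sec0, sec1, CPU1_CPU2_SEPARATOR_SECTION,
 * sec2 }, iwl_pcie_get_num_sections(fw, 0) returns 2 and, starting right
 * after the separator, iwl_pcie_get_num_sections(fw, 3) returns 1.
 */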

static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
					       const struct fw_desc *sec,
					       struct iwl_dram_data *dram)
{
	dram->block = dma_alloc_coherent(trans->dev, sec->len,
					 &dram->physical,
					 GFP_KERNEL);
	if (!dram->block)
		return -ENOMEM;

	dram->size = sec->len;
	memcpy(dram->block, sec->data, sec->len);

	return 0;
}

static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
	int i;

	if (!dram->fw) {
		WARN_ON(dram->fw_cnt);
		return;
	}

	for (i = 0; i < dram->fw_cnt; i++)
		dma_free_coherent(trans->dev, dram->fw[i].size,
				  dram->fw[i].block, dram->fw[i].physical);

	kfree(dram->fw);
	dram->fw_cnt = 0;
	dram->fw = NULL;
}

static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/*
		 * fh/hw_mask keeps all the unmasked causes.
		 * Unlike MSI, in MSI-X a cause is enabled when its bit is unset.
		 */
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}
static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
	trans_pcie->hw_mask = msk;
}

static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
	trans_pcie->fh_mask = msk;
}

static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
		iwl_enable_fh_int_msk_msix(trans,
					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
	}
897 898
}

899
static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
900 901 902 903
{
	return index & (q->n_window - 1);
}

904
static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
905 906
				     struct iwl_txq *txq, int idx)
{
907 908 909 910 911 912
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans->cfg->use_tfh)
		idx = iwl_pcie_get_cmd_index(txq, idx);

	return txq->tfds + trans_pcie->tfd_size * idx;
913 914
}

915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937
static inline const char *queue_name(struct device *dev,
				     struct iwl_trans_pcie *trans_p, int i)
{
	if (trans_p->shared_vec_mask) {
		int vec = trans_p->shared_vec_mask &
			  IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;

		if (i == 0)
			return DRV_NAME ": shared IRQ";

		return devm_kasprintf(dev, GFP_KERNEL,
				      DRV_NAME ": queue %d", i + vec);
	}
	if (i == 0)
		return DRV_NAME ": default queue";

	if (i == trans_p->alloc_vecs - 1)
		return DRV_NAME ": exception";

	return devm_kasprintf(dev, GFP_KERNEL,
			      DRV_NAME  ": queue %d", i);
}

938 939
static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
940 941
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

942
	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
943 944 945 946 947 948 949 950 951
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
	}
952 953 954 955 956 957 958 959 960 961

	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_9000) {
		/*
		 * On 9000-series devices this bit isn't enabled by default, so
		 * when we power down the device we need to set the bit to allow it
		 * to wake up the PCI-E bus for RF-kill interrupts.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
	}
962 963
}

964 965
void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);

966
static inline void iwl_wake_queue(struct iwl_trans *trans,
967
				  struct iwl_txq *txq)
968
{
969 970
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

971 972 973
	if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
974
	}
975 976 977
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
978
				  struct iwl_txq *txq)
979
{
980
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
981

982 983 984
	if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
985 986
	} else
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
987
				    txq->id);
988 989
}

static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
	int index = iwl_pcie_get_cmd_index(q, i);
	int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
	int w = iwl_pcie_get_cmd_index(q, q->write_ptr);

	return w >= r ?
		(index >= r && index < w) :
		!(index < r && index >= w);
}
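
/*
 * The second branch handles the wrapped case: e.g. with read_ptr == 250 and
 * write_ptr == 5 on a 256-entry queue, indices 250..255 and 0..4 are
 * considered used.
 */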

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->debug_rfkill)
		return true;

	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);

#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
{
	return 0;
}
#endif

int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);

void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable);

void iwl_pcie_rx_allocator_work(struct work_struct *data);

/* common functions that are used by gen2 transport */
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
		      int slots_num, bool cmd_queue);
int iwl_pcie_txq_alloc(struct iwl_trans *trans,
		       struct iwl_txq *txq, int slots_num,  bool cmd_queue);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);
void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
			    struct sk_buff *skb);
#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
#endif

/* common functions that are used by gen3 transport */
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);

/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
				 struct iwl_tx_queue_cfg_cmd *cmd,
				 int cmd_id, int size,
				 unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
			   struct iwl_device_cmd *dev_cmd, int txq_id);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans,
				     bool low_power);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power);
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);
#endif /* __iwl_trans_int_pcie_h__ */