/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <linux/cpu.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"

/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3)
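
/*
 * Worked example (illustrative numbers, not tied to any particular device
 * config): a device whose TFDs carry max_tbs == 20 buffer entries is left
 * with IWL_PCIE_MAX_FRAGS == 17 TBs for skb frags, after the 3 TBs reserved
 * above for the TX command/header and the skb head.
 */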

/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
#define FIRST_RX_QUEUE 512

struct iwl_host_cmd;

/* This file includes the declarations that are internal to the
 * trans_pcie layer */

/**
 * struct iwl_rx_mem_buffer
 * @page_dma: bus address of rxb page
 * @page: driver's pointer to the rxb page
 * @invalid: rxb is in driver ownership - not owned by HW
 * @vid: index of this rxb in the global table
 * @size: size used from the buffer
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	u16 vid;
	bool invalid;
	struct list_head list;
	u32 size;
};

/**
 * struct isr_statistics - interrupt statistics
 *
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

#define IWL_RX_TD_TYPE_MSK	0xff000000
#define IWL_RX_TD_SIZE_MSK	0x00ffffff
#define IWL_RX_TD_SIZE_2K	BIT(11)
#define IWL_RX_TD_TYPE		0

/**
 * struct iwl_rx_transfer_desc - transfer descriptor
 * @type_n_size: buffer type (bit 0: external buff valid,
 *	bit 1: optional footer valid, bit 2-7: reserved)
 *	and buffer size
 * @addr: ptr to free buffer start address
 * @rbid: unique tag of the buffer
 * @reserved: reserved
 */
struct iwl_rx_transfer_desc {
	__le32 type_n_size;
	__le64 addr;
	__le16 rbid;
	__le16 reserved;
} __packed;

#define IWL_RX_CD_SIZE		0xffffff00

/**
 * struct iwl_rx_completion_desc - completion descriptor
 * @type: buffer type (bit 0: external buff valid,
 *	bit 1: optional footer valid, bit 2-7: reserved)
 * @status: status of the completion
 * @reserved1: reserved
 * @rbid: unique tag of the received buffer
 * @size: buffer size, masked by IWL_RX_CD_SIZE
 * @reserved2: reserved
 */
struct iwl_rx_completion_desc {
	u8 type;
	u8 status;
	__le16 reserved1;
	__le16 rbid;
	__le32 size;
	u8 reserved2[22];
} __packed;

/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 *	In 22560 devices it is a pointer to a list of iwl_rx_transfer_desc's
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @used_bd_dma: physical address of buffer of used receive buffer descriptors
 *	(rbd)
 * @tr_tail: driver's pointer to the transmission ring tail buffer
 * @tr_tail_dma: physical address of the buffer for the transmission ring tail
 * @cr_tail: driver's pointer to the completion ring tail buffer
 * @cr_tail_dma: physical address of the buffer for the completion ring tail
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handled to allocator to use for allocation
 * @write_actual: last write index actually written to the device, rounded
 *	down to a multiple of 8
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the queue's data and lists
 * @queue: actual rx queue. Not used for multi-rx queue.
 *
 * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	union {
		void *used_bd;
		__le32 *bd_32;
		struct iwl_rx_completion_desc *cd;
	};
	dma_addr_t used_bd_dma;
	__le16 *tr_tail;
	dma_addr_t tr_tail_dma;
	__le16 *cr_tail;
	dma_addr_t cr_tail_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	u32 queue_size;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update;
	void *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};

/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator has not processed yet
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handled to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index -- current index
 */
static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
{
	return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1);
}

/**
 * iwl_get_closed_rb_stts - get closed rb stts from different structs
 * @rxq - the rxq to get the rb stts from
 */
static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
					    struct iwl_rxq *rxq)
{
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
		__le16 *rb_stts = rxq->rb_stts;

		return READ_ONCE(*rb_stts);
	} else {
		struct iwl_rb_status *rb_stts = rxq->rb_stts;

		return READ_ONCE(rb_stts->closed_rb_num);
	}
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index -- current index
 */
static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
{
	return --index & (trans->cfg->base_params->max_tfd_queue_size - 1);
}
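
/*
 * Worked example of the wrap arithmetic above (illustrative, assuming the
 * common max_tfd_queue_size of 256; the mask trick requires the size to be
 * a power of two):
 *
 *	iwl_queue_inc_wrap(trans, 255) == 0
 *	iwl_queue_dec_wrap(trans, 0)   == 255
 */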

struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
	u32 tbs;
};


#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32

/*
 * The FH will write back to the first TB only, so we need to copy some data
 * into the buffer regardless of whether it should be mapped or not.
 * This indicates how big the first TB must be to include the scratch buffer
 * and the assigned PN.
 * Since PN location is 8 bytes at offset 12, it's 20 now.
 * If we make it bigger, allocations will be bigger and copying slower, so
 * that's probably not useful.
 */
#define IWL_FIRST_TB_SIZE	20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
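
/*
 * The arithmetic behind IWL_FIRST_TB_SIZE: the PN is 8 bytes long and starts
 * at offset 12, so the first TB must cover at least 12 + 8 = 20 bytes;
 * ALIGN(20, 64) then pads each per-entry scratch buffer to 64 bytes.
 */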

struct iwl_pcie_txq_entry {
	struct iwl_device_cmd *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_first_tb_buf {
	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};

/**
 * struct iwl_txq - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: transmit frame descriptors (DMA memory)
 * @first_tb_bufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @first_tb_dma: DMA address for the first_tb_bufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans_pcie: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
 * @write_ptr: 1-st empty entry (index) host_w
 * @read_ptr: last used entry (index) host_r
 * @dma_addr:  physical addr for BD's
 * @n_window: safe queue window
 * @id: queue id
 * @low_mark: low watermark, resume queue if free space more than this
 * @high_mark: high watermark, stop queue if free space less than this
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
 * there are HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data,
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
 * This means that we end up with the following:
 *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *  SW entries:           | 0      | ... | 31          |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_txq {
	void *tfds;
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
	dma_addr_t first_tb_dma;
	struct iwl_pcie_txq_entry *entries;
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans_pcie *trans_pcie;
	bool need_update;
	bool frozen;
	bool ampdu;
	int block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
	struct iwl_dma_ptr bc_tbl;

	int write_ptr;
	int read_ptr;
	dma_addr_t dma_addr;
	int n_window;
	u32 id;
	int low_mark;
	int high_mark;
};

static inline dma_addr_t
iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
	return txq->first_tb_dma +
	       sizeof(struct iwl_pcie_first_tb_buf) * idx;
}

struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};

#ifdef CONFIG_IWLWIFI_DEBUGFS
/**
 * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
 * debugfs file
 *
 * @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is closed.
 * @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open.
 * @IWL_FW_MON_DBGFS_STATE_DISABLED: the file is disabled, once this state is
 *	set the file can no longer be used.
 */
enum iwl_fw_mon_dbgfs_state {
	IWL_FW_MON_DBGFS_STATE_CLOSED,
	IWL_FW_MON_DBGFS_STATE_OPEN,
	IWL_FW_MON_DBGFS_STATE_DISABLED,
};
#endif

/**
 * enum iwl_shared_irq_flags - level of sharing for irq
 * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
 * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
 */
enum iwl_shared_irq_flags {
	IWL_SHARED_IRQ_NON_RX		= BIT(0),
	IWL_SHARED_IRQ_FIRST_RSS	= BIT(1),
};

/**
 * enum iwl_image_response_code - image response values
 * @IWL_IMAGE_RESP_DEF: the default value of the register
 * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
 * @IWL_IMAGE_RESP_FAIL: iml reading failed
 */
enum iwl_image_response_code {
	IWL_IMAGE_RESP_DEF		= 0,
	IWL_IMAGE_RESP_SUCCESS		= 1,
	IWL_IMAGE_RESP_FAIL		= 2,
};

/**
 * struct iwl_self_init_dram - dram data used by self init process
 * @fw: lmac and umac dram data
 * @fw_cnt: total number of items in array
 * @paging: paging dram data
 * @paging_cnt: total number of items in array
 */
struct iwl_self_init_dram {
	struct iwl_dram_data *fw;
	int fw_cnt;
	struct iwl_dram_data *paging;
	int paging_cnt;
};

/**
 * struct cont_rec - continuous recording data structure
 * @prev_wr_ptr: the last address that was read in monitor_data
 *	debugfs file
 * @prev_wrap_cnt: the wrap count that was used during the last read in
 *	monitor_data debugfs file
 * @state: the state of monitor_data debugfs file as described
 *	in &iwl_fw_mon_dbgfs_state enum
 * @mutex: locked while reading from monitor_data debugfs file
 */
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct cont_rec {
	u32 prev_wr_ptr;
	u32 prev_wrap_cnt;
	u8  state;
	/* Used to sync monitor_data debugfs file with driver unload flow */
	struct mutex mutex;
};
#endif

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @ctxt_info: context information for FW self init
 * @ctxt_info_gen3: context information for gen3 devices
 * @prph_info: prph info for self init
 * @prph_scratch: prph scratch for self init
 * @ctxt_info_dma_addr: dma addr of context information
 * @prph_info_dma_addr: dma addr of prph info
 * @prph_scratch_dma_addr: dma addr of prph scratch
 * @init_dram: DRAM data of firmware image (including paging).
 *	Context information addresses will be taken from here.
 *	This is driver's local copy for keeping track of size and
 *	count for allocating and freeing the memory.
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler sram base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @def_rx_queue: default rx queue number
 * @rx_buf_size: Rx buffer size
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
 *	frame.
 * @rx_page_order: page order for receive buffer size
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @cmd_in_flight: true when we have a host command in flight
#ifdef CONFIG_IWLWIFI_DEBUGFS
 * @fw_mon_data: fw continuous recording data
#endif
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if managed to enable MSI-X
 * @shared_vec_mask: the type of causes the shared vector handles
 *	(see iwl_shared_irq_flags).
 * @alloc_vecs: the number of interrupt vectors allocated by the OS
 * @def_irq: default irq for non rx causes
 * @fh_init_mask: initial unmasked fh causes
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 * @in_rescan: true if we have triggered a device rescan
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;
	struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
	struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
	struct iwl_rb_allocator rba;
	union {
		struct iwl_context_info *ctxt_info;
		struct iwl_context_info_gen3 *ctxt_info_gen3;
	};
	struct iwl_prph_info *prph_info;
	struct iwl_prph_scratch *prph_scratch;
	dma_addr_t ctxt_info_dma_addr;
	dma_addr_t prph_info_dma_addr;
	dma_addr_t prph_scratch_dma_addr;
	dma_addr_t iml_dma_addr;
	struct iwl_self_init_dram init_dram;
	struct iwl_trans *trans;

	struct net_device napi_dev;

	struct iwl_tso_hdr_page __percpu *tso_hdr_page;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down, opmode_down;
	bool debug_rfkill;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	struct iwl_txq *txq_memory;
	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;
	wait_queue_head_t d0i3_waitq;

	u8 page_offs, dev_cmd_offs;

	u8 cmd_queue;
	u8 def_rx_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
	u8 max_tbs;
	u16 tfd_size;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool sw_csum_tx;
	bool pcie_dbg_dumped_once;
	u32 rx_page_order;

	/* protect hw register */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;
	bool ref_cmd_in_flight;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	struct cont_rec fw_mon_data;
#endif

	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
	bool msix_enabled;
	u8 shared_vec_mask;
	u32 alloc_vecs;
	u32 def_irq;
	u32 fh_init_mask;
	u32 hw_init_mask;
	u32 fh_mask;
	u32 hw_mask;
	cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
	u16 tx_cmd_queue_size;
	bool in_rescan;
};

static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
	return (void *)trans->trans_specific;
}

static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
				      struct msix_entry *entry)
{
	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as
	 * write 1 clear (W1C) register, meaning that it is cleared
	 * by writing 1 to the bit.
	 */
	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
}

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}
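
/*
 * Note that IWL_TRANS_GET_PCIE_TRANS() and iwl_trans_pcie_get_trans() are
 * inverses of each other, since trans_specific is embedded at the end of
 * struct iwl_trans:
 *
 *	trans == iwl_trans_pcie_get_trans(IWL_TRANS_GET_PCIE_TRANS(trans))
 */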

/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
* RX
******************************************************/
int _iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq);
int iwl_pcie_rx_alloc(struct iwl_trans *trans);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id,
			  int queue_size);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode);
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
				  struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx);
void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
				  struct iwl_txq *txq);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
				   struct iwl_txq *txq, u16 byte_cnt,
				   int num_tbs);

static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
					  u8 idx)
{
	if (trans->cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;
		struct iwl_tfh_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->tb_len);
	} else {
		struct iwl_tfd *tfd = _tfd;
		struct iwl_tfd_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->hi_n_len) >> 4;
	}
}

/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* disable interrupts from uCode/NIC to host */
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);

		/* acknowledge/clear/reset any interrupts still pending
		 * from uCode or flow handler (Rx/Tx DMA) */
		iwl_write32(trans, CSR_INT, 0xffffffff);
		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	} else {
		/* disable all the interrupts we might use */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

#define IWL_NUM_OF_COMPLETION_RINGS	31
#define IWL_NUM_OF_TRANSFER_RINGS	527

static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
					    int start)
{
	int i = 0;

	while (start < fw->num_sec &&
	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}
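
/*
 * Illustrative example (hypothetical image layout): for an image with
 * num_sec == 4 where sec[2].offset == CPU1_CPU2_SEPARATOR_SECTION,
 * iwl_pcie_get_num_sections(fw, 0) returns 2 (the sections before the
 * separator) and iwl_pcie_get_num_sections(fw, 3) returns 1.
 */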

static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
					       const struct fw_desc *sec,
					       struct iwl_dram_data *dram)
{
	dram->block = dma_alloc_coherent(trans->dev, sec->len,
					 &dram->physical,
					 GFP_KERNEL);
	if (!dram->block)
		return -ENOMEM;

	dram->size = sec->len;
	memcpy(dram->block, sec->data, sec->len);

	return 0;
}

static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
	int i;

	if (!dram->fw) {
		WARN_ON(dram->fw_cnt);
		return;
	}

	for (i = 0; i < dram->fw_cnt; i++)
		dma_free_coherent(trans->dev, dram->fw[i].size,
				  dram->fw[i].block, dram->fw[i].physical);

	kfree(dram->fw);
	dram->fw_cnt = 0;
	dram->fw = NULL;
}

static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/*
		 * fh/hw_mask keeps all the unmasked causes.
		 * Unlike msi, in msix cause is enabled when it is unset.
		 */
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}
static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
	trans_pcie->hw_mask = msk;
}

static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
	trans_pcie->fh_mask = msk;
}

static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
		iwl_enable_fh_int_msk_msix(trans,
					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
	}
}

static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
{
	return index & (q->n_window - 1);
}
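
/*
 * Example (illustrative): on the command queue n_window is TFD_CMD_SLOTS
 * (32), so a hardware index of 70 maps to SW entry 70 & 31 == 6, which is
 * the "window overlaid over the HW queue" described at struct iwl_txq.
 */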

static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
				     struct iwl_txq *txq, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans->cfg->use_tfh)
		idx = iwl_pcie_get_cmd_index(txq, idx);

	return txq->tfds + trans_pcie->tfd_size * idx;
}

static inline const char *queue_name(struct device *dev,
				     struct iwl_trans_pcie *trans_p, int i)
{
	if (trans_p->shared_vec_mask) {
		int vec = trans_p->shared_vec_mask &
			  IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;

		if (i == 0)
			return DRV_NAME ": shared IRQ";

		return devm_kasprintf(dev, GFP_KERNEL,
				      DRV_NAME ": queue %d", i + vec);
	}
	if (i == 0)
		return DRV_NAME ": default queue";

	if (i == trans_p->alloc_vecs - 1)
		return DRV_NAME ": exception";

	return devm_kasprintf(dev, GFP_KERNEL,
			      DRV_NAME  ": queue %d", i);
}
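
/*
 * Example of the naming above (illustrative): with a shared vector that also
 * serves the first RSS queue, vector 0 is named "iwlwifi: shared IRQ" and
 * vector 1 becomes "iwlwifi: queue 2"; without sharing, vector 0 is
 * "iwlwifi: default queue" and the last vector is "iwlwifi: exception".
 */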

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
	}

	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_9000) {
		/*
		 * On 9000-series devices this bit isn't enabled by default, so
		 * when we power down the device we need to set the bit to allow it
		 * to wake up the PCI-E bus for RF-kill interrupts.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
	}
}

void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
	}
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
	} else
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->id);
}

static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
	int index = iwl_pcie_get_cmd_index(q, i);
	int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
	int w = iwl_pcie_get_cmd_index(q, q->write_ptr);

	return w >= r ?
		(index >= r && index < w) :
		!(index < r && index >= w);
}
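
/*
 * Illustrative example of the wrap-around case above: with read_ptr == 250
 * and write_ptr == 4 on a 256-entry window, entries 250..255 and 0..3 are
 * considered used, while 4..249 are free.
 */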

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->debug_rfkill)
		return true;

	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}

static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
{
	return (trans->dbg_dest_tlv || trans->ini_valid);
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);

#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
{
	return 0;
}
#endif

int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);

void iwl_pcie_rx_allocator_work(struct work_struct *data);

/* common functions that are used by gen2 transport */
int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
		      int slots_num, bool cmd_queue);
int iwl_pcie_txq_alloc(struct iwl_trans *trans,
		       struct iwl_txq *txq, int slots_num,  bool cmd_queue);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);
void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
			    struct sk_buff *skb);
#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
#endif

/* common functions that are used by gen3 transport */
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);

/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
				   struct iwl_txq *txq);
int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
				     struct iwl_txq **intxq, int size,
				     unsigned int timeout);
int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
				      struct iwl_txq *txq,
				      struct iwl_host_cmd *hcmd);
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
				 __le16 flags, u8 sta_id, u8 tid,
				 int cmd_id, int size,
				 unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
			   struct iwl_device_cmd *dev_cmd, int txq_id);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans,
				     bool low_power);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power);
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);
#endif /* __iwl_trans_int_pcie_h__ */