/******************************************************************************
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <linux/cpu.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"

/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3)

/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
#define FIRST_RX_QUEUE 512

struct iwl_host_cmd;

/* This file includes the declarations that are internal to the
 * trans_pcie layer */

/**
 * struct iwl_rx_mem_buffer
 * @page_dma: bus address of rxb page
 * @page: driver's pointer to the rxb page
 * @invalid: rxb is in driver ownership - not owned by HW
 * @vid: index of this rxb in the global table
 * @list: list entry, used to link the rxb into the free/used lists
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	u16 vid;
	bool invalid;
	struct list_head list;
};

/**
 * struct isr_statistics - interrupt statistics
 *
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

#define IWL_CD_STTS_OPTIMIZED_POS	0
#define IWL_CD_STTS_OPTIMIZED_MSK	0x01
#define IWL_CD_STTS_TRANSFER_STATUS_POS	1
#define IWL_CD_STTS_TRANSFER_STATUS_MSK	0x0E
#define IWL_CD_STTS_WIFI_STATUS_POS	4
#define IWL_CD_STTS_WIFI_STATUS_MSK	0xF0

/**
 * enum iwl_completion_desc_transfer_status -  transfer status (bits 1-3)
 * @IWL_CD_STTS_END_TRANSFER: successful transfer complete.
 *	In sniffer mode, when split is used, set in last CD completion. (RX)
 * @IWL_CD_STTS_OVERFLOW: In sniffer mode, when using split - used for
 *	all CD completion. (RX)
 * @IWL_CD_STTS_ABORTED: CR abort / close flow. (RX)
 */
enum iwl_completion_desc_transfer_status {
	IWL_CD_STTS_UNUSED,
	IWL_CD_STTS_UNUSED_2,
	IWL_CD_STTS_END_TRANSFER,
	IWL_CD_STTS_OVERFLOW,
	IWL_CD_STTS_ABORTED,
	IWL_CD_STTS_ERROR,
};

/**
 * enum iwl_completion_desc_wifi_status - wifi status (bits 4-7)
 * @IWL_CD_STTS_VALID: the packet is valid (RX)
 * @IWL_CD_STTS_FCS_ERR: frame check sequence error (RX)
 * @IWL_CD_STTS_SEC_KEY_ERR: error handling the security key of rx (RX)
 * @IWL_CD_STTS_DECRYPTION_ERR: error decrypting the frame (RX)
 * @IWL_CD_STTS_DUP: duplicate packet (RX)
 * @IWL_CD_STTS_ICV_MIC_ERR: MIC error (RX)
 * @IWL_CD_STTS_INTERNAL_SNAP_ERR: problems removing the snap (RX)
 * @IWL_CD_STTS_SEC_PORT_FAIL: security port fail (RX)
 * @IWL_CD_STTS_BA_OLD_SN: block ack received old SN (RX)
 * @IWL_CD_STTS_QOS_NULL: QoS null packet (RX)
 * @IWL_CD_STTS_MAC_HDR_ERR: MAC header conversion error (RX)
 * @IWL_CD_STTS_MAX_RETRANS: reached max number of retransmissions (TX)
 * @IWL_CD_STTS_EX_LIFETIME: exceeded lifetime (TX)
 * @IWL_CD_STTS_NOT_USED: completed but not used (RX)
 * @IWL_CD_STTS_REPLAY_ERR: pn check failed, replay error (RX)
 */
enum iwl_completion_desc_wifi_status {
	IWL_CD_STTS_VALID,
	IWL_CD_STTS_FCS_ERR,
	IWL_CD_STTS_SEC_KEY_ERR,
	IWL_CD_STTS_DECRYPTION_ERR,
	IWL_CD_STTS_DUP,
	IWL_CD_STTS_ICV_MIC_ERR,
	IWL_CD_STTS_INTERNAL_SNAP_ERR,
	IWL_CD_STTS_SEC_PORT_FAIL,
	IWL_CD_STTS_BA_OLD_SN,
	IWL_CD_STTS_QOS_NULL,
	IWL_CD_STTS_MAC_HDR_ERR,
	IWL_CD_STTS_MAX_RETRANS,
	IWL_CD_STTS_EX_LIFETIME,
	IWL_CD_STTS_NOT_USED,
	IWL_CD_STTS_REPLAY_ERR,
};

#define IWL_RX_TD_TYPE		0xff000000
#define IWL_RX_TD_SIZE		0x00ffffff

/**
 * struct iwl_rx_transfer_desc - transfer descriptor
 * @type_n_size: buffer type (bit 0: external buff valid,
 *	bit 1: optional footer valid, bit 2-7: reserved)
 *	and buffer size
 * @addr: ptr to free buffer start address
 * @rbid: unique tag of the buffer
 * @reserved: reserved
 */
struct iwl_rx_transfer_desc {
	__le32 type_n_size;
	__le64 addr;
	__le16 rbid;
	__le16 reserved;
} __packed;

#define IWL_RX_CD_SIZE		0xffffff00

/**
 * struct iwl_rx_completion_desc - completion descriptor
 * @type: buffer type (bit 0: external buff valid,
 *	bit 1: optional footer valid, bit 2-7: reserved)
 * @status: status of the completion
 * @reserved1: reserved
 * @rbid: unique tag of the received buffer
 * @size: buffer size, masked by IWL_RX_CD_SIZE
 * @reserved2: reserved
 */
struct iwl_rx_completion_desc {
	u8 type;
	u8 status;
	__le16 reserved1;
	__le16 rbid;
	__le32 size;
	u8 reserved2[22];
} __packed;
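
/*
 * Illustrative only - not part of the driver API: a minimal sketch of how the
 * IWL_CD_STTS_* position/mask defines above can be used to pick apart the
 * status byte of a completion descriptor.  The helper names are hypothetical.
 */
static inline u8
iwl_cd_example_transfer_status(const struct iwl_rx_completion_desc *cd)
{
	/* bits 1-3: enum iwl_completion_desc_transfer_status */
	return (cd->status & IWL_CD_STTS_TRANSFER_STATUS_MSK) >>
	       IWL_CD_STTS_TRANSFER_STATUS_POS;
}

static inline u8
iwl_cd_example_wifi_status(const struct iwl_rx_completion_desc *cd)
{
	/* bits 4-7: enum iwl_completion_desc_wifi_status */
	return (cd->status & IWL_CD_STTS_WIFI_STATUS_MSK) >>
	       IWL_CD_STTS_WIFI_STATUS_POS;
}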

/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd)
 * @tr_tail: driver's pointer to the transmission ring tail buffer
 * @tr_tail_dma: physical address of the buffer for the transmission ring tail
 * @cr_tail: driver's pointer to the completion ring tail buffer
 * @cr_tail_dma: physical address of the buffer for the completion ring tail
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handled to allocator to use for allocation
 * @write_actual: last write pointer actually written to the device
 *	(aligned down to a multiple of 8)
 * @queue_size: size of this queue, in entries
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the queue data
 * @napi: NAPI struct for this queue
 * @queue: actual rx queue. Not used for multi-rx queue.
 *
 * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	__le32 *used_bd;
	dma_addr_t used_bd_dma;
	__le16 *tr_tail;
	dma_addr_t tr_tail_dma;
	__le16 *cr_tail;
	dma_addr_t cr_tail_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	u32 queue_size;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update;
	struct iwl_rb_status *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};

/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator had not processed yet
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handled to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index -- current index
 */
static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
{
	return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1);
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index -- current index
 */
static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
{
	return --index & (trans->cfg->base_params->max_tfd_queue_size - 1);
}
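
/*
 * Note: the wrap helpers above rely on max_tfd_queue_size being a power of
 * two, so that "& (size - 1)" acts as a cheap modulo.  A minimal usage
 * sketch, assuming a hypothetical queue size of 256:
 *
 *	int idx = 255;
 *	idx = iwl_queue_inc_wrap(trans, idx);	-> 0, wrapped to the start
 *	idx = iwl_queue_dec_wrap(trans, idx);	-> 255, wrapped back to the end
 */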

struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
	u32 tbs;
};


#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32

/*
 * The FH will write back to the first TB only, so we need to copy some data
 * into the buffer regardless of whether it should be mapped or not.
 * This indicates how big the first TB must be to include the scratch buffer
 * and the assigned PN.
 * Since the PN is 8 bytes long and sits at offset 12, the first TB is 20 bytes.
 * If we make it bigger then allocations will be bigger and copy slower, so
 * that's probably not useful.
 */
#define IWL_FIRST_TB_SIZE	20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)

struct iwl_pcie_txq_entry {
	struct iwl_device_cmd *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_first_tb_buf {
	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};

/**
 * struct iwl_txq - Tx Queue for DMA
 * @tfds: transmit frame descriptors (DMA memory)
 * @first_tb_bufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @first_tb_dma: DMA address for the first_tb_bufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans_pcie: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
 * @write_ptr: first empty entry (index), i.e. the host write pointer
 * @read_ptr: last used entry (index), i.e. the host read pointer
 * @dma_addr: physical addr for BD's
 * @n_window: safe queue window
 * @id: queue id
 * @low_mark: low watermark, resume queue if free space more than this
 * @high_mark: high watermark, stop queue if free space less than this
 *
 * A Tx queue consists of a circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
 * there are HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data,
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
 * This means that we end up with the following:
 *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *  SW entries:           | 0      | ... | 31          |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_txq {
	void *tfds;
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
	dma_addr_t first_tb_dma;
	struct iwl_pcie_txq_entry *entries;
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans_pcie *trans_pcie;
	bool need_update;
	bool frozen;
	bool ampdu;
	int block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
	struct iwl_dma_ptr bc_tbl;

	int write_ptr;
	int read_ptr;
	dma_addr_t dma_addr;
	int n_window;
	u32 id;
	int low_mark;
	int high_mark;
};
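
/*
 * A minimal sketch of the SW/HW window mapping described above, assuming the
 * command queue (n_window == TFD_CMD_SLOTS == 32): a HW descriptor index is
 * reduced to its SW entry with a power-of-two mask,
 *
 *	sw_idx = hw_idx & (TFD_CMD_SLOTS - 1);	hw_idx 37 -> sw_idx 5
 *
 * which is exactly what iwl_pcie_get_cmd_index() below does with
 * txq->n_window.
 */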

static inline dma_addr_t
iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
	return txq->first_tb_dma +
	       sizeof(struct iwl_pcie_first_tb_buf) * idx;
}

struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};

/**
 * enum iwl_shared_irq_flags - level of sharing for irq
 * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
 * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
 */
enum iwl_shared_irq_flags {
	IWL_SHARED_IRQ_NON_RX		= BIT(0),
	IWL_SHARED_IRQ_FIRST_RSS	= BIT(1),
};

/**
 * enum iwl_image_response_code - image response values
 * @IWL_IMAGE_RESP_DEF: the default value of the register
 * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
 * @IWL_IMAGE_RESP_FAIL: iml reading failed
 */
enum iwl_image_response_code {
	IWL_IMAGE_RESP_DEF		= 0,
	IWL_IMAGE_RESP_SUCCESS		= 1,
	IWL_IMAGE_RESP_FAIL		= 2,
};

/**
 * struct iwl_dram_data
 * @physical: page phy pointer
 * @block: pointer to the allocated block/page
 * @size: size of the block/page
 */
struct iwl_dram_data {
	dma_addr_t physical;
	void *block;
	int size;
};

/**
 * struct iwl_self_init_dram - dram data used by self init process
 * @fw: lmac and umac dram data
 * @fw_cnt: total number of items in array
 * @paging: paging dram data
 * @paging_cnt: total number of items in array
 */
struct iwl_self_init_dram {
	struct iwl_dram_data *fw;
	int fw_cnt;
	struct iwl_dram_data *paging;
	int paging_cnt;
};

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @ctxt_info: context information for FW self init
 * @ctxt_info_gen3: context information for gen3 devices
 * @prph_info: prph info for self init
 * @prph_scratch: prph scratch for self init
 * @ctxt_info_dma_addr: dma addr of context information
 * @prph_info_dma_addr: dma addr of prph info
 * @prph_scratch_dma_addr: dma addr of prph scratch
 * @init_dram: DRAM data of firmware image (including paging).
 *	Context information addresses will be taken from here.
 *	This is driver's local copy for keeping track of size and
 *	count for allocating and freeing the memory.
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler sram base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @rx_buf_size: Rx buffer size
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
 *	frame.
 * @rx_page_order: page order for receive buffer size
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @cmd_in_flight: true when we have a host command in flight
 * @fw_mon_phys: physical address of the buffer for the firmware monitor
 * @fw_mon_page: points to the first page of the buffer for the firmware monitor
 * @fw_mon_size: size of the buffer for the firmware monitor
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if managed to enable MSI-X
 * @shared_vec_mask: the type of causes the shared vector handles
 *	(see iwl_shared_irq_flags).
 * @alloc_vecs: the number of interrupt vectors allocated by the OS
 * @def_irq: default irq for non rx causes
 * @fh_init_mask: initial unmasked fh causes
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 * @in_rescan: true if we have triggered a device rescan
 * @scheduled_for_removal: true if we have scheduled a device removal
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;
	struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
	struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
	struct iwl_rb_allocator rba;
	union {
		struct iwl_context_info *ctxt_info;
		struct iwl_context_info_gen3 *ctxt_info_gen3;
	};
	struct iwl_prph_info *prph_info;
	struct iwl_prph_scratch *prph_scratch;
	dma_addr_t ctxt_info_dma_addr;
	dma_addr_t prph_info_dma_addr;
	dma_addr_t prph_scratch_dma_addr;
	dma_addr_t iml_dma_addr;
	struct iwl_self_init_dram init_dram;
	struct iwl_trans *trans;

	struct net_device napi_dev;

	struct __percpu iwl_tso_hdr_page *tso_hdr_page;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down, opmode_down;
	bool debug_rfkill;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	struct iwl_txq *txq_memory;
	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;
	wait_queue_head_t d0i3_waitq;

	u8 page_offs, dev_cmd_offs;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
	u8 max_tbs;
	u16 tfd_size;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool sw_csum_tx;
	bool pcie_dbg_dumped_once;
	u32 rx_page_order;

	/* protect hw register */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;
	bool ref_cmd_in_flight;

	dma_addr_t fw_mon_phys;
	struct page *fw_mon_page;
	u32 fw_mon_size;

	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
	bool msix_enabled;
	u8 shared_vec_mask;
	u32 alloc_vecs;
	u32 def_irq;
	u32 fh_init_mask;
	u32 hw_init_mask;
	u32 fh_mask;
	u32 hw_mask;
	cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
	u16 tx_cmd_queue_size;
	bool in_rescan;
	bool scheduled_for_removal;
};

static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
	return (void *)trans->trans_specific;
}

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}

/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode);
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
				  struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);

static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
					  u8 idx)
{
	if (trans->cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;
		struct iwl_tfh_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->tb_len);
	} else {
		struct iwl_tfd *tfd = _tfd;
		struct iwl_tfd_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->hi_n_len) >> 4;
	}
}
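
/*
 * Note on the legacy (pre-TFH) case above: iwl_tfd_tb::hi_n_len packs the
 * upper 4 bits of the DMA address in its low nibble and the 12-bit buffer
 * length in the upper bits, which is why the length is recovered with a
 * 4-bit right shift.
 */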

/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* disable interrupts from uCode/NIC to host */
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);

		/* acknowledge/clear/reset any interrupts still pending
		 * from uCode or flow handler (Rx/Tx DMA) */
		iwl_write32(trans, CSR_INT, 0xffffffff);
		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	} else {
		/* disable all the interrupts we might use */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

#define IWL_NUM_OF_COMPLETION_RINGS	31
#define IWL_NUM_OF_TRANSFER_RINGS	527

static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
					    int start)
{
	int i = 0;

	while (start < fw->num_sec &&
	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}
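
/*
 * Example (hypothetical layout): for an image whose sections are
 * { sec0, sec1, CPU1_CPU2_SEPARATOR_SECTION, sec3 },
 * iwl_pcie_get_num_sections(fw, 0) returns 2 (the CPU1 sections) and
 * iwl_pcie_get_num_sections(fw, 3) returns 1 (the CPU2 section).
 */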

static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
					       const struct fw_desc *sec,
					       struct iwl_dram_data *dram)
{
	dram->block = dma_alloc_coherent(trans->dev, sec->len,
					 &dram->physical,
					 GFP_KERNEL);
	if (!dram->block)
		return -ENOMEM;

	dram->size = sec->len;
	memcpy(dram->block, sec->data, sec->len);

	return 0;
}

static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
	int i;

	if (!dram->fw) {
		WARN_ON(dram->fw_cnt);
		return;
	}

	for (i = 0; i < dram->fw_cnt; i++)
		dma_free_coherent(trans->dev, dram->fw[i].size,
				  dram->fw[i].block, dram->fw[i].physical);

	kfree(dram->fw);
	dram->fw_cnt = 0;
	dram->fw = NULL;
}
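
/*
 * A minimal sketch (not the driver's actual flow) of how the helpers above
 * fit together when loading a firmware image: count the sections belonging
 * to one CPU, copy each into its own DMA-coherent block, and track them in
 * trans_pcie->init_dram so that iwl_pcie_ctxt_info_free_fw_img() can undo it:
 *
 *	cnt = iwl_pcie_get_num_sections(fw, 0);
 *	for (i = 0; i < cnt; i++)
 *		iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[i],
 *					     &trans_pcie->init_dram.fw[i]);
 */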

static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/*
		 * fh/hw_mask keeps all the unmasked causes.
		 * Unlike msi, in msix cause is enabled when it is unset.
		 */
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}
static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
	trans_pcie->hw_mask = msk;
}

static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
	trans_pcie->fh_mask = msk;
}

static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
		iwl_enable_fh_int_msk_msix(trans,
					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
	}
}

static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
{
	return index & (q->n_window - 1);
}

static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
				     struct iwl_txq *txq, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans->cfg->use_tfh)
		idx = iwl_pcie_get_cmd_index(txq, idx);

	return txq->tfds + trans_pcie->tfd_size * idx;
}

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
	}

	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_9000) {
		/*
		 * On 9000-series devices this bit isn't enabled by default, so
		 * when we power down the device we need set the bit to allow it
		 * to wake up the PCI-E bus for RF-kill interrupts.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
	}
}

void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
	}
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
	} else
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->id);
}

static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
	int index = iwl_pcie_get_cmd_index(q, i);
	int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
	int w = iwl_pcie_get_cmd_index(q, q->write_ptr);

	return w >= r ?
		(index >= r && index < w) :
		!(index < r && index >= w);
}
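
/*
 * Example of the wrap-around case handled above (hypothetical numbers): with
 * read_ptr == 250 and write_ptr == 5 on a 256-entry window, entries 250..255
 * and 0..4 are reported as used, everything else as free.
 */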

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->debug_rfkill)
		return true;

	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}
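
/*
 * Usage sketch for the read-modify-write helpers above, e.g.
 *
 *	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
 *				 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 *
 * Callers are generally expected to serialize register access themselves
 * (typically via trans_pcie->reg_lock).
 */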

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);

#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
{
	return 0;
}
#endif

int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);

void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable);

void iwl_pcie_rx_allocator_work(struct work_struct *data);

/* common functions that are used by gen2 transport */
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
		      int slots_num, bool cmd_queue);
int iwl_pcie_txq_alloc(struct iwl_trans *trans,
		       struct iwl_txq *txq, int slots_num, bool cmd_queue);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);
void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
			    struct sk_buff *skb);
#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
#endif

/* common functions that are used by gen3 transport */
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);

/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
				 struct iwl_tx_queue_cfg_cmd *cmd,
				 int cmd_id, int size,
				 unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
			   struct iwl_device_cmd *dev_cmd, int txq_id);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans,
				     bool low_power);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power);
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);
#endif /* __iwl_trans_int_pcie_h__ */