/******************************************************************************
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"

/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_PCIE_MAX_FRAGS (IWL_NUM_OF_TBS - 3)

/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16

struct iwl_host_cmd;

/* This file includes the declarations that are internal to the
 * trans_pcie layer */

/**
 * struct iwl_rx_mem_buffer
 * @page_dma: bus address of rxb page
 * @page: driver's pointer to the rxb page
 * @vid: index of this rxb in the global table
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	u16 vid;
	struct list_head list;
};

/**
 * struct isr_statistics - interrupt statistics
 *
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd)
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handed to the allocator to use for allocation
 * @write_actual: last write pointer actually written to the device
 * @queue_size: size of this queue, in RBDs
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the queue's lists, indices and counters
 * @napi: NAPI context used for this queue
 * @queue: actual rx queue. Not used for multi-rx queue.
 *
 * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	__le32 *used_bd;
	dma_addr_t used_bd_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	u32 queue_size;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update;
	struct iwl_rb_status *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};

/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator has not yet processed
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handled to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index -- current index
 */
static inline int iwl_queue_inc_wrap(int index)
{
	return ++index & (TFD_QUEUE_SIZE_MAX - 1);
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index -- current index
 */
static inline int iwl_queue_dec_wrap(int index)
{
	return --index & (TFD_QUEUE_SIZE_MAX - 1);
}
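
/*
 * For example, since TFD_QUEUE_SIZE_MAX is 256 (see the comment above
 * struct iwl_queue further down), these helpers simply mask with 0xff:
 * iwl_queue_inc_wrap(255) == 0 and iwl_queue_dec_wrap(0) == 255.
 */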

struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
};

/*
 * Generic queue structure
 *
 * Contains common data for Rx and Tx queues.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
 * there are HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data,
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256. As a result,
 * the software buffers (in the variables @meta, @txb in struct
 * iwl_txq) only have 32 entries, while the HW buffers (@tfds in
 * the same struct) have 256.
 * This means that we end up with the following:
 *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *  SW entries:           | 0      | ... | 31          |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_queue {
	int write_ptr;       /* 1st empty entry (index), host write pointer */
	int read_ptr;         /* last used entry (index), host read pointer */
	/* use for monitoring and recovering the stuck queue */
	dma_addr_t dma_addr;   /* physical addr for BD's */
	int n_window;	       /* safe queue window */
	u32 id;
	int low_mark;	       /* low watermark, resume queue if free
				* space more than this */
	int high_mark;         /* high watermark, stop queue if free
				* space less than this */
};
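
/*
 * Illustrative example of the SW/HW window mapping described above: for the
 * command queue, n_window is TFD_CMD_SLOTS (32), so HW index 37 lands on SW
 * entry 37 & (32 - 1) == 5; get_cmd_index() below does exactly this masking.
 */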

#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32

/*
 * The FH will write back to the first TB only, so we need
 * to copy some data into the buffer regardless of whether
 * it should be mapped or not. This indicates how big the
 * first TB must be to include the scratch buffer. Since
 * the scratch is 4 bytes at offset 12, it's 16 now. If we
 * make it bigger then allocations will be bigger and copy
 * slower, so that's probably not useful.
 */
#define IWL_HCMD_SCRATCHBUF_SIZE	16

struct iwl_pcie_txq_entry {
	struct iwl_device_cmd *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_txq_scratch_buf {
	struct iwl_cmd_header hdr;
	u8 buf[8];
	__le32 scratch;
};
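
/*
 * Note: this layout is what IWL_HCMD_SCRATCHBUF_SIZE above accounts for:
 * a 4-byte command header (assuming the usual struct iwl_cmd_header layout),
 * 8 bytes of buffer, and the 4-byte scratch word at offset 12, 16 bytes total.
 */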

/**
 * struct iwl_txq - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: transmit frame descriptors (DMA memory)
 * @scratchbufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @scratchbufs_dma: DMA address for the scratchbufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans_pcie: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @active: stores if queue is active
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 * @overflow_q: overflow queue of frames that arrived while the queue was full
 *
 * A Tx queue consists of a circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 */
struct iwl_txq {
	struct iwl_queue q;
	struct iwl_tfd *tfds;
	struct iwl_pcie_txq_scratch_buf *scratchbufs;
	dma_addr_t scratchbufs_dma;
	struct iwl_pcie_txq_entry *entries;
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans_pcie *trans_pcie;
	bool need_update;
	bool frozen;
	u8 active;
	bool ampdu;
	bool block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
};

static inline dma_addr_t
iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
{
	return txq->scratchbufs_dma +
	       sizeof(struct iwl_pcie_txq_scratch_buf) * idx;
}

struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @drv: pointer to iwl_drv
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @rx_buf_size: Rx buffer size
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @wide_cmd_header: true when ucode supports wide command header format
 * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
 *	frame.
 * @rx_page_order: page order for receive buffer size
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @ref_cmd_in_flight: true when we have a host command in flight
 * @fw_mon_phys: physical address of the buffer for the firmware monitor
 * @fw_mon_page: points to the first page of the buffer for the firmware monitor
 * @fw_mon_size: size of the buffer for the firmware monitor
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if MSI-X was successfully enabled
 * @allocated_vector: the number of interrupt vectors allocated by the OS
 * @default_irq_num: default irq for non rx interrupt
 * @fh_init_mask: initial unmasked fh causes
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;
	struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
	struct iwl_rx_mem_buffer *global_table[MQ_RX_TABLE_SIZE];
	struct iwl_rb_allocator rba;
	struct iwl_trans *trans;
	struct iwl_drv *drv;

	struct net_device napi_dev;

	struct __percpu iwl_tso_hdr_page *tso_hdr_page;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	struct iwl_txq *txq;
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;
	wait_queue_head_t d0i3_waitq;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool wide_cmd_header;
	bool sw_csum_tx;
	u32 rx_page_order;

	/* protect hw register access */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;
	bool ref_cmd_in_flight;

	/* protect ref counter */
	spinlock_t ref_lock;
	u32 ref_count;

	dma_addr_t fw_mon_phys;
	struct page *fw_mon_page;
	u32 fw_mon_size;

	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
	bool msix_enabled;
	u32 allocated_vector;
	u32 default_irq_num;
	u32 fh_init_mask;
	u32 hw_init_mask;
	u32 fh_mask;
	u32 hw_mask;
};

static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
	return (void *)trans->trans_specific;
}

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}

/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);

void iwl_trans_pcie_ref(struct iwl_trans *trans);
void iwl_trans_pcie_unref(struct iwl_trans *trans);

static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}
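
/*
 * Note on the shift above: hi_n_len packs the TB length in its upper 12 bits,
 * while (per the TFD layout described in iwl-fh.h) the low 4 bits carry the
 * high bits of the DMA address.
 */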

/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* disable interrupts from uCode/NIC to host */
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);

		/* acknowledge/clear/reset any interrupts still pending
		 * from uCode or flow handler (Rx/Tx DMA) */
		iwl_write32(trans, CSR_INT, 0xffffffff);
		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	} else {
		/* disable all the interrupts we might use */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/*
		 * fh/hw_mask keeps all the unmasked causes.
		 * Unlike msi, in msix cause is enabled when it is unset.
		 */
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}
}

static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
	trans_pcie->hw_mask = msk;
}

static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
	trans_pcie->fh_mask = msk;
}

static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
		iwl_enable_fh_int_msk_msix(trans,
					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
	}
}

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
	}
}

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id);
	}
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->q.id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id);
	} else
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->q.id);
}

static inline bool iwl_queue_used(const struct iwl_queue *q, int i)
{
	return q->write_ptr >= q->read_ptr ?
		(i >= q->read_ptr && i < q->write_ptr) :
		!(i < q->read_ptr && i >= q->write_ptr);
}
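
/*
 * For example: with read_ptr == 250 and write_ptr == 5 (i.e. the write
 * pointer has wrapped), indices 250..255 and 0..4 are considered used,
 * while index 10 is not.
 */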

static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
{
	return index & (q->n_window - 1);
}

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);

#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
{
	return 0;
}
#endif

int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);

#endif /* __iwl_trans_int_pcie_h__ */