/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <linux/cpu.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"

/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
#define FIRST_RX_QUEUE 512

struct iwl_host_cmd;

/* This file includes the declarations that are internal to the
 * trans_pcie layer */

/**
 * struct iwl_rx_mem_buffer
 * @page_dma: bus address of rxb page
 * @page: driver's pointer to the rxb page
 * @invalid: rxb is in driver ownership - not owned by HW
 * @vid: index of this rxb in the global table
 * @offset: indicates which offset of the page (in bytes)
 *	this buffer uses (if multiple RBs fit into one page)
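 * @list: list entry for the rx_free/rx_used or allocator lists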
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	u16 vid;
	bool invalid;
	struct list_head list;
	u32 offset;
};

/**
 * struct isr_statistics - interrupt statistics
 * @hw: number of hardware error interrupts
 * @sw: number of uCode (software) error interrupts
 * @err_code: last reported error code
 * @sch: number of scheduler interrupts
 * @alive: number of alive interrupts
 * @rfkill: number of RF-kill interrupts
 * @ctkill: number of CT-kill (thermal) interrupts
 * @wakeup: number of wakeup interrupts
 * @rx: number of Rx interrupts
 * @tx: number of Tx interrupts
 * @unhandled: number of interrupts with no handled cause
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

/**
 * struct iwl_rx_transfer_desc - transfer descriptor
 * @addr: ptr to free buffer start address
 * @rbid: unique tag of the buffer
 * @reserved: reserved
 */
struct iwl_rx_transfer_desc {
	__le16 rbid;
	__le16 reserved[3];
	__le64 addr;
} __packed;

#define IWL_RX_CD_FLAGS_FRAGMENTED	BIT(0)

/**
 * struct iwl_rx_completion_desc - completion descriptor
 * @reserved1: reserved
 * @rbid: unique tag of the received buffer
 * @flags: flags (0: fragmented, all others: reserved)
 * @reserved2: reserved
 */
struct iwl_rx_completion_desc {
	__le32 reserved1;
	__le16 rbid;
	u8 flags;
	u8 reserved2[25];
} __packed;

/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 *	In AX210 devices it is a pointer to a list of iwl_rx_transfer_desc's
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd)
 * @tr_tail: driver's pointer to the transmission ring tail buffer
 * @tr_tail_dma: physical address of the buffer for the transmission ring tail
 * @cr_tail: driver's pointer to the completion ring tail buffer
 * @cr_tail_dma: physical address of the buffer for the completion ring tail
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handed to the allocator to use for allocation
 * @write_actual: last write pointer written to the device, aligned down
 *	to a multiple of 8
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the rxq's data
 * @queue: actual rx queue. Not used for multi-rx queue.
 * @next_rb_is_fragment: indicates that the previous RB that we handled set
 *	the fragmented flag, so the next one is still another fragment
 *
 * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	union {
		void *used_bd;
		__le32 *bd_32;
		struct iwl_rx_completion_desc *cd;
	};
	dma_addr_t used_bd_dma;
	__le16 *tr_tail;
	dma_addr_t tr_tail_dma;
	__le16 *cr_tail;
	dma_addr_t cr_tail_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	u32 queue_size;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update, next_rb_is_fragment;
	void *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};

/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator had not processed yet
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handed to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};

/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index -- current index
 */
static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
{
	return ++index &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}

/**
 * iwl_get_closed_rb_stts - get closed rb stts from different structs
 * @rxq - the rxq to get the rb stts from
 */
static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
					    struct iwl_rxq *rxq)
{
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		__le16 *rb_stts = rxq->rb_stts;

		return READ_ONCE(*rb_stts);
	} else {
		struct iwl_rb_status *rb_stts = rxq->rb_stts;

		return READ_ONCE(rb_stts->closed_rb_num);
	}
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index -- current index
 */
static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
{
	return --index &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}
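/*
 * Note: both wrap helpers above rely on max_tfd_queue_size being a power
 * of two. For example, with 256 entries, incrementing index 255 yields
 * 256 & 255 == 0 and decrementing index 0 yields -1 & 255 == 255.
 */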

static inline dma_addr_t
iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
	return txq->first_tb_dma +
	       sizeof(struct iwl_pcie_first_tb_buf) * idx;
}

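/**
 * struct iwl_tso_hdr_page - scratch page for copied TSO/A-MSDU headers
 * @page: the scratch page itself
 * @pos: current write position within @page
 */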
struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};

#ifdef CONFIG_IWLWIFI_DEBUGFS
/**
 * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
 * debugfs file
 *
 * @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is closed.
 * @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open.
 * @IWL_FW_MON_DBGFS_STATE_DISABLED: the file is disabled, once this state is
 *	set the file can no longer be used.
 */
enum iwl_fw_mon_dbgfs_state {
	IWL_FW_MON_DBGFS_STATE_CLOSED,
	IWL_FW_MON_DBGFS_STATE_OPEN,
	IWL_FW_MON_DBGFS_STATE_DISABLED,
};
#endif

/**
 * enum iwl_shared_irq_flags - level of sharing for irq
 * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
 * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
 */
enum iwl_shared_irq_flags {
	IWL_SHARED_IRQ_NON_RX		= BIT(0),
	IWL_SHARED_IRQ_FIRST_RSS	= BIT(1),
};

/**
 * enum iwl_image_response_code - image response values
 * @IWL_IMAGE_RESP_DEF: the default value of the register
 * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
 * @IWL_IMAGE_RESP_FAIL: iml reading failed
 */
enum iwl_image_response_code {
	IWL_IMAGE_RESP_DEF		= 0,
	IWL_IMAGE_RESP_SUCCESS		= 1,
	IWL_IMAGE_RESP_FAIL		= 2,
};

/**
 * struct cont_rec: continuous recording data structure
 * @prev_wr_ptr: the last address that was read in monitor_data
 *	debugfs file
 * @prev_wrap_cnt: the wrap count that was used during the last read in
 *	monitor_data debugfs file
 * @state: the state of monitor_data debugfs file as described
 *	in &iwl_fw_mon_dbgfs_state enum
 * @mutex: locked while reading from monitor_data debugfs file
 */
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct cont_rec {
	u32 prev_wr_ptr;
	u32 prev_wrap_cnt;
	u8  state;
	/* Used to sync monitor_data debugfs file with driver unload flow */
	struct mutex mutex;
};
#endif

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @ctxt_info: context information for FW self init
 * @ctxt_info_gen3: context information for gen3 devices
 * @prph_info: prph info for self init
 * @prph_scratch: prph scratch for self init
 * @ctxt_info_dma_addr: dma addr of context information
 * @prph_info_dma_addr: dma addr of prph info
 * @prph_scratch_dma_addr: dma addr of prph scratch
 * @iml_dma_addr: dma addr of the image loader (IML)
 * @init_dram: DRAM data of firmware image (including paging).
 *	Context information addresses will be taken from here.
 *	This is driver's local copy for keeping track of size and
 *	count for allocating and freeing the memory.
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler sram base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue - command queue number
 * @def_rx_queue - default rx queue number
 * @rx_buf_size: Rx buffer size
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
 *	frame.
 * @rx_page_order: page order for receive buffer size
 * @rx_buf_bytes: RX buffer (RB) size in bytes
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @cmd_in_flight: true when we have a host command in flight
#ifdef CONFIG_IWLWIFI_DEBUGFS
 * @fw_mon_data: fw continuous recording data
#endif
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if managed to enable MSI-X
 * @shared_vec_mask: the type of causes the shared vector handles
 *	(see iwl_shared_irq_flags).
 * @alloc_vecs: the number of interrupt vectors allocated by the OS
 * @def_irq: default irq for non rx causes
 * @fh_init_mask: initial unmasked fh causes
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 * @in_rescan: true if we have triggered a device rescan
 * @base_rb_stts: base virtual address of receive buffer status for all queues
 * @base_rb_stts_dma: base physical address of receive buffer status
 * @supported_dma_mask: DMA mask to validate the actual address against,
 *	will be DMA_BIT_MASK(11) or DMA_BIT_MASK(12) depending on the device
 * @alloc_page_lock: spinlock for the page allocator
 * @alloc_page: allocated page to still use parts of
 * @alloc_page_used: how much of the allocated page was already used (bytes)
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;
	struct iwl_rx_mem_buffer *rx_pool;
	struct iwl_rx_mem_buffer **global_table;
	struct iwl_rb_allocator rba;
	union {
		struct iwl_context_info *ctxt_info;
		struct iwl_context_info_gen3 *ctxt_info_gen3;
	};
	struct iwl_prph_info *prph_info;
	struct iwl_prph_scratch *prph_scratch;
	dma_addr_t ctxt_info_dma_addr;
	dma_addr_t prph_info_dma_addr;
	dma_addr_t prph_scratch_dma_addr;
	dma_addr_t iml_dma_addr;
	struct iwl_trans *trans;

	struct net_device napi_dev;

	struct __percpu iwl_tso_hdr_page *tso_hdr_page;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down, opmode_down;
	s8 debug_rfkill;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	struct iwl_txq *txq_memory;

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	bool sx_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;
	wait_queue_head_t sx_waitq;

	u8 page_offs, dev_cmd_offs;

	u8 def_rx_queue;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
	u16 num_rx_bufs;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool sw_csum_tx;
	bool pcie_dbg_dumped_once;
	u32 rx_page_order;
	u32 rx_buf_bytes;
	u32 supported_dma_mask;

	/* allocator lock for the two values below */
	spinlock_t alloc_page_lock;
	struct page *alloc_page;
	u32 alloc_page_used;

	/* protect hw register */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	struct cont_rec fw_mon_data;
#endif

	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
	bool msix_enabled;
	u8 shared_vec_mask;
	u32 alloc_vecs;
	u32 def_irq;
	u32 fh_init_mask;
	u32 hw_init_mask;
	u32 fh_mask;
	u32 hw_mask;
	cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
	u16 tx_cmd_queue_size;
	bool in_rescan;

	void *base_rb_stts;
	dma_addr_t base_rb_stts_dma;
};

static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
	return (void *)trans->trans_specific;
}

static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
				      struct msix_entry *entry)
{
	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as
	 * write 1 clear (W1C) register, meaning that it is cleared by
	 * writing 1 to the bit.
	 */
	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
}

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}

/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans
*iwl_trans_pcie_alloc(struct pci_dev *pdev,
		      const struct pci_device_id *ent,
		      const struct iwl_cfg_trans_params *cfg_trans);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
/*
 * We need this inline in case dma_addr_t is only 32-bits - since the
 * hardware is always 64-bit, the issue can still occur in that case,
 * so use u64 for 'phys' here to force the addition in 64-bit.
 */
static inline bool iwl_pcie_crosses_4g_boundary(u64 phys, u16 len)
{
	return upper_32_bits(phys) != upper_32_bits(phys + len);
}
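/*
 * For example, phys == 0xfffff000 with len == 0x2000 crosses a 4 GB
 * boundary (upper_32_bits() goes from 0 to 1), which the TX path must
 * work around rather than map as a single TB.
 */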

int iwl_pcie_tx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id,
			  int queue_size);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode);
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
				  struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_tx_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
				  struct iwl_txq *txq);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);

static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
					  u8 idx)
{
	if (trans->trans_cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;
		struct iwl_tfh_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->tb_len);
	} else {
		struct iwl_tfd *tfd = _tfd;
		struct iwl_tfd_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->hi_n_len) >> 4;
	}
}

/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* disable interrupts from uCode/NIC to host */
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);

		/* acknowledge/clear/reset any interrupts still pending
		 * from uCode or flow handler (Rx/Tx DMA) */
		iwl_write32(trans, CSR_INT, 0xffffffff);
		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	} else {
		/* disable all the interrupt we might use */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

#define IWL_NUM_OF_COMPLETION_RINGS	31
#define IWL_NUM_OF_TRANSFER_RINGS	527

static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
					    int start)
{
	int i = 0;

	while (start < fw->num_sec &&
	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}
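/*
 * For example, for a firmware image laid out as
 * [sec0, sec1, CPU1_CPU2_SEPARATOR_SECTION, sec3, sec4],
 * iwl_pcie_get_num_sections(fw, 0) and iwl_pcie_get_num_sections(fw, 3)
 * both return 2.
 */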

static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
	struct iwl_self_init_dram *dram = &trans->init_dram;
	int i;

	if (!dram->fw) {
		WARN_ON(dram->fw_cnt);
		return;
	}

	for (i = 0; i < dram->fw_cnt; i++)
		dma_free_coherent(trans->dev, dram->fw[i].size,
				  dram->fw[i].block, dram->fw[i].physical);

	kfree(dram->fw);
	dram->fw_cnt = 0;
	dram->fw = NULL;
}

static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/*
		 * fh/hw_mask keeps all the unmasked causes.
		 * Unlike msi, in msix cause is enabled when it is unset.
		 */
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}
static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
	trans_pcie->hw_mask = msk;
}

static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
	trans_pcie->fh_mask = msk;
}
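/*
 * Note the inversion in the two helpers above: in the MSI-X mask registers
 * a set bit masks (disables) a cause, so writing ~msk leaves exactly the
 * causes in msk enabled.
 */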

static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
		iwl_enable_fh_int_msk_msix(trans,
					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
	}
}

static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt only\n");

	if (!trans_pcie->msix_enabled) {
		/*
		 * When we'll receive the ALIVE interrupt, the ISR will call
		 * iwl_enable_fw_load_int_ctx_info again to set the ALIVE
		 * interrupt (which is not really needed anymore) but also the
		 * RX interrupt which will allow us to receive the ALIVE
		 * notification (which is Rx) and continue the flow.
		 */
		trans_pcie->inta_mask =  CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_ALIVE);
		/*
		 * Leave all the FH causes enabled to get the ALIVE
		 * notification.
		 */
		iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask);
	}
}

static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
{
	return index & (q->n_window - 1);
}
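/*
 * n_window is expected to be a power of two, so the masking above is a
 * cheap modulo; e.g. with a 32-slot command queue, index 35 maps to slot 3.
 */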

static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
				     struct iwl_txq *txq, int idx)
{
	if (trans->trans_cfg->use_tfh)
		idx = iwl_pcie_get_cmd_index(txq, idx);

	return txq->tfds + trans->txqs.tfd.size * idx;
}

static inline const char *queue_name(struct device *dev,
				     struct iwl_trans_pcie *trans_p, int i)
{
	if (trans_p->shared_vec_mask) {
		int vec = trans_p->shared_vec_mask &
			  IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;

		if (i == 0)
			return DRV_NAME ": shared IRQ";

		return devm_kasprintf(dev, GFP_KERNEL,
				      DRV_NAME ": queue %d", i + vec);
	}
	if (i == 0)
		return DRV_NAME ": default queue";

	if (i == trans_p->alloc_vecs - 1)
		return DRV_NAME ": exception";

	return devm_kasprintf(dev, GFP_KERNEL,
			      DRV_NAME  ": queue %d", i);
}
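/*
 * For example, when the first vector is shared and also serves the first
 * RSS queue (IWL_SHARED_IRQ_FIRST_RSS), vector 0 is named
 * DRV_NAME ": shared IRQ" and vector 1 becomes DRV_NAME ": queue 2".
 */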

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
	}

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
		/*
		 * On 9000-series devices this bit isn't enabled by default, so
		 * when we power down the device we need to set the bit to allow it
		 * to wake up the PCI-E bus for RF-kill interrupts.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
	}
}

void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
	}
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	if (!test_and_set_bit(txq->id, trans->txqs.queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
	} else
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->id);
}

static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
	int index = iwl_pcie_get_cmd_index(q, i);
	int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
	int w = iwl_pcie_get_cmd_index(q, q->write_ptr);

	return w >= r ?
		(index >= r && index < w) :
		!(index < r && index >= w);
}
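/*
 * For example, with the read pointer at slot 250 and the write pointer
 * wrapped around to slot 5, iwl_queue_used() is true for slot 252 (still
 * in flight) and false for slot 10.
 */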

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->debug_rfkill == 1)
		return true;

	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}
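/*
 * The three helpers above do a non-atomic read-modify-write of a CSR
 * register, so callers serialize them (typically under reg_lock).
 */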

static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
{
	return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans));
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);

#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
#endif

void iwl_pcie_rx_allocator_work(struct work_struct *data);

/* common functions that are used by gen2 transport */
int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
		      int slots_num, bool cmd_queue);
int iwl_pcie_txq_alloc(struct iwl_trans *trans,
		       struct iwl_txq *txq, int slots_num,  bool cmd_queue);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);
void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
			    struct sk_buff *skb);
#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
				      struct sk_buff *skb);
#endif

/* common functions that are used by gen3 transport */
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);

/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
				   struct iwl_txq *txq);
int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
				     struct iwl_txq **intxq, int size,
				     unsigned int timeout);
int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
				      struct iwl_txq *txq,
				      struct iwl_host_cmd *hcmd);
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
				 __le16 flags, u8 sta_id, u8 tid,
				 int cmd_id, int size,
				 unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
			   struct iwl_device_tx_cmd *dev_cmd, int txq_id);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);
void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
				  bool test, bool reset);
#endif /* __iwl_trans_int_pcie_h__ */