/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#ifndef __iwl_trans_h__
#define __iwl_trans_h__

#include <linux/ieee80211.h>
#include <linux/mm.h> /* for page_address */
#include <linux/lockdep.h>
#include <linux/kernel.h>

#include "iwl-debug.h"
#include "iwl-config.h"
#include "fw/img.h"
#include "iwl-op-mode.h"
#include "fw/api/cmdhdr.h"
#include "fw/api/txq.h"

/**
 * DOC: Transport layer - what is it?
 *
 * The transport layer is the layer that deals with the HW directly. It provides
 * an abstraction of the underlying HW to the upper layer. The transport layer
 * doesn't provide any policy, algorithm or anything of this kind, but only
 * mechanisms to make the HW do something. It is not completely stateless but
 * close to it.
 * We will have an implementation for each different supported bus.
 */

/**
 * DOC: Life cycle of the transport layer
 *
 * The transport layer has a very precise life cycle.
 *
 *	1) A helper function is called during the module initialization and
 *	   registers the bus driver's ops with the transport's alloc function.
 *	2) The bus's probe function calls the transport layer's allocation
 *	   functions. Of course, these functions are bus specific.
 *	3) These allocation functions will spawn the upper layer which will
 *	   register with mac80211.
 *
 *	4) At some point (i.e. mac80211's start call), the op_mode will call
 *	   the following sequence:
 *	   start_hw
 *	   start_fw
 *
 *	5) Then when finished (or reset):
 *	   stop_device
 *
 *	6) Eventually, the free function will be called.
 */
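
/*
 * Illustrative sketch (not a real op_mode, error handling trimmed) of the
 * sequence above, using the inline wrappers declared later in this file;
 * "fw" stands for an image obtained from iwl_drv and "scd_base_addr" for
 * the value reported in the fw's alive notification:
 *
 *	int ret = iwl_trans_start_hw(trans);
 *
 *	if (!ret)
 *		ret = iwl_trans_start_fw(trans, fw, false);
 *
 *	(wait for the fw's alive notification, then:)
 *	iwl_trans_fw_alive(trans, scd_base_addr);
 *
 *	(... normal operation ...)
 *	iwl_trans_stop_device(trans);
 */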

#define FH_RSCSR_FRAME_SIZE_MSK		0x00003FFF	/* bits 0-13 */
#define FH_RSCSR_FRAME_INVALID		0x55550000
#define FH_RSCSR_FRAME_ALIGN		0x40
#define FH_RSCSR_RPA_EN			BIT(25)
#define FH_RSCSR_RADA_EN		BIT(26)
#define FH_RSCSR_RXQ_POS		16
#define FH_RSCSR_RXQ_MASK		0x3F0000

struct iwl_rx_packet {
	/*
	 * The first 4 bytes of the RX frame header contain both the RX frame
	 * size and some flags.
	 * Bit fields:
	 * 31:    flag flush RB request
	 * 30:    flag ignore TC (terminal counter) request
	 * 29:    flag fast IRQ request
	 * 28-27: Reserved
	 * 26:    RADA enabled
	 * 25:    Offload enabled
	 * 24:    RPF enabled
	 * 23:    RSS enabled
	 * 22:    Checksum enabled
	 * 21-16: RX queue
	 * 15-14: Reserved
	 * 13-00: RX frame size
	 */
	__le32 len_n_flags;
	struct iwl_cmd_header hdr;
	u8 data[];
} __packed;

static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
{
	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
}

static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
{
	return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
}
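
/*
 * Illustrative sketch: besides the frame size extracted by the helpers
 * above, the other len_n_flags bit fields can be decoded with the
 * FH_RSCSR_* definitions, e.g. the RX queue on which a frame arrived:
 *
 *	u32 len_n_flags = le32_to_cpu(pkt->len_n_flags);
 *	u32 rxq = (len_n_flags & FH_RSCSR_RXQ_MASK) >> FH_RSCSR_RXQ_POS;
 *	bool rada_done = len_n_flags & FH_RSCSR_RADA_EN;
 */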

/**
 * enum CMD_MODE - how to send the host commands?
 *
 * @CMD_ASYNC: Return right away and don't wait for the response
 * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
 *	the response. The caller needs to call iwl_free_resp when done.
 * @CMD_SEND_IN_RFKILL: Send the command even if the NIC is in RF-kill.
 * @CMD_HIGH_PRIO: The command is high priority - it goes to the front of the
 *	command queue, but after other high priority commands. Valid only
 *	with CMD_ASYNC.
 * @CMD_SEND_IN_IDLE: The command should be sent even when the trans is idle.
 * @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle.
 * @CMD_WAKE_UP_TRANS: The command response should wake up the trans
 *	(i.e. mark it as non-idle).
 * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
 *	called after this command completes. Valid only with CMD_ASYNC.
 */
enum CMD_MODE {
	CMD_ASYNC		= BIT(0),
	CMD_WANT_SKB		= BIT(1),
	CMD_SEND_IN_RFKILL	= BIT(2),
	CMD_HIGH_PRIO		= BIT(3),
	CMD_SEND_IN_IDLE	= BIT(4),
	CMD_MAKE_TRANS_IDLE	= BIT(5),
	CMD_WAKE_UP_TRANS	= BIT(6),
	CMD_WANT_ASYNC_CALLBACK	= BIT(7),
};

#define DEF_CMD_PAYLOAD_SIZE 320

/**
 * struct iwl_device_cmd
 *
 * For allocation of the command and tx queues, this establishes the overall
 * size of the largest command we send to uCode, except for commands that
 * aren't fully copied and use other TFD space.
 */
struct iwl_device_cmd {
	union {
		struct {
			struct iwl_cmd_header hdr;	/* uCode API */
			u8 payload[DEF_CMD_PAYLOAD_SIZE];
		};
		struct {
			struct iwl_cmd_header_wide hdr_wide;
			u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
					sizeof(struct iwl_cmd_header_wide) +
					sizeof(struct iwl_cmd_header)];
		};
	};
} __packed;

#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))

/*
 * number of transfer buffers (fragments) per transmit frame descriptor;
 * this is just the driver's idea, the hardware supports 20
 */
#define IWL_MAX_CMD_TBS_PER_TFD	2

/**
 * enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
 *
 * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
 *	ring. The transport layer doesn't map the command's buffer to DMA, but
 *	rather copies it to a previously allocated DMA buffer. This flag tells
 *	the transport layer not to copy the command, but to map the existing
 *	buffer (that is passed in) instead. This saves the memcpy and allows
 *	commands that are bigger than the fixed buffer to be submitted.
 *	Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
 * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
 *	chunk internally and free it again after the command completes. This
 *	can (currently) be used only once per command.
 *	Note that a TFD entry after a DUP one cannot be a normal copied one.
 */
enum iwl_hcmd_dataflag {
	IWL_HCMD_DFL_NOCOPY	= BIT(0),
	IWL_HCMD_DFL_DUP	= BIT(1),
};

/**
 * struct iwl_host_cmd - Host command to the uCode
 *
 * @data: array of chunks that composes the data of the host command
 * @resp_pkt: response packet, if %CMD_WANT_SKB was set
 * @_rx_page_order: (internally used to free response packet)
 * @_rx_page_addr: (internally used to free response packet)
 * @flags: can be CMD_*
 * @len: array of the lengths of the chunks in data
 * @dataflags: IWL_HCMD_DFL_*
 * @id: command id of the host command, for wide commands encoding the
 *	version and group as well
 */
struct iwl_host_cmd {
	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_rx_packet *resp_pkt;
	unsigned long _rx_page_addr;
	u32 _rx_page_order;

	u32 flags;
	u32 id;
	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
};

static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
{
	free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
}
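
/*
 * Usage sketch (illustrative only; ECHO_CMD is a placeholder for a real
 * command id): a synchronous host command that asks for the response
 * buffer must free it with iwl_free_resp() when done:
 *
 *	struct iwl_host_cmd cmd = {
 *		.id = ECHO_CMD,
 *		.flags = CMD_WANT_SKB,
 *	};
 *	int ret = iwl_trans_send_cmd(trans, &cmd);
 *
 *	if (!ret) {
 *		u32 len = iwl_rx_packet_payload_len(cmd.resp_pkt);
 *
 *		(... inspect up to len bytes of cmd.resp_pkt->data ...)
 *		iwl_free_resp(&cmd);
 *	}
 */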

struct iwl_rx_cmd_buffer {
	struct page *_page;
	int _offset;
	bool _page_stolen;
	u32 _rx_page_order;
	unsigned int truesize;
};

static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
{
	return (void *)((unsigned long)page_address(r->_page) + r->_offset);
}

static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
{
	return r->_offset;
}

static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
{
	r->_page_stolen = true;
	get_page(r->_page);
	return r->_page;
}

static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
{
	__free_pages(r->_page, r->_rx_page_order);
}
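
/*
 * Usage sketch (illustrative): an RX handler typically maps the buffer to
 * an iwl_rx_packet with rxb_addr() and calls rxb_steal_page() only if it
 * needs to keep the data beyond the handler's lifetime:
 *
 *	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 *	u32 len = iwl_rx_packet_payload_len(pkt);
 *
 *	(... parse pkt->hdr.cmd and pkt->data[0..len-1] ...)
 */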

#define MAX_NO_RECLAIM_CMDS	6

#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
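/* e.g. IWL_MASK(4, 7) == 0xf0 - bits 4..7 (inclusive) set */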

/*
 * Maximum number of HW queues the transport layer
 * currently supports
 */
#define IWL_MAX_HW_QUEUES		32
#define IWL_MAX_TVQM_QUEUES		512

#define IWL_MAX_TID_COUNT	8
#define IWL_MGMT_TID		15
#define IWL_FRAME_LIMIT	64
#define IWL_MAX_RX_HW_QUEUES	16

/**
 * enum iwl_d3_status - WoWLAN image/device status
 * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
 * @IWL_D3_STATUS_RESET: device was reset while suspended
 */
enum iwl_d3_status {
	IWL_D3_STATUS_ALIVE,
	IWL_D3_STATUS_RESET,
};

/**
 * enum iwl_trans_status - transport status flags
 * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
 * @STATUS_DEVICE_ENABLED: APM is enabled
 * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
 * @STATUS_INT_ENABLED: interrupts are enabled
 * @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch
 * @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode
 * @STATUS_FW_ERROR: the fw is in error state
 * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
 *	are sent
 * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
 * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
 */
enum iwl_trans_status {
	STATUS_SYNC_HCMD_ACTIVE,
	STATUS_DEVICE_ENABLED,
	STATUS_TPOWER_PMI,
	STATUS_INT_ENABLED,
	STATUS_RFKILL_HW,
	STATUS_RFKILL_OPMODE,
	STATUS_FW_ERROR,
	STATUS_TRANS_GOING_IDLE,
	STATUS_TRANS_IDLE,
	STATUS_TRANS_DEAD,
};

static inline int
iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_4K:
		return get_order(4 * 1024);
	case IWL_AMSDU_8K:
		return get_order(8 * 1024);
	case IWL_AMSDU_12K:
		return get_order(12 * 1024);
	default:
		WARN_ON(1);
		return -1;
	}
}

struct iwl_hcmd_names {
	u8 cmd_id;
	const char *const cmd_name;
};

#define HCMD_NAME(x)	\
	{ .cmd_id = x, .cmd_name = #x }

struct iwl_hcmd_arr {
	const struct iwl_hcmd_names *arr;
	int size;
};

#define HCMD_ARR(x)	\
	{ .arr = x, .size = ARRAY_SIZE(x) }
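
/*
 * Illustrative sketch (hypothetical names): an op_mode builds per-group
 * command-name tables with these macros and points
 * iwl_trans_config.command_groups at them for iwl_get_cmd_string(); each
 * array must be sorted by cmd_id (see iwl_cmd_groups_verify_sorted()):
 *
 *	static const struct iwl_hcmd_names iwl_example_legacy_names[] = {
 *		HCMD_NAME(ECHO_CMD),
 *		HCMD_NAME(REPLY_ERROR),
 *	};
 *
 *	static const struct iwl_hcmd_arr iwl_example_groups[] = {
 *		[0x0] = HCMD_ARR(iwl_example_legacy_names),
 *	};
 */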

/**
 * struct iwl_trans_config - transport configuration
 *
 * @op_mode: pointer to the upper layer.
 * @cmd_queue: the index of the command queue.
 *	Must be set before start_fw.
 * @cmd_fifo: the fifo for host commands
 * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue.
 * @no_reclaim_cmds: Some devices erroneously don't set the
 *	SEQ_RX_FRAME bit on some notifications, this is the
 *	list of such notifications to filter. Max length is
 *	%MAX_NO_RECLAIM_CMDS.
 * @n_no_reclaim_cmds: # of commands in list
 * @rx_buf_size: RX buffer size needed for A-MSDUs
 *	if unset 4k will be the RX buffer size
 * @bc_table_dword: set to true if the BC table expects the byte count to be
 *	in DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @sw_csum_tx: transport should compute the TCP checksum
 * @command_groups: array of command groups, each member is an array of the
 *	commands in the group; for debugging only
 * @command_groups_size: number of command groups, to avoid illegal access
 * @cb_data_offs: offset inside skb->cb to store transport data at, must have
 *	space for at least two pointers
 */
struct iwl_trans_config {
	struct iwl_op_mode *op_mode;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	const u8 *no_reclaim_cmds;
	unsigned int n_no_reclaim_cmds;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool sw_csum_tx;
	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;

	u8 cb_data_offs;
};
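
/*
 * Illustrative sketch (values are hypothetical, iwl_example_groups is the
 * array sketched above): the op_mode fills this struct once and hands it
 * to iwl_trans_configure() before starting the fw:
 *
 *	struct iwl_trans_config trans_cfg = {
 *		.op_mode = op_mode,
 *		.cmd_queue = 0,
 *		.cmd_fifo = 7,
 *		.cmd_q_wdg_timeout = IWL_DEF_WD_TIMEOUT,
 *		.rx_buf_size = IWL_AMSDU_4K,
 *		.command_groups = iwl_example_groups,
 *		.command_groups_size = ARRAY_SIZE(iwl_example_groups),
 *	};
 *
 *	iwl_trans_configure(trans, &trans_cfg);
 */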

struct iwl_trans_dump_data {
	u32 len;
	u8 data[];
};

struct iwl_trans;

struct iwl_trans_txq_scd_cfg {
	u8 fifo;
	u8 sta_id;
	u8 tid;
	bool aggregate;
	int frame_limit;
};

/**
 * struct iwl_trans_ops - transport specific operations
 *
 * All the handlers MUST be implemented
 *
 * @start_hw: starts the HW. If low_power is true, the NIC needs to be taken
 *	out of a low power state. From that point on, the HW can send
 *	interrupts. May sleep.
 * @op_mode_leave: Turn off the HW RF kill indication if on
 *	May sleep
 * @start_fw: allocates and inits all the resources for the transport
 *	layer. Also kicks a fw image.
 *	May sleep
 * @fw_alive: called when the fw sends alive notification. If the fw provides
 *	the SCD base address in SRAM, then provide it here, or 0 otherwise.
 *	May sleep
 * @stop_device: stops the whole device (embedded CPU put to reset) and stops
 *	the HW. If low_power is true, the NIC will be put in low power state.
 *	From that point on, the HW will be stopped but will still issue an
 *	interrupt if the HW RF kill switch is triggered.
 *	This callback must do the right thing and not crash even if %start_hw()
 *	was called but not %start_fw(). May sleep.
 * @d3_suspend: put the device into the correct mode for WoWLAN during
 *	suspend. This is optional, if not implemented WoWLAN will not be
 *	supported. This callback may sleep.
 * @d3_resume: resume the device after WoWLAN, enabling the opmode to
 *	talk to the WoWLAN image to get its status. This is optional, if not
 *	implemented WoWLAN will not be supported. This callback may sleep.
 * @send_cmd: send a host command. Must return -ERFKILL if RFkill is asserted.
 *	If RFkill is asserted in the middle of a SYNC host command, it must
 *	return -ERFKILL straight away.
 *	May sleep only if CMD_ASYNC is not set
 * @tx: send an skb. The transport relies on the op_mode to zero
 *	the ieee80211_tx_info->driver_data. If the MPDU is an A-MSDU, all
 *	the CSUM will be taken care of (TCP CSUM and IP header in case of
 *	IPv4). If the MPDU is a single MSDU, the op_mode must compute the IP
 *	header if it is IPv4.
 *	Must be atomic
 * @reclaim: free packets until ssn. Returns a list of freed packets.
 *	Must be atomic
 * @txq_enable: setup a queue. To setup an AC queue, use the
 *	iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
 *	this one. The op_mode must not configure the HCMD queue. The scheduler
 *	configuration may be %NULL, in which case the hardware will not be
 *	configured. If true is returned, the operation mode needs to increment
 *	the sequence number of the packets routed to this queue because of a
 *	hardware scheduler bug. May sleep.
 * @txq_disable: de-configure a Tx queue to send AMPDUs
 *	Must be atomic
 * @txq_set_shared_mode: change Tx queue shared/unshared marking
 * @wait_tx_queues_empty: wait until tx queues are empty. May sleep.
 * @wait_txq_empty: wait until specific tx queue is empty. May sleep.
 * @freeze_txq_timer: prevents the timer of the queue from firing until the
 *	queue is set to awake. Must be atomic.
 * @block_txq_ptrs: stop updating the write pointers of the Tx queues. Note
 *	that the transport needs to refcount the calls since this function
 *	will be called several times with block = true, and then the queues
 *	need to be unblocked only after the same number of calls with
 *	block = false.
 * @write8: write a u8 to a register at offset ofs from the BAR
 * @write32: write a u32 to a register at offset ofs from the BAR
 * @read32: read a u32 register at offset ofs from the BAR
 * @read_prph: read a DWORD from a periphery register
 * @write_prph: write a DWORD to a periphery register
 * @read_mem: read device's SRAM in DWORD
 * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory
 *	will be zeroed.
 * @configure: configure parameters required by the transport layer from
 *	the op_mode. May be called several times before start_fw, can't be
 *	called after that.
 * @set_pmi: set the power pmi state
 * @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
 *	Sleeping is not allowed between grab_nic_access and
 *	release_nic_access.
 * @release_nic_access: let the NIC go to sleep. The "flags" parameter
 *	must be the same one that was sent before to the grab_nic_access.
 * @set_bits_mask: set SRAM register according to value and mask.
 * @ref: grab a reference to the transport/FW layers, disallowing
 *	certain low power states
 * @unref: release a reference previously taken with @ref. Note that
 *	initially the reference count is 1, making an initial @unref
 *	necessary to allow low power states.
 * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
 *	TX'ed commands and similar. The buffer will be vfree'd by the caller.
 *	Note that the transport must fill in the proper file headers.
 * @dump_regs: dump the device's configuration space and memory mapped
 *	registers using IWL_ERR, to diagnose failure, e.g., when HW becomes
 *	inaccessible.
 */
struct iwl_trans_ops {

	int (*start_hw)(struct iwl_trans *iwl_trans, bool low_power);
	void (*op_mode_leave)(struct iwl_trans *iwl_trans);
	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
			bool run_in_rfkill);
	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
	void (*stop_device)(struct iwl_trans *trans, bool low_power);

	void (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
	int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
			 bool test, bool reset);

	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
		  struct iwl_device_cmd *dev_cmd, int queue);
	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
			struct sk_buff_head *skbs);

	bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
			   const struct iwl_trans_txq_scd_cfg *cfg,
			   unsigned int queue_wdg_timeout);
	void (*txq_disable)(struct iwl_trans *trans, int queue,
			    bool configure_scd);
	/* 22000 functions */
	int (*txq_alloc)(struct iwl_trans *trans,
			 struct iwl_tx_queue_cfg_cmd *cmd,
			 int cmd_id,
			 unsigned int queue_wdg_timeout);
	void (*txq_free)(struct iwl_trans *trans, int queue);

	void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
				    bool shared);

	int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
	int (*wait_txq_empty)(struct iwl_trans *trans, int queue);
	void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
				 bool freeze);
	void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);

	void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
	void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
	u32 (*read32)(struct iwl_trans *trans, u32 ofs);
	u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
	void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
	int (*read_mem)(struct iwl_trans *trans, u32 addr,
			void *buf, int dwords);
	int (*write_mem)(struct iwl_trans *trans, u32 addr,
			 const void *buf, int dwords);
	void (*configure)(struct iwl_trans *trans,
			  const struct iwl_trans_config *trans_cfg);
	void (*set_pmi)(struct iwl_trans *trans, bool state);
	void (*sw_reset)(struct iwl_trans *trans);
	bool (*grab_nic_access)(struct iwl_trans *trans, unsigned long *flags);
	void (*release_nic_access)(struct iwl_trans *trans,
				   unsigned long *flags);
	void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
			      u32 value);
	void (*ref)(struct iwl_trans *trans);
	void (*unref)(struct iwl_trans *trans);
	int  (*suspend)(struct iwl_trans *trans);
	void (*resume)(struct iwl_trans *trans);

	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
						 const struct iwl_fw_dbg_trigger_tlv
						 *trigger);

	void (*dump_regs)(struct iwl_trans *trans);
};

/**
 * enum iwl_trans_state - state of the transport layer
 *
 * @IWL_TRANS_NO_FW: no fw has sent an alive response
 * @IWL_TRANS_FW_ALIVE: a fw has sent an alive response
 */
enum iwl_trans_state {
	IWL_TRANS_NO_FW = 0,
	IWL_TRANS_FW_ALIVE	= 1,
};

/**
 * DOC: Platform power management
 *
 * There are two types of platform power management: system-wide
 * (WoWLAN) and runtime.
 *
 * In system-wide power management the entire platform goes into a low
 * power state (e.g. idle or suspend to RAM) at the same time and the
 * device is configured as a wakeup source for the entire platform.
 * This is usually triggered by userspace activity (e.g. the user
 * presses the suspend button or a power management daemon decides to
 * put the platform in low power mode).  The device's behavior in this
 * mode is dictated by the wake-on-WLAN configuration.
 *
 * In runtime power management, only the devices which are themselves
 * idle enter a low power state.  This is done at runtime, which means
 * that the entire system is still running normally.  This mode is
 * usually triggered automatically by the device driver and requires
 * the ability to enter and exit the low power modes in a very short
 *	time, so there is not much impact on usability.
 *
 * The terms used for the device's behavior are as follows:
 *
 *	- D0: the device is fully powered and the host is awake;
 *	- D3: the device is in low power mode and only reacts to
 *		specific events (e.g. magic-packet received or scan
 *		results found);
 *	- D0I3: the device is in low power mode and reacts to any
 *		activity (e.g. RX);
 *
 * These terms reflect the power modes in the firmware and are not to
 * be confused with the physical device power state.  The NIC can be
 * in D0I3 mode even if, for instance, the PCI device is in D3 state.
 */

/**
 * enum iwl_plat_pm_mode - platform power management mode
 *
 * This enumeration describes the device's platform power management
 * behavior when in idle mode (i.e. runtime power management) or when
 * in system-wide suspend (i.e WoWLAN).
 *
 * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled for this
 *	device.  At runtime, this means that nothing happens and the
 *	device always remains active.  In system-wide suspend mode,
 *	it means that all connections will be closed automatically
 *	by mac80211 before the platform is suspended.
 * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode (i.e. WoWLAN).
 *	For runtime power management, this mode is not officially
 *	supported.
 * @IWL_PLAT_PM_MODE_D0I3: the device goes into D0I3 mode.
 */
enum iwl_plat_pm_mode {
	IWL_PLAT_PM_MODE_DISABLED,
	IWL_PLAT_PM_MODE_D3,
	IWL_PLAT_PM_MODE_D0I3,
};

/* Max time to wait for trans to become idle/non-idle on d0i3
 * enter/exit (in msecs).
 */
#define IWL_TRANS_IDLE_TIMEOUT 2000

/**
 * struct iwl_trans - transport common data
 *
 * @ops: pointer to iwl_trans_ops
 * @op_mode: pointer to the op_mode
 * @cfg: pointer to the configuration
 * @drv: pointer to iwl_drv
 * @status: a bit-mask of transport status flags
 * @dev: pointer to struct device * that represents the device
 * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
 *	0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
 * @hw_rev: the revision data of the HW
 * @hw_rf_id: a u32 with the device RF ID
 * @hw_id: a u32 with the ID of the device / sub-device.
 *	Set during transport allocation.
 * @hw_id_str: a string with info about HW ID. Set during transport allocation.
 * @pm_support: set to true in start_hw if link pm is supported
 * @ltr_enabled: set to true if the LTR is enabled
 * @wide_cmd_header: true when ucode supports wide command header format
 * @num_rx_queues: number of RX queues allocated by the transport;
 *	the transport must set this before calling iwl_drv_start()
 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
 *	The user should use iwl_trans_{alloc,free}_tx_cmd.
 * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
 *	starting the firmware, used for tracing
 * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
 *	start of the 802.11 header in the @rx_mpdu_cmd
 * @dbg_dest_tlv: points to the destination TLV for debug
 * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
 * @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
 * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
 * @system_pm_mode: the system-wide power management mode in use.
 *	This mode is set dynamically, depending on the WoWLAN values
 *	configured from the userspace at runtime.
 * @runtime_pm_mode: the runtime power management mode in use.  This
 *	mode is set during the initialization phase and is not
 *	supposed to change during runtime.
 */
struct iwl_trans {
	const struct iwl_trans_ops *ops;
	struct iwl_op_mode *op_mode;
	const struct iwl_cfg *cfg;
	struct iwl_drv *drv;
	enum iwl_trans_state state;
	unsigned long status;

	struct device *dev;
	u32 max_skb_frags;
	u32 hw_rev;
	u32 hw_rf_id;
	u32 hw_id;
	char hw_id_str[52];

	u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;

	bool pm_support;
	bool ltr_enabled;

	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;
	bool wide_cmd_header;

	u8 num_rx_queues;

	/* The following fields are internal only */
	struct kmem_cache *dev_cmd_pool;
	char dev_cmd_pool_name[50];

	struct dentry *dbgfs_dir;

#ifdef CONFIG_LOCKDEP
	struct lockdep_map sync_cmd_lockdep_map;
#endif

	const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv;
	const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
	struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
	u8 dbg_dest_reg_num;

	enum iwl_plat_pm_mode system_pm_mode;
	enum iwl_plat_pm_mode runtime_pm_mode;
	bool suspending;

	/* pointer to trans specific struct */
	/* Ensure that this pointer will always be aligned to sizeof pointer */
	char trans_specific[0] __aligned(sizeof(void *));
};

const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);

static inline void iwl_trans_configure(struct iwl_trans *trans,
				       const struct iwl_trans_config *trans_cfg)
{
	trans->op_mode = trans_cfg->op_mode;

	trans->ops->configure(trans, trans_cfg);
	WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
}

static inline int _iwl_trans_start_hw(struct iwl_trans *trans, bool low_power)
{
	might_sleep();

	return trans->ops->start_hw(trans, low_power);
}

static inline int iwl_trans_start_hw(struct iwl_trans *trans)
{
	return trans->ops->start_hw(trans, true);
}

static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
{
	might_sleep();

	if (trans->ops->op_mode_leave)
		trans->ops->op_mode_leave(trans);

	trans->op_mode = NULL;

	trans->state = IWL_TRANS_NO_FW;
}

static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	might_sleep();

	trans->state = IWL_TRANS_FW_ALIVE;

	trans->ops->fw_alive(trans, scd_addr);
}

static inline int iwl_trans_start_fw(struct iwl_trans *trans,
				     const struct fw_img *fw,
				     bool run_in_rfkill)
{
	might_sleep();

	WARN_ON_ONCE(!trans->rx_mpdu_cmd);

	clear_bit(STATUS_FW_ERROR, &trans->status);
	return trans->ops->start_fw(trans, fw, run_in_rfkill);
}

static inline void _iwl_trans_stop_device(struct iwl_trans *trans,
					  bool low_power)
{
	might_sleep();

	trans->ops->stop_device(trans, low_power);

	trans->state = IWL_TRANS_NO_FW;
}

static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
	_iwl_trans_stop_device(trans, true);
}
static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
					bool reset)
{
	might_sleep();
	if (trans->ops->d3_suspend)
		trans->ops->d3_suspend(trans, test, reset);
}

static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
				      enum iwl_d3_status *status,
				      bool test, bool reset)
{
	might_sleep();
	if (!trans->ops->d3_resume)
		return 0;

	return trans->ops->d3_resume(trans, status, test, reset);
}

static inline int iwl_trans_suspend(struct iwl_trans *trans)
{
	if (!trans->ops->suspend)
		return 0;

	return trans->ops->suspend(trans);
}

static inline void iwl_trans_resume(struct iwl_trans *trans)
{
	if (trans->ops->resume)
		trans->ops->resume(trans);
}

static inline struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans,
		    const struct iwl_fw_dbg_trigger_tlv *trigger)
{
	if (!trans->ops->dump_data)
		return NULL;
	return trans->ops->dump_data(trans, trigger);
}

static inline void iwl_trans_dump_regs(struct iwl_trans *trans)
{
	if (trans->ops->dump_regs)
		trans->ops->dump_regs(trans);
}

static inline struct iwl_device_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
	return kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
}

int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
					 struct iwl_device_cmd *dev_cmd)
{
	kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
}

static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
			       struct iwl_device_cmd *dev_cmd, int queue)
{
	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->tx(trans, skb, dev_cmd, queue);
}

static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
				     int ssn, struct sk_buff_head *skbs)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	trans->ops->reclaim(trans, queue, ssn, skbs);
}

static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
					 bool configure_scd)
{
	trans->ops->txq_disable(trans, queue, configure_scd);
}

static inline bool
iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
			 const struct iwl_trans_txq_scd_cfg *cfg,
			 unsigned int queue_wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return false;
	}

	return trans->ops->txq_enable(trans, queue, ssn,
				      cfg, queue_wdg_timeout);
}
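
/*
 * Usage sketch (illustrative, hypothetical values): enabling a queue for
 * an aggregation session; per the @txq_enable documentation, a true return
 * means the op_mode must increment the sequence numbers of frames routed
 * to this queue:
 *
 *	struct iwl_trans_txq_scd_cfg cfg = {
 *		.fifo = fifo,
 *		.sta_id = sta_id,
 *		.tid = tid,
 *		.frame_limit = IWL_FRAME_LIMIT,
 *		.aggregate = true,
 *	};
 *
 *	if (iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, wdg_timeout))
 *		(... bump the sequence numbers for this queue ...)
 */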

static inline void
iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->txq_free))
		return;

	trans->ops->txq_free(trans, queue);
}

static inline int
iwl_trans_txq_alloc(struct iwl_trans *trans,
		    struct iwl_tx_queue_cfg_cmd *cmd,
		    int cmd_id,
		    unsigned int queue_wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(!trans->ops->txq_alloc))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->txq_alloc(trans, cmd, cmd_id, queue_wdg_timeout);
}

static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
						 int queue, bool shared_mode)
{
	if (trans->ops->txq_set_shared_mode)
		trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
}

static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
					int fifo, int sta_id, int tid,
					int frame_limit, u16 ssn,
					unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = sta_id,
		.tid = tid,
		.frame_limit = frame_limit,
		.aggregate = sta_id >= 0,
	};

	iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
}

static inline
void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
			     unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = -1,
		.tid = IWL_MAX_TID_COUNT,
		.frame_limit = IWL_FRAME_LIMIT,
		.aggregate = false,
	};

	iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
}

static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
					      unsigned long txqs,
					      bool freeze)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->freeze_txq_timer)
		trans->ops->freeze_txq_timer(trans, txqs, freeze);
}

static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
					    bool block)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->block_txq_ptrs)
		trans->ops->block_txq_ptrs(trans, block);
}

static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
						 u32 txqs)
{
	if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_tx_queues_empty(trans, txqs);
}

static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->wait_txq_empty))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_txq_empty(trans, queue);
}

static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	trans->ops->write8(trans, ofs, val);
}

static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	trans->ops->write32(trans, ofs, val);
}

static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read32(trans, ofs);
}

static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read_prph(trans, ofs);
}

static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
					u32 val)
{
	return trans->ops->write_prph(trans, ofs, val);
}

static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
				     void *buf, int dwords)
{
	return trans->ops->read_mem(trans, addr, buf, dwords);
}

#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)		      \
	do {								      \
		if (__builtin_constant_p(bufsize))			      \
			BUILD_BUG_ON((bufsize) % sizeof(u32));		      \
		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
	} while (0)

static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
	u32 value;

	if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
		return 0xa5a5a5a5;

	return value;
}

static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
				      const void *buf, int dwords)
{
	return trans->ops->write_mem(trans, addr, buf, dwords);
}

static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
					u32 val)
{
	return iwl_trans_write_mem(trans, addr, &val, 1);
}
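
/*
 * Usage sketch (illustrative, the SRAM address is hypothetical): reading a
 * DWORD-aligned chunk of device memory into a local buffer:
 *
 *	u32 buf[4];
 *
 *	iwl_trans_read_mem_bytes(trans, 0x800000, buf, sizeof(buf));
 */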

static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
	if (trans->ops->set_pmi)
		trans->ops->set_pmi(trans, state);
}

static inline void iwl_trans_sw_reset(struct iwl_trans *trans)
{
	if (trans->ops->sw_reset)
		trans->ops->sw_reset(trans);
}

static inline void
iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
{
	trans->ops->set_bits_mask(trans, reg, mask, value);
}

#define iwl_trans_grab_nic_access(trans, flags)	\
	__cond_lock(nic_access,				\
		    likely((trans)->ops->grab_nic_access(trans, flags)))

static inline void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
{
	trans->ops->release_nic_access(trans, flags);
	__release(nic_access);
}
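
/*
 * Usage sketch (illustrative; SOME_PRPH_REG is a placeholder register):
 * every successful grab must be paired with a release, and sleeping is not
 * allowed in between:
 *
 *	unsigned long flags;
 *
 *	if (iwl_trans_grab_nic_access(trans, &flags)) {
 *		u32 val = iwl_trans_read_prph(trans, SOME_PRPH_REG);
 *
 *		iwl_trans_release_nic_access(trans, &flags);
 *	}
 */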

static inline void iwl_trans_fw_error(struct iwl_trans *trans)
{
	if (WARN_ON_ONCE(!trans->op_mode))
		return;

	/* prevent double restarts due to the same erroneous FW */
	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status))
		iwl_op_mode_nic_error(trans->op_mode);
}

/*****************************************************
 * transport helper functions
 *****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
				  const struct iwl_cfg *cfg,
				  const struct iwl_trans_ops *ops);
void iwl_trans_free(struct iwl_trans *trans);
void iwl_trans_ref(struct iwl_trans *trans);
void iwl_trans_unref(struct iwl_trans *trans);

/*****************************************************
* driver (transport) register/unregister functions
******************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);

#endif /* __iwl_trans_h__ */