/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#ifndef __iwl_trans_h__
#define __iwl_trans_h__

#include <linux/ieee80211.h>
#include <linux/mm.h> /* for page_address */
#include <linux/lockdep.h>

#include "iwl-debug.h"
#include "iwl-config.h"
#include "iwl-fw.h"
#include "iwl-op-mode.h"

/**
 * DOC: Transport layer - what is it?
 *
 * The transport layer is the layer that deals with the HW directly. It provides
 * an abstraction of the underlying HW to the upper layer. The transport layer
 * doesn't provide any policy, algorithm or anything of this kind, but only
 * mechanisms to make the HW do something. It is not completely stateless but
 * close to it.
 * We will have an implementation for each different supported bus.
 */

/**
 * DOC: Life cycle of the transport layer
 *
 * The transport layer has a very precise life cycle.
 *
 *	1) A helper function is called during the module initialization and
 *	   registers the bus driver's ops with the transport's alloc function.
 *	2) Bus's probe calls to the transport layer's allocation functions.
 *	   Of course this function is bus specific.
 *	3) These allocation functions will spawn the upper layer which will
 *	   register mac80211.
 *
 *	4) At some point (i.e. mac80211's start call), the op_mode will call
 *	   the following sequence:
 *	   start_hw
 *	   start_fw
 *
 *	5) Then when finished (or reset):
 *	   stop_device
 *
 *	6) Eventually, the free function will be called.
 */

/**
 * DOC: Host command section
 *
 * A host command is a command issued by the upper layer to the fw. There are
 * several versions of fw that have several APIs. The transport layer is
 * completely agnostic to these differences.
 * The transport does provide helper functionality (i.e. SYNC / ASYNC mode).
 */
#define SEQ_TO_QUEUE(s)	(((s) >> 8) & 0x1f)
#define QUEUE_TO_SEQ(q)	(((q) & 0x1f) << 8)
#define SEQ_TO_INDEX(s)	((s) & 0xff)
#define INDEX_TO_SEQ(i)	((i) & 0xff)
#define SEQ_RX_FRAME	cpu_to_le16(0x8000)

/*
 * those functions retrieve specific information from
 * the id field in the iwl_host_cmd struct which contains
 * the command id, the group id and the version of the command
 * and vice versa
 */
static inline u8 iwl_cmd_opcode(u32 cmdid)
{
	return cmdid & 0xFF;
}

static inline u8 iwl_cmd_groupid(u32 cmdid)
{
	return ((cmdid & 0xFF00) >> 8);
}

static inline u8 iwl_cmd_version(u32 cmdid)
{
	return ((cmdid & 0xFF0000) >> 16);
}

static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version)
{
	return opcode + (groupid << 8) + (version << 16);
}

/* make u16 wide id out of u8 group and opcode */
#define WIDE_ID(grp, opcode) (((grp) << 8) | (opcode))

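/*
 * Illustrative sketch (not part of the driver API): how the command id
 * helpers above fit together. The group/opcode values are made up purely
 * for the example.
 *
 *	u32 id = iwl_cmd_id(0x20, 0x2, 0);	id == 0x0220
 *	u8 opcode  = iwl_cmd_opcode(id);	opcode  == 0x20
 *	u8 groupid = iwl_cmd_groupid(id);	groupid == 0x2
 *	u8 version = iwl_cmd_version(id);	version == 0
 *	u16 wide = WIDE_ID(0x2, 0x20);		wide == 0x0220, for the wide header
 */
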
/* due to the conversion, this group is special; new groups
 * should be defined in the appropriate fw-api header files
 */
#define IWL_ALWAYS_LONG_GROUP	1

/**
 * struct iwl_cmd_header
 *
 * This header format appears in the beginning of each command sent from the
 * driver, and each response/notification received from uCode.
 */
struct iwl_cmd_header {
	u8 cmd;		/* Command ID:  REPLY_RXON, etc. */
	u8 group_id;
	/*
	 * The driver sets up the sequence number to values of its choosing.
	 * uCode does not use this value, but passes it back to the driver
	 * when sending the response to each driver-originated command, so
	 * the driver can match the response to the command.  Since the values
	 * don't get used by uCode, the driver may set up an arbitrary format.
	 *
	 * There is one exception:  uCode sets bit 15 when it originates
	 * the response/notification, i.e. when the response/notification
	 * is not a direct response to a command sent by the driver.  For
	 * example, uCode issues REPLY_RX when it sends a received frame
	 * to the driver; it is not a direct response to any driver command.
	 *
	 * The Linux driver uses the following format:
	 *
	 *  0:7		tfd index - position within TX queue
	 *  8:12	TX queue id
	 *  13:14	reserved
	 *  15		unsolicited RX or uCode-originated notification
	 */
	__le16 sequence;
} __packed;

/**
 * struct iwl_cmd_header_wide
 *
 * This header format appears in the beginning of each command sent from the
 * driver, and each response/notification received from uCode.
 * this is the wide version that contains more information about the command
 * like length, version and command type
 */
struct iwl_cmd_header_wide {
	u8 cmd;
	u8 group_id;
	__le16 sequence;
	__le16 length;
	u8 reserved;
	u8 version;
} __packed;

#define FH_RSCSR_FRAME_SIZE_MSK		0x00003FFF	/* bits 0-13 */
#define FH_RSCSR_FRAME_INVALID		0x55550000
#define FH_RSCSR_FRAME_ALIGN		0x40

struct iwl_rx_packet {
	/*
	 * The first 4 bytes of the RX frame header contain both the RX frame
	 * size and some flags.
	 * Bit fields:
	 * 31:    flag flush RB request
	 * 30:    flag ignore TC (terminal counter) request
	 * 29:    flag fast IRQ request
	 * 28-14: Reserved
	 * 13-00: RX frame size
	 */
	__le32 len_n_flags;
	struct iwl_cmd_header hdr;
	u8 data[];
} __packed;

static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
{
	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
}

static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
{
	return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
}

/**
 * enum CMD_MODE - how to send the host commands?
 *
 * @CMD_ASYNC: Return right away and don't wait for the response
 * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
 *	the response. The caller needs to call iwl_free_resp when done.
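 * @CMD_SEND_IN_RFKILL: The command is allowed to go out even when RFkill is
 *	asserted; without this flag, iwl_trans_send_cmd() returns -ERFKILL
 *	straight away while RFkill is on.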
 * @CMD_HIGH_PRIO: The command is high priority - it goes to the front of the
 *	command queue, but after other high priority commands. Valid only
 *	with CMD_ASYNC.
 * @CMD_SEND_IN_IDLE: The command should be sent even when the trans is idle.
 * @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle.
 * @CMD_WAKE_UP_TRANS: The command response should wake up the trans
 *	(i.e. mark it as non-idle).
 * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
 *	called after this command completes. Valid only with CMD_ASYNC.
 * @CMD_TB_BITMAP_POS: Position of the first bit for the TB bitmap. We need to
 *	check that we leave enough room for the TBs bitmap which needs 20 bits.
 */
enum CMD_MODE {
	CMD_ASYNC		= BIT(0),
	CMD_WANT_SKB		= BIT(1),
	CMD_SEND_IN_RFKILL	= BIT(2),
	CMD_HIGH_PRIO		= BIT(3),
	CMD_SEND_IN_IDLE	= BIT(4),
	CMD_MAKE_TRANS_IDLE	= BIT(5),
	CMD_WAKE_UP_TRANS	= BIT(6),
	CMD_WANT_ASYNC_CALLBACK	= BIT(7),

	CMD_TB_BITMAP_POS	= 11,
};

#define DEF_CMD_PAYLOAD_SIZE 320

/**
 * struct iwl_device_cmd
 *
 * For allocation of the command and tx queues, this establishes the overall
 * size of the largest command we send to uCode, except for commands that
 * aren't fully copied and use other TFD space.
 */
struct iwl_device_cmd {
	union {
		struct {
			struct iwl_cmd_header hdr;	/* uCode API */
			u8 payload[DEF_CMD_PAYLOAD_SIZE];
		};
		struct {
			struct iwl_cmd_header_wide hdr_wide;
			u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
					sizeof(struct iwl_cmd_header_wide) +
					sizeof(struct iwl_cmd_header)];
		};
	};
} __packed;

#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))

/*
 * number of transfer buffers (fragments) per transmit frame descriptor;
 * this is just the driver's idea, the hardware supports 20
 */
#define IWL_MAX_CMD_TBS_PER_TFD	2

/**
 * struct iwl_hcmd_dataflag - flag for each one of the chunks of the command
 *
 * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
 *	ring. The transport layer doesn't map the command's buffer to DMA, but
 *	rather copies it to a previously allocated DMA buffer. This flag tells
 *	the transport layer not to copy the command, but to map the existing
 *	buffer (that is passed in) instead. This saves the memcpy and allows
 *	commands that are bigger than the fixed buffer to be submitted.
 *	Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
 * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
 *	chunk internally and free it again after the command completes. This
 *	can (currently) be used only once per command.
 *	Note that a TFD entry after a DUP one cannot be a normal copied one.
 */
enum iwl_hcmd_dataflag {
	IWL_HCMD_DFL_NOCOPY	= BIT(0),
	IWL_HCMD_DFL_DUP	= BIT(1),
};

/**
 * struct iwl_host_cmd - Host command to the uCode
 *
 * @data: array of chunks that composes the data of the host command
 * @resp_pkt: response packet, if %CMD_WANT_SKB was set
 * @_rx_page_order: (internally used to free response packet)
 * @_rx_page_addr: (internally used to free response packet)
 * @flags: can be CMD_*
 * @len: array of the lengths of the chunks in data
 * @dataflags: IWL_HCMD_DFL_*
 * @id: command id of the host command, for wide commands encoding the
 *	version and group as well
 */
struct iwl_host_cmd {
	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_rx_packet *resp_pkt;
	unsigned long _rx_page_addr;
	u32 _rx_page_order;

	u32 flags;
	u32 id;
	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
};

static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
{
	free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
}

struct iwl_rx_cmd_buffer {
	struct page *_page;
	int _offset;
	bool _page_stolen;
	u32 _rx_page_order;
	unsigned int truesize;
};

static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
{
	return (void *)((unsigned long)page_address(r->_page) + r->_offset);
}

static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
{
	return r->_offset;
}

static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
{
	r->_page_stolen = true;
	get_page(r->_page);
	return r->_page;
}

static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
{
	__free_pages(r->_page, r->_rx_page_order);
}

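/*
 * Illustrative sketch (not part of the driver API): typical use of the RX
 * accessors above by an opmode notification handler. "rxb" is assumed to be
 * the &struct iwl_rx_cmd_buffer handed to the opmode, and "struct my_notif"
 * is a hypothetical firmware notification layout used only for the example.
 *
 *	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 *	struct my_notif *notif;
 *
 *	if (iwl_rx_packet_payload_len(pkt) < sizeof(*notif))
 *		return;		(truncated / malformed notification)
 *	notif = (void *)pkt->data;
 *	(the page backing pkt stays owned by the transport unless
 *	 rxb_steal_page() is called)
 */
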
#define MAX_NO_RECLAIM_CMDS	6

#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))

/*
 * Maximum number of HW queues the transport layer
 * currently supports
 */
#define IWL_MAX_HW_QUEUES		32
#define IWL_MAX_TID_COUNT	8
#define IWL_FRAME_LIMIT	64
#define IWL_MAX_RX_HW_QUEUES	16

/**
 * enum iwl_d3_status - WoWLAN image/device status
 * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
 * @IWL_D3_STATUS_RESET: device was reset while suspended
 */
enum iwl_d3_status {
	IWL_D3_STATUS_ALIVE,
	IWL_D3_STATUS_RESET,
};

/**
 * enum iwl_trans_status: transport status flags
 * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
 * @STATUS_DEVICE_ENABLED: APM is enabled
 * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
 * @STATUS_INT_ENABLED: interrupts are enabled
 * @STATUS_RFKILL: the HW RFkill switch is in KILL position
 * @STATUS_FW_ERROR: the fw is in error state
 * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
 *	are sent
 * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
 * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
 */
enum iwl_trans_status {
	STATUS_SYNC_HCMD_ACTIVE,
	STATUS_DEVICE_ENABLED,
	STATUS_TPOWER_PMI,
	STATUS_INT_ENABLED,
	STATUS_RFKILL,
	STATUS_FW_ERROR,
	STATUS_TRANS_GOING_IDLE,
	STATUS_TRANS_IDLE,
	STATUS_TRANS_DEAD,
};

static inline int
iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_4K:
		return get_order(4 * 1024);
	case IWL_AMSDU_8K:
		return get_order(8 * 1024);
	case IWL_AMSDU_12K:
		return get_order(12 * 1024);
	default:
		WARN_ON(1);
		return -1;
	}
}

/**
 * struct iwl_trans_config - transport configuration
 *
 * @op_mode: pointer to the upper layer.
 * @cmd_queue: the index of the command queue.
 *	Must be set before start_fw.
 * @cmd_fifo: the fifo for host commands
 * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue.
 * @no_reclaim_cmds: Some devices erroneously don't set the
 *	SEQ_RX_FRAME bit on some notifications, this is the
 *	list of such notifications to filter. Max length is
 *	%MAX_NO_RECLAIM_CMDS.
 * @n_no_reclaim_cmds: # of commands in list
 * @rx_buf_size: RX buffer size needed for A-MSDUs
 *	if unset 4k will be the RX buffer size
 * @bc_table_dword: set to true if the BC table expects the byte count to be
 *	in DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @wide_cmd_header: firmware supports wide host command header
 * @command_names: array of command names, must be 256 entries
 *	(one for each command); for debugging only
 * @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until
 *	we get the ALIVE from the uCode
 */
struct iwl_trans_config {
	struct iwl_op_mode *op_mode;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	const u8 *no_reclaim_cmds;
	unsigned int n_no_reclaim_cmds;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool wide_cmd_header;
	const char *const *command_names;

	u32 sdio_adma_addr;
};

struct iwl_trans_dump_data {
	u32 len;
	u8 data[];
};

struct iwl_trans;

struct iwl_trans_txq_scd_cfg {
	u8 fifo;
	s8 sta_id;
	u8 tid;
	bool aggregate;
	int frame_limit;
};

/**
 * struct iwl_trans_ops - transport specific operations
 *
 * All the handlers MUST be implemented
 *
 * @start_hw: starts the HW. If low_power is true, the NIC needs to be taken
 *	out of a low power state. From that point on, the HW can send
 *	interrupts. May sleep.
 * @op_mode_leave: Turn off the HW RF kill indication if on
 *	May sleep
 * @start_fw: allocates and inits all the resources for the transport
 *	layer. Also kick a fw image.
 *	May sleep
 * @fw_alive: called when the fw sends alive notification. If the fw provides
 *	the SCD base address in SRAM, then provide it here, or 0 otherwise.
 *	May sleep
 * @stop_device: stops the whole device (embedded CPU put to reset) and stops
 *	the HW. If low_power is true, the NIC will be put in low power state.
 *	From that point on, the HW will be stopped but will still issue an
 *	interrupt if the HW RF kill switch is triggered.
 *	This callback must do the right thing and not crash even if %start_hw()
 *	was called but not &start_fw(). May sleep.
 * @d3_suspend: put the device into the correct mode for WoWLAN during
 *	suspend. This is optional, if not implemented WoWLAN will not be
 *	supported. This callback may sleep.
 * @d3_resume: resume the device after WoWLAN, enabling the opmode to
 *	talk to the WoWLAN image to get its status. This is optional, if not
 *	implemented WoWLAN will not be supported. This callback may sleep.
 * @send_cmd: send a host command. Must return -ERFKILL if RFkill is asserted.
 *	If RFkill is asserted in the middle of a SYNC host command, it must
 *	return -ERFKILL straight away.
 *	May sleep only if CMD_ASYNC is not set
 * @tx: send an skb
 *	Must be atomic
 * @reclaim: free packet until ssn. Returns a list of freed packets.
 *	Must be atomic
 * @txq_enable: setup a queue. To setup an AC queue, use the
 *	iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
 *	this one. The op_mode must not configure the HCMD queue. The scheduler
 *	configuration may be %NULL, in which case the hardware will not be
 *	configured. May sleep.
 * @txq_disable: de-configure a Tx queue to send AMPDUs
 *	Must be atomic
 * @wait_tx_queue_empty: wait until tx queues are empty. May sleep.
 * @freeze_txq_timer: prevents the timer of the queue from firing until the
 *	queue is set to awake. Must be atomic.
 * @block_txq_ptrs: stop updating the write pointers of the Tx queues. Note
 *	that the transport needs to refcount the calls since this function
 *	will be called several times with block = true, and then the queues
 *	need to be unblocked only after the same number of calls with
 *	block = false.
 * @write8: write a u8 to a register at offset ofs from the BAR
 * @write32: write a u32 to a register at offset ofs from the BAR
 * @read32: read a u32 register at offset ofs from the BAR
 * @read_prph: read a DWORD from a periphery register
 * @write_prph: write a DWORD to a periphery register
 * @read_mem: read device's SRAM in DWORD
 * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory
 *	will be zeroed.
 * @configure: configure parameters required by the transport layer from
 *	the op_mode. May be called several times before start_fw, can't be
 *	called after that.
 * @set_pmi: set the power pmi state
 * @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
 *	Sleeping is not allowed between grab_nic_access and
 *	release_nic_access.
 * @release_nic_access: let the NIC go to sleep. The "flags" parameter
 *	must be the same one that was sent before to the grab_nic_access.
 * @set_bits_mask - set SRAM register according to value and mask.
 * @ref: grab a reference to the transport/FW layers, disallowing
 *	certain low power states
 * @unref: release a reference previously taken with @ref. Note that
 *	initially the reference count is 1, making an initial @unref
 *	necessary to allow low power states.
 * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
 *	TX'ed commands and similar. The buffer will be vfree'd by the caller.
 *	Note that the transport must fill in the proper file headers.
 */
struct iwl_trans_ops {

	int (*start_hw)(struct iwl_trans *iwl_trans, bool low_power);
	void (*op_mode_leave)(struct iwl_trans *iwl_trans);
	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
			bool run_in_rfkill);
	int (*update_sf)(struct iwl_trans *trans,
			 struct iwl_sf_region *st_fwrd_space);
	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
	void (*stop_device)(struct iwl_trans *trans, bool low_power);

	void (*d3_suspend)(struct iwl_trans *trans, bool test);
	int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
			 bool test);

	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
		  struct iwl_device_cmd *dev_cmd, int queue);
	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
			struct sk_buff_head *skbs);

	void (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
			   const struct iwl_trans_txq_scd_cfg *cfg,
			   unsigned int queue_wdg_timeout);
	void (*txq_disable)(struct iwl_trans *trans, int queue,
			    bool configure_scd);

	int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);
	void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
				 bool freeze);
	void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);

	void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
	void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
	u32 (*read32)(struct iwl_trans *trans, u32 ofs);
	u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
	void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
	int (*read_mem)(struct iwl_trans *trans, u32 addr,
			void *buf, int dwords);
	int (*write_mem)(struct iwl_trans *trans, u32 addr,
			 const void *buf, int dwords);
	void (*configure)(struct iwl_trans *trans,
			  const struct iwl_trans_config *trans_cfg);
	void (*set_pmi)(struct iwl_trans *trans, bool state);
	bool (*grab_nic_access)(struct iwl_trans *trans, bool silent,
				unsigned long *flags);
	void (*release_nic_access)(struct iwl_trans *trans,
				   unsigned long *flags);
	void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
			      u32 value);
	void (*ref)(struct iwl_trans *trans);
	void (*unref)(struct iwl_trans *trans);
	int  (*suspend)(struct iwl_trans *trans);
	void (*resume)(struct iwl_trans *trans);

	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
						 struct iwl_fw_dbg_trigger_tlv
						 *trigger);
};

/**
 * enum iwl_trans_state - state of the transport layer
 *
 * @IWL_TRANS_NO_FW: no fw has sent an alive response
 * @IWL_TRANS_FW_ALIVE: a fw has sent an alive response
 */
enum iwl_trans_state {
	IWL_TRANS_NO_FW = 0,
	IWL_TRANS_FW_ALIVE	= 1,
};

/**
 * enum iwl_d0i3_mode - d0i3 mode
 *
 * @IWL_D0I3_MODE_OFF - d0i3 is disabled
 * @IWL_D0I3_MODE_ON_IDLE - enter d0i3 when device is idle
 *	(e.g. no active references)
 * @IWL_D0I3_MODE_ON_SUSPEND - enter d0i3 only on suspend
 *	(in case of 'any' trigger)
 */
enum iwl_d0i3_mode {
	IWL_D0I3_MODE_OFF = 0,
	IWL_D0I3_MODE_ON_IDLE,
	IWL_D0I3_MODE_ON_SUSPEND,
};

/**
 * struct iwl_trans - transport common data
 *
 * @ops - pointer to iwl_trans_ops
 * @op_mode - pointer to the op_mode
 * @cfg - pointer to the configuration
 * @status: a bit-mask of transport status flags
 * @dev - pointer to struct device * that represents the device
 * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
 *	0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
 * @hw_id: a u32 with the ID of the device / sub-device.
 *	Set during transport allocation.
 * @hw_id_str: a string with info about HW ID. Set during transport allocation.
 * @pm_support: set to true in start_hw if link pm is supported
 * @ltr_enabled: set to true if the LTR is enabled
 * @num_rx_queues: number of RX queues allocated by the transport;
 *	the transport must set this before calling iwl_drv_start()
 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
 *	The user should use iwl_trans_{alloc,free}_tx_cmd.
 * @dev_cmd_headroom: room needed for the transport's private use before the
 *	device_cmd for Tx - for internal use only
 *	The user should use iwl_trans_{alloc,free}_tx_cmd.
 * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
 *	starting the firmware, used for tracing
 * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
 *	start of the 802.11 header in the @rx_mpdu_cmd
 * @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
 * @dbg_dest_tlv: points to the destination TLV for debug
 * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
 * @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
 * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
 * @paging_req_addr: The location where the FW will upload / download the pages
 *	from. The address is set by the opmode
 * @paging_db: Pointer to the opmode paging data base, the pointer is set by
 *	the opmode.
 * @paging_download_buf: Buffer used for copying all of the pages before
 *	downloading them to the FW. The buffer is allocated in the opmode
 */
struct iwl_trans {
	const struct iwl_trans_ops *ops;
	struct iwl_op_mode *op_mode;
	const struct iwl_cfg *cfg;
	enum iwl_trans_state state;
	unsigned long status;

	struct device *dev;
	u32 max_skb_frags;
	u32 hw_rev;
	u32 hw_id;
	char hw_id_str[52];

	u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;

	bool pm_support;
	bool ltr_enabled;

	u8 num_rx_queues;

	/* The following fields are internal only */
	struct kmem_cache *dev_cmd_pool;
	size_t dev_cmd_headroom;
	char dev_cmd_pool_name[50];

	struct dentry *dbgfs_dir;

#ifdef CONFIG_LOCKDEP
	struct lockdep_map sync_cmd_lockdep_map;
#endif

	u64 dflt_pwr_limit;

	const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
	const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
	struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
	u8 dbg_dest_reg_num;

	/*
	 * Paging parameters - All of the parameters should be set by the
	 * opmode when paging is enabled
	 */
	u32 paging_req_addr;
	struct iwl_fw_paging *paging_db;
	void *paging_download_buf;

	enum iwl_d0i3_mode d0i3_mode;

	bool wowlan_d0i3;

	/* pointer to trans specific struct */
	/* Ensure that this pointer will always be aligned to sizeof pointer */
	char trans_specific[0] __aligned(sizeof(void *));
};

static inline void iwl_trans_configure(struct iwl_trans *trans,
				       const struct iwl_trans_config *trans_cfg)
{
	trans->op_mode = trans_cfg->op_mode;

	trans->ops->configure(trans, trans_cfg);
}

static inline int _iwl_trans_start_hw(struct iwl_trans *trans, bool low_power)
{
	might_sleep();

	return trans->ops->start_hw(trans, low_power);
}

static inline int iwl_trans_start_hw(struct iwl_trans *trans)
{
	return trans->ops->start_hw(trans, true);
}

static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
{
	might_sleep();

	if (trans->ops->op_mode_leave)
		trans->ops->op_mode_leave(trans);

	trans->op_mode = NULL;

	trans->state = IWL_TRANS_NO_FW;
}

static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	might_sleep();

	trans->state = IWL_TRANS_FW_ALIVE;

	trans->ops->fw_alive(trans, scd_addr);
}

static inline int iwl_trans_start_fw(struct iwl_trans *trans,
				     const struct fw_img *fw,
				     bool run_in_rfkill)
{
	might_sleep();

	WARN_ON_ONCE(!trans->rx_mpdu_cmd);

	clear_bit(STATUS_FW_ERROR, &trans->status);
	return trans->ops->start_fw(trans, fw, run_in_rfkill);
}

static inline int iwl_trans_update_sf(struct iwl_trans *trans,
				      struct iwl_sf_region *st_fwrd_space)
{
	might_sleep();

	if (trans->ops->update_sf)
		return trans->ops->update_sf(trans, st_fwrd_space);

	return 0;
}

static inline void _iwl_trans_stop_device(struct iwl_trans *trans,
					  bool low_power)
{
	might_sleep();

	trans->ops->stop_device(trans, low_power);

	trans->state = IWL_TRANS_NO_FW;
}

static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
	_iwl_trans_stop_device(trans, true);
}

static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test)
{
	might_sleep();
	if (trans->ops->d3_suspend)
		trans->ops->d3_suspend(trans, test);
}

static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
				      enum iwl_d3_status *status,
				      bool test)
{
	might_sleep();
	if (!trans->ops->d3_resume)
		return 0;

	return trans->ops->d3_resume(trans, status, test);
}

static inline void iwl_trans_ref(struct iwl_trans *trans)
{
	if (trans->ops->ref)
		trans->ops->ref(trans);
}

static inline void iwl_trans_unref(struct iwl_trans *trans)
{
	if (trans->ops->unref)
		trans->ops->unref(trans);
}

static inline int iwl_trans_suspend(struct iwl_trans *trans)
{
	if (!trans->ops->suspend)
		return 0;

	return trans->ops->suspend(trans);
}

static inline void iwl_trans_resume(struct iwl_trans *trans)
{
	if (trans->ops->resume)
		trans->ops->resume(trans);
}

static inline struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans,
		    struct iwl_fw_dbg_trigger_tlv *trigger)
{
	if (!trans->ops->dump_data)
		return NULL;
	return trans->ops->dump_data(trans, trigger);
}

static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
				     struct iwl_host_cmd *cmd)
{
	int ret;

	if (unlikely(!(cmd->flags & CMD_SEND_IN_RFKILL) &&
		     test_bit(STATUS_RFKILL, &trans->status)))
		return -ERFKILL;

	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	if (WARN_ON((cmd->flags & CMD_WANT_ASYNC_CALLBACK) &&
		    !(cmd->flags & CMD_ASYNC)))
		return -EINVAL;

	if (!(cmd->flags & CMD_ASYNC))
		lock_map_acquire_read(&trans->sync_cmd_lockdep_map);

	ret = trans->ops->send_cmd(trans, cmd);

	if (!(cmd->flags & CMD_ASYNC))
		lock_map_release(&trans->sync_cmd_lockdep_map);

	return ret;
}

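/*
 * Illustrative sketch (not part of the driver API): sending a synchronous
 * host command and consuming its response. The command id, the "cmd_data"
 * payload layout and process_resp() are hypothetical, used only for the
 * example.
 *
 *	struct cmd_data data = {};
 *	struct iwl_host_cmd hcmd = {
 *		.id = iwl_cmd_id(0x20, IWL_ALWAYS_LONG_GROUP, 0),
 *		.flags = CMD_WANT_SKB,
 *		.data = { &data, },
 *		.len = { sizeof(data), },
 *	};
 *	int ret = iwl_trans_send_cmd(trans, &hcmd);
 *
 *	if (!ret) {
 *		(resp_pkt is valid because CMD_WANT_SKB was set)
 *		process_resp(hcmd.resp_pkt);
 *		iwl_free_resp(&hcmd);
 *	}
 */
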
static inline struct iwl_device_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
	u8 *dev_cmd_ptr = kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);

	if (unlikely(dev_cmd_ptr == NULL))
		return NULL;

	return (struct iwl_device_cmd *)
			(dev_cmd_ptr + trans->dev_cmd_headroom);
}

static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
					 struct iwl_device_cmd *dev_cmd)
{
	u8 *dev_cmd_ptr = (u8 *)dev_cmd - trans->dev_cmd_headroom;

	kmem_cache_free(trans->dev_cmd_pool, dev_cmd_ptr);
}

static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
			       struct iwl_device_cmd *dev_cmd, int queue)
{
	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);

	return trans->ops->tx(trans, skb, dev_cmd, queue);
}

static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
				     int ssn, struct sk_buff_head *skbs)
{
	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);

	trans->ops->reclaim(trans, queue, ssn, skbs);
}

static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
					 bool configure_scd)
{
	trans->ops->txq_disable(trans, queue, configure_scd);
}

static inline void
iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
			 const struct iwl_trans_txq_scd_cfg *cfg,
			 unsigned int queue_wdg_timeout)
{
	might_sleep();

	if (unlikely((trans->state != IWL_TRANS_FW_ALIVE)))
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);

	trans->ops->txq_enable(trans, queue, ssn, cfg, queue_wdg_timeout);
}

static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
					int fifo, int sta_id, int tid,
					int frame_limit, u16 ssn,
					unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = sta_id,
		.tid = tid,
		.frame_limit = frame_limit,
		.aggregate = sta_id >= 0,
	};

	iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
}

static inline
void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
			     unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = -1,
		.tid = IWL_MAX_TID_COUNT,
		.frame_limit = IWL_FRAME_LIMIT,
		.aggregate = false,
	};

	iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
}

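/*
 * Illustrative sketch (not part of the driver API): bringing up a data queue
 * for an aggregation session and tearing it down again. The queue, fifo,
 * station and TID numbers are made up for the example.
 *
 *	iwl_trans_txq_enable(trans, 10, 3, 1, 2, IWL_FRAME_LIMIT, ssn,
 *			     wdg_timeout);
 *	(transmit with iwl_trans_tx() on queue 10, then tear it down)
 *	iwl_trans_txq_disable(trans, 10, true);
 */
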
static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
					      unsigned long txqs,
					      bool freeze)
{
	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);

	if (trans->ops->freeze_txq_timer)
		trans->ops->freeze_txq_timer(trans, txqs, freeze);
}

static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
					    bool block)
{
	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);

	if (trans->ops->block_txq_ptrs)
		trans->ops->block_txq_ptrs(trans, block);
}

static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans,
						u32 txqs)
{
	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);

	return trans->ops->wait_tx_queue_empty(trans, txqs);
}

static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	trans->ops->write8(trans, ofs, val);
}

static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	trans->ops->write32(trans, ofs, val);
}

static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read32(trans, ofs);
}

static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read_prph(trans, ofs);
}

static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
					u32 val)
{
	return trans->ops->write_prph(trans, ofs, val);
}

static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
				     void *buf, int dwords)
{
	return trans->ops->read_mem(trans, addr, buf, dwords);
}

#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)		      \
	do {								      \
		if (__builtin_constant_p(bufsize))			      \
			BUILD_BUG_ON((bufsize) % sizeof(u32));		      \
		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
	} while (0)

static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
	u32 value;

	if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
		return 0xa5a5a5a5;

	return value;
}

static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
				      const void *buf, int dwords)
{
	return trans->ops->write_mem(trans, addr, buf, dwords);
}

static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
					u32 val)
{
	return iwl_trans_write_mem(trans, addr, &val, 1);
}

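/*
 * Illustrative sketch (not part of the driver API): accessing device SRAM
 * through the helpers above. The SRAM address is made up for the example.
 *
 *	u32 val = iwl_trans_read_mem32(trans, 0x800000);
 *	iwl_trans_write_mem32(trans, 0x800000, val | BIT(0));
 *
 *	u32 buf[4];
 *	iwl_trans_read_mem_bytes(trans, 0x800000, buf, sizeof(buf));
 */
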
static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
	if (trans->ops->set_pmi)
		trans->ops->set_pmi(trans, state);
}

static inline void
iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
{
	trans->ops->set_bits_mask(trans, reg, mask, value);
}

#define iwl_trans_grab_nic_access(trans, silent, flags)	\
	__cond_lock(nic_access,				\
		    likely((trans)->ops->grab_nic_access(trans, silent, flags)))

static inline void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
{
	trans->ops->release_nic_access(trans, flags);
	__release(nic_access);
}

static inline void iwl_trans_fw_error(struct iwl_trans *trans)
{
	if (WARN_ON_ONCE(!trans->op_mode))
		return;

	/* prevent double restarts due to the same erroneous FW */
	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status))
		iwl_op_mode_nic_error(trans->op_mode);
}

/*****************************************************
 * transport helper functions
 *****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
				  const struct iwl_cfg *cfg,
				  const struct iwl_trans_ops *ops,
				  size_t dev_cmd_headroom);
void iwl_trans_free(struct iwl_trans *trans);

/*****************************************************
* driver (transport) register/unregister functions
******************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);

#endif /* __iwl_trans_h__ */