/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#ifndef __iwl_trans_h__
#define __iwl_trans_h__

#include <linux/ieee80211.h>
#include <linux/mm.h> /* for page_address */
#include <linux/lockdep.h>

#include "iwl-debug.h"
#include "iwl-config.h"
#include "iwl-fw.h"
#include "iwl-op-mode.h"

/**
 * DOC: Transport layer - what is it?
 *
 * The transport layer is the layer that deals with the HW directly. It provides
 * an abstraction of the underlying HW to the upper layer. The transport layer
 * doesn't provide any policy, algorithm or anything of this kind, but only
 * mechanisms to make the HW do something. It is not completely stateless but
 * close to it.
 * We will have an implementation for each different supported bus.
 */

/**
 * DOC: Life cycle of the transport layer
 *
 * The transport layer has a very precise life cycle.
 *
 *	1) A helper function is called during the module initialization and
 *	   registers the bus driver's ops with the transport's alloc function.
 *	2) Bus's probe calls to the transport layer's allocation functions.
 *	   Of course this function is bus specific.
 *	3) This allocation functions will spawn the upper layer which will
 *	   register mac80211.
 *
 *	4) At some point (i.e. mac80211's start call), the op_mode will call
 *	   the following sequence:
 *	   start_hw
 *	   start_fw
 *
 *	5) Then when finished (or reset):
 *	   stop_device
 *
 *	6) Eventually, the free function will be called.
 */
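
/*
 * Illustrative sketch only (not a sequence mandated by this header): a
 * hypothetical op_mode following the life cycle above. The fw_img and
 * scd_base_addr values are assumptions and error handling is elided.
 *
 *	ret = iwl_trans_start_hw(trans);
 *	if (!ret)
 *		ret = iwl_trans_start_fw(trans, fw_img, false);
 *	...
 *	iwl_trans_fw_alive(trans, scd_base_addr);	(on the ALIVE notification)
 *	...
 *	iwl_trans_stop_device(trans);
 */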

/**
 * DOC: Host command section
 *
 * A host command is a command issued by the upper layer to the fw. There are
 * several versions of fw that have several APIs. The transport layer is
 * completely agnostic to these differences.
 * The transport does provide helper functionality (i.e. SYNC / ASYNC mode).
 */
#define SEQ_TO_QUEUE(s)	(((s) >> 8) & 0x1f)
#define QUEUE_TO_SEQ(q)	(((q) & 0x1f) << 8)
#define SEQ_TO_INDEX(s)	((s) & 0xff)
#define INDEX_TO_SEQ(i)	((i) & 0xff)
#define SEQ_RX_FRAME	cpu_to_le16(0x8000)

/*
 * These functions retrieve specific information from the id field in
 * the iwl_host_cmd struct, which contains the command id, the group id
 * and the version of the command; iwl_cmd_id() goes the other way and
 * builds such an id from its parts.
 */
static inline u8 iwl_cmd_opcode(u32 cmdid)
{
	return cmdid & 0xFF;
}

static inline u8 iwl_cmd_groupid(u32 cmdid)
{
	return ((cmdid & 0xFF00) >> 8);
}

static inline u8 iwl_cmd_version(u32 cmdid)
{
	return ((cmdid & 0xFF0000) >> 16);
}

static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version)
{
	return opcode + (groupid << 8) + (version << 16);
}
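
/*
 * For illustration only, a round trip through the helpers above; the
 * group/opcode/version numbers are arbitrary:
 *
 *	u32 cmdid = iwl_cmd_id(0x2, 0x4, 0);	builds 0x0402
 *
 *	iwl_cmd_groupid(cmdid)			yields 0x4
 *	iwl_cmd_opcode(cmdid)			yields 0x2
 *	iwl_cmd_version(cmdid)			yields 0
 */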

/* make u16 wide id out of u8 group and opcode */
#define WIDE_ID(grp, opcode) ((grp << 8) | opcode)

/* due to the conversion, this group is special; new groups
 * should be defined in the appropriate fw-api header files
 */
#define IWL_ALWAYS_LONG_GROUP	1

/**
 * struct iwl_cmd_header
 *
 * This header format appears in the beginning of each command sent from the
 * driver, and each response/notification received from uCode.
 */
struct iwl_cmd_header {
	u8 cmd;		/* Command ID:  REPLY_RXON, etc. */
	u8 group_id;
	/*
	 * The driver sets up the sequence number to values of its choosing.
	 * uCode does not use this value, but passes it back to the driver
	 * when sending the response to each driver-originated command, so
	 * the driver can match the response to the command.  Since the values
	 * don't get used by uCode, the driver may set up an arbitrary format.
	 *
	 * There is one exception:  uCode sets bit 15 when it originates
	 * the response/notification, i.e. when the response/notification
	 * is not a direct response to a command sent by the driver.  For
	 * example, uCode issues REPLY_RX when it sends a received frame
	 * to the driver; it is not a direct response to any driver command.
	 *
	 * The Linux driver uses the following format:
	 *
	 *  0:7		tfd index - position within TX queue
	 *  8:12	TX queue id
	 *  13:14	reserved
	 *  15		unsolicited RX or uCode-originated notification
	 */
	__le16 sequence;
} __packed;
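
/*
 * Illustrative sketch: a driver using the format above can recover the
 * queue and TFD index from a response with the SEQ_TO_* helpers defined
 * earlier ("hdr" is assumed to point at the received header):
 *
 *	u16 seq = le16_to_cpu(hdr->sequence);
 *	u8 txq_id = SEQ_TO_QUEUE(seq);
 *	u8 index = SEQ_TO_INDEX(seq);
 */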

/**
 * struct iwl_cmd_header_wide
 *
 * This header format appears in the beginning of each command sent from the
 * driver, and each response/notification received from uCode.
 * This is the wide version that contains more information about the command
 * like length, version and command type
 */
struct iwl_cmd_header_wide {
	u8 cmd;
	u8 group_id;
	__le16 sequence;
	__le16 length;
	u8 reserved;
	u8 version;
} __packed;

#define FH_RSCSR_FRAME_SIZE_MSK		0x00003FFF	/* bits 0-13 */
#define FH_RSCSR_FRAME_INVALID		0x55550000
#define FH_RSCSR_FRAME_ALIGN		0x40

struct iwl_rx_packet {
	/*
	 * The first 4 bytes of the RX frame header contain both the RX frame
	 * size and some flags.
	 * Bit fields:
	 * 31:    flag flush RB request
	 * 30:    flag ignore TC (terminal counter) request
	 * 29:    flag fast IRQ request
	 * 28-14: Reserved
	 * 13-00: RX frame size
	 */
	__le32 len_n_flags;
	struct iwl_cmd_header hdr;
	u8 data[];
} __packed;

static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
{
	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
}

static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
{
	return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
}

/**
 * enum CMD_MODE - how to send the host commands?
 *
 * @CMD_ASYNC: Return right away and don't wait for the response
 * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
 *	the response. The caller needs to call iwl_free_resp when done.
 * @CMD_HIGH_PRIO: The command is high priority - it goes to the front of the
 *	command queue, but after other high priority commands. Valid only
 *	with CMD_ASYNC.
 * @CMD_SEND_IN_IDLE: The command should be sent even when the trans is idle.
 * @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle.
 * @CMD_WAKE_UP_TRANS: The command response should wake up the trans
 *	(i.e. mark it as non-idle).
 * @CMD_TB_BITMAP_POS: Position of the first bit for the TB bitmap. We need to
 *	check that we leave enough room for the TBs bitmap which needs 20 bits.
 */
enum CMD_MODE {
	CMD_ASYNC		= BIT(0),
	CMD_WANT_SKB		= BIT(1),
	CMD_SEND_IN_RFKILL	= BIT(2),
	CMD_HIGH_PRIO		= BIT(3),
	CMD_SEND_IN_IDLE	= BIT(4),
	CMD_MAKE_TRANS_IDLE	= BIT(5),
	CMD_WAKE_UP_TRANS	= BIT(6),

	CMD_TB_BITMAP_POS	= 11,
};

#define DEF_CMD_PAYLOAD_SIZE 320

/**
 * struct iwl_device_cmd
 *
 * For allocation of the command and tx queues, this establishes the overall
 * size of the largest command we send to uCode, except for commands that
 * aren't fully copied and use other TFD space.
 */
struct iwl_device_cmd {
	union {
		struct {
			struct iwl_cmd_header hdr;	/* uCode API */
			u8 payload[DEF_CMD_PAYLOAD_SIZE];
		};
		struct {
			struct iwl_cmd_header_wide hdr_wide;
			u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
					sizeof(struct iwl_cmd_header_wide) +
					sizeof(struct iwl_cmd_header)];
		};
	};
} __packed;

#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))

/*
 * number of transfer buffers (fragments) per transmit frame descriptor;
 * this is just a driver-imposed limit; the hardware supports 20
 */
#define IWL_MAX_CMD_TBS_PER_TFD	2

/**
 * struct iwl_hcmd_dataflag - flag for each one of the chunks of the command
 *
 * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
 *	ring. The transport layer doesn't map the command's buffer to DMA, but
 *	rather copies it to a previously allocated DMA buffer. This flag tells
 *	the transport layer not to copy the command, but to map the existing
 *	buffer (that is passed in) instead. This saves the memcpy and allows
 *	commands that are bigger than the fixed buffer to be submitted.
 *	Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
 * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
 *	chunk internally and free it again after the command completes. This
 *	can (currently) be used only once per command.
 *	Note that a TFD entry after a DUP one cannot be a normal copied one.
 */
enum iwl_hcmd_dataflag {
	IWL_HCMD_DFL_NOCOPY	= BIT(0),
	IWL_HCMD_DFL_DUP	= BIT(1),
};

/**
 * struct iwl_host_cmd - Host command to the uCode
 *
 * @data: array of chunks that composes the data of the host command
 * @resp_pkt: response packet, if %CMD_WANT_SKB was set
 * @_rx_page_order: (internally used to free response packet)
 * @_rx_page_addr: (internally used to free response packet)
 * @flags: can be CMD_*
 * @len: array of the lengths of the chunks in data
 * @dataflags: IWL_HCMD_DFL_*
 * @id: command id of the host command, for wide commands encoding the
 *	version and group as well
 */
struct iwl_host_cmd {
	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_rx_packet *resp_pkt;
	unsigned long _rx_page_addr;
	u32 _rx_page_order;

	u32 flags;
	u32 id;
	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
};

static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
{
	free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
}
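
/*
 * Illustrative sketch (MY_CMD_ID and cmd_data are hypothetical): sending a
 * synchronous command whose response is needed, then releasing the response
 * buffer with iwl_free_resp():
 *
 *	struct iwl_host_cmd hcmd = {
 *		.id = MY_CMD_ID,
 *		.flags = CMD_WANT_SKB,
 *		.data = { &cmd_data, },
 *		.len = { sizeof(cmd_data), },
 *	};
 *	int ret = iwl_trans_send_cmd(trans, &hcmd);
 *
 *	if (!ret) {
 *		... inspect hcmd.resp_pkt ...
 *		iwl_free_resp(&hcmd);
 *	}
 */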

struct iwl_rx_cmd_buffer {
	struct page *_page;
	int _offset;
	bool _page_stolen;
	u32 _rx_page_order;
	unsigned int truesize;
};

static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
{
	return (void *)((unsigned long)page_address(r->_page) + r->_offset);
}

static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
{
	return r->_offset;
}

static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
{
	r->_page_stolen = true;
	get_page(r->_page);
	return r->_page;
}

static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
{
	__free_pages(r->_page, r->_rx_page_order);
}
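
/*
 * Illustrative sketch: an RX handler typically obtains the packet from the
 * iwl_rx_cmd_buffer handed to it by the transport and sizes the payload
 * with the helpers above ("rxb" is an assumed parameter):
 *
 *	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 *	u32 payload_len = iwl_rx_packet_payload_len(pkt);
 */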

#define MAX_NO_RECLAIM_CMDS	6

#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))

/*
 * Maximum number of HW queues the transport layer
 * currently supports
 */
#define IWL_MAX_HW_QUEUES		32
#define IWL_MAX_TID_COUNT	8
#define IWL_FRAME_LIMIT	64
#define IWL_MAX_RX_HW_QUEUES	16

/**
 * enum iwl_wowlan_status - WoWLAN image/device status
 * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
 * @IWL_D3_STATUS_RESET: device was reset while suspended
 */
enum iwl_d3_status {
	IWL_D3_STATUS_ALIVE,
	IWL_D3_STATUS_RESET,
};

/**
 * enum iwl_trans_status: transport status flags
 * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
 * @STATUS_DEVICE_ENABLED: APM is enabled
 * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
 * @STATUS_INT_ENABLED: interrupts are enabled
 * @STATUS_RFKILL: the HW RFkill switch is in KILL position
 * @STATUS_FW_ERROR: the fw is in error state
 * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
 *	are sent
 * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
 * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
 */
enum iwl_trans_status {
	STATUS_SYNC_HCMD_ACTIVE,
	STATUS_DEVICE_ENABLED,
	STATUS_TPOWER_PMI,
	STATUS_INT_ENABLED,
	STATUS_RFKILL,
	STATUS_FW_ERROR,
	STATUS_TRANS_GOING_IDLE,
	STATUS_TRANS_IDLE,
	STATUS_TRANS_DEAD,
};

static inline int
iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_4K:
		return get_order(4 * 1024);
	case IWL_AMSDU_8K:
		return get_order(8 * 1024);
	case IWL_AMSDU_12K:
		return get_order(12 * 1024);
	default:
		WARN_ON(1);
		return -1;
	}
}

/**
 * struct iwl_trans_config - transport configuration
 *
 * @op_mode: pointer to the upper layer.
 * @cmd_queue: the index of the command queue.
 *	Must be set before start_fw.
 * @cmd_fifo: the fifo for host commands
 * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue.
 * @no_reclaim_cmds: Some devices erroneously don't set the
 *	SEQ_RX_FRAME bit on some notifications, this is the
 *	list of such notifications to filter. Max length is
 *	%MAX_NO_RECLAIM_CMDS.
 * @n_no_reclaim_cmds: # of commands in list
 * @rx_buf_size: RX buffer size needed for A-MSDUs
 *	if unset 4k will be the RX buffer size
 * @bc_table_dword: set to true if the BC table expects the byte count to be
 *	in DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @wide_cmd_header: firmware supports wide host command header
 * @command_names: array of command names, must be 256 entries
 *	(one for each command); for debugging only
 * @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until
 *	we get the ALIVE from the uCode
 */
struct iwl_trans_config {
	struct iwl_op_mode *op_mode;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	const u8 *no_reclaim_cmds;
	unsigned int n_no_reclaim_cmds;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool wide_cmd_header;
	const char *const *command_names;

	u32 sdio_adma_addr;
};
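
/*
 * Illustrative sketch (the queue/fifo numbers are placeholders, not values
 * required by this header): an op_mode filling a minimal configuration and
 * handing it to the transport via iwl_trans_configure():
 *
 *	struct iwl_trans_config trans_cfg = {
 *		.op_mode = op_mode,
 *		.cmd_queue = 9,
 *		.cmd_fifo = 7,
 *		.rx_buf_size = IWL_AMSDU_4K,
 *		.wide_cmd_header = true,
 *	};
 *
 *	iwl_trans_configure(trans, &trans_cfg);
 */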

struct iwl_trans_dump_data {
	u32 len;
	u8 data[];
};

struct iwl_trans;

struct iwl_trans_txq_scd_cfg {
	u8 fifo;
	s8 sta_id;
	u8 tid;
	bool aggregate;
	int frame_limit;
};

/**
 * struct iwl_trans_ops - transport specific operations
 *
 * All the handlers MUST be implemented
 *
 * @start_hw: starts the HW. If low_power is true, the NIC needs to be taken
 *	out of a low power state. From that point on, the HW can send
 *	interrupts. May sleep.
 * @op_mode_leave: Turn off the HW RF kill indication if on
 *	May sleep
 * @start_fw: allocates and inits all the resources for the transport
 *	layer. Also kick a fw image.
 *	May sleep
 * @fw_alive: called when the fw sends alive notification. If the fw provides
 *	the SCD base address in SRAM, then provide it here, or 0 otherwise.
 *	May sleep
 * @stop_device: stops the whole device (embedded CPU put to reset) and stops
 *	the HW. If low_power is true, the NIC will be put in low power state.
 *	From that point on, the HW will be stopped but will still issue an
 *	interrupt if the HW RF kill switch is triggered.
 *	This callback must do the right thing and not crash even if %start_hw()
 *	was called but not %start_fw(). May sleep.
 * @d3_suspend: put the device into the correct mode for WoWLAN during
 *	suspend. This is optional, if not implemented WoWLAN will not be
 *	supported. This callback may sleep.
 * @d3_resume: resume the device after WoWLAN, enabling the opmode to
 *	talk to the WoWLAN image to get its status. This is optional, if not
 *	implemented WoWLAN will not be supported. This callback may sleep.
 * @send_cmd: send a host command. Must return -ERFKILL if RFkill is asserted.
 *	If RFkill is asserted in the middle of a SYNC host command, it must
 *	return -ERFKILL straight away.
 *	May sleep only if CMD_ASYNC is not set
 * @tx: send an skb
 *	Must be atomic
 * @reclaim: free packets up to ssn. Returns a list of freed packets.
 *	Must be atomic
 * @txq_enable: setup a queue. To setup an AC queue, use the
 *	iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
 *	this one. The op_mode must not configure the HCMD queue. The scheduler
 *	configuration may be %NULL, in which case the hardware will not be
 *	configured. May sleep.
 * @txq_disable: de-configure a Tx queue to send AMPDUs
 *	Must be atomic
 * @wait_tx_queue_empty: wait until tx queues are empty. May sleep.
 * @freeze_txq_timer: prevents the timer of the queue from firing until the
 *	queue is set to awake. Must be atomic.
 * @block_txq_ptrs: stop updating the write pointers of the Tx queues. Note
 *	that the transport needs to refcount the calls since this function
 *	will be called several times with block = true, and then the queues
 *	need to be unblocked only after the same number of calls with
 *	block = false.
 * @write8: write a u8 to a register at offset ofs from the BAR
 * @write32: write a u32 to a register at offset ofs from the BAR
 * @read32: read a u32 register at offset ofs from the BAR
 * @read_prph: read a DWORD from a periphery register
 * @write_prph: write a DWORD to a periphery register
 * @read_mem: read device's SRAM in DWORD
 * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory
 *	will be zeroed.
 * @configure: configure parameters required by the transport layer from
 *	the op_mode. May be called several times before start_fw, can't be
 *	called after that.
 * @set_pmi: set the power pmi state
 * @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
 *	Sleeping is not allowed between grab_nic_access and
 *	release_nic_access.
 * @release_nic_access: let the NIC go to sleep. The "flags" parameter
 *	must be the same one that was sent before to the grab_nic_access.
 * @set_bits_mask - set SRAM register according to value and mask.
 * @ref: grab a reference to the transport/FW layers, disallowing
 *	certain low power states
 * @unref: release a reference previously taken with @ref. Note that
 *	initially the reference count is 1, making an initial @unref
 *	necessary to allow low power states.
 * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
 *	TX'ed commands and similar. The buffer will be vfree'd by the caller.
 *	Note that the transport must fill in the proper file headers.
 */
struct iwl_trans_ops {

	int (*start_hw)(struct iwl_trans *iwl_trans, bool low_power);
	void (*op_mode_leave)(struct iwl_trans *iwl_trans);
	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
			bool run_in_rfkill);
	int (*update_sf)(struct iwl_trans *trans,
			 struct iwl_sf_region *st_fwrd_space);
	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
	void (*stop_device)(struct iwl_trans *trans, bool low_power);

	void (*d3_suspend)(struct iwl_trans *trans, bool test);
	int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
			 bool test);

	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
		  struct iwl_device_cmd *dev_cmd, int queue);
	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
			struct sk_buff_head *skbs);

	void (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
			   const struct iwl_trans_txq_scd_cfg *cfg,
			   unsigned int queue_wdg_timeout);
	void (*txq_disable)(struct iwl_trans *trans, int queue,
			    bool configure_scd);

	int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);
	void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
				 bool freeze);
	void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);

	void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
	void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
	u32 (*read32)(struct iwl_trans *trans, u32 ofs);
	u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
	void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
	int (*read_mem)(struct iwl_trans *trans, u32 addr,
			void *buf, int dwords);
	int (*write_mem)(struct iwl_trans *trans, u32 addr,
			 const void *buf, int dwords);
	void (*configure)(struct iwl_trans *trans,
			  const struct iwl_trans_config *trans_cfg);
	void (*set_pmi)(struct iwl_trans *trans, bool state);
	bool (*grab_nic_access)(struct iwl_trans *trans, bool silent,
				unsigned long *flags);
	void (*release_nic_access)(struct iwl_trans *trans,
				   unsigned long *flags);
	void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
			      u32 value);
	void (*ref)(struct iwl_trans *trans);
	void (*unref)(struct iwl_trans *trans);
	int  (*suspend)(struct iwl_trans *trans);
	void (*resume)(struct iwl_trans *trans);

	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
						 struct iwl_fw_dbg_trigger_tlv
						 *trigger);
};

/**
 * enum iwl_trans_state - state of the transport layer
 *
 * @IWL_TRANS_NO_FW: no fw has sent an alive response
 * @IWL_TRANS_FW_ALIVE: a fw has sent an alive response
 */
enum iwl_trans_state {
	IWL_TRANS_NO_FW = 0,
	IWL_TRANS_FW_ALIVE	= 1,
};

/**
 * enum iwl_d0i3_mode - d0i3 mode
 *
 * @IWL_D0I3_MODE_OFF - d0i3 is disabled
 * @IWL_D0I3_MODE_ON_IDLE - enter d0i3 when device is idle
 *	(e.g. no active references)
 * @IWL_D0I3_MODE_ON_SUSPEND - enter d0i3 only on suspend
 *	(in case of 'any' trigger)
 */
enum iwl_d0i3_mode {
	IWL_D0I3_MODE_OFF = 0,
	IWL_D0I3_MODE_ON_IDLE,
	IWL_D0I3_MODE_ON_SUSPEND,
};

/**
 * struct iwl_trans - transport common data
 *
 * @ops - pointer to iwl_trans_ops
 * @op_mode - pointer to the op_mode
 * @cfg - pointer to the configuration
 * @status: a bit-mask of transport status flags
 * @dev - pointer to struct device * that represents the device
 * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
 *	0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
 * @hw_id: a u32 with the ID of the device / sub-device.
 *	Set during transport allocation.
 * @hw_id_str: a string with info about HW ID. Set during transport allocation.
 * @pm_support: set to true in start_hw if link pm is supported
 * @ltr_enabled: set to true if the LTR is enabled
 * @num_rx_queues: number of RX queues allocated by the transport;
 *	the transport must set this before calling iwl_drv_start()
 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
 *	The user should use iwl_trans_{alloc,free}_tx_cmd.
 * @dev_cmd_headroom: room needed for the transport's private use before the
 *	device_cmd for Tx - for internal use only
 *	The user should use iwl_trans_{alloc,free}_tx_cmd.
 * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
 *	starting the firmware, used for tracing
 * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
 *	start of the 802.11 header in the @rx_mpdu_cmd
 * @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
 * @dbg_dest_tlv: points to the destination TLV for debug
 * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
 * @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
 * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
 * @paging_req_addr: The location where the FW will upload / download the pages
 *	from. The address is set by the opmode
 * @paging_db: Pointer to the opmode paging data base, the pointer is set by
 *	the opmode.
 * @paging_download_buf: Buffer used for copying all of the pages before
 *	downloading them to the FW. The buffer is allocated in the opmode
 */
struct iwl_trans {
	const struct iwl_trans_ops *ops;
	struct iwl_op_mode *op_mode;
	const struct iwl_cfg *cfg;
	enum iwl_trans_state state;
	unsigned long status;

	struct device *dev;
	u32 max_skb_frags;
	u32 hw_rev;
	u32 hw_id;
	char hw_id_str[52];

	u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;

	bool pm_support;
	bool ltr_enabled;

	u8 num_rx_queues;

	/* The following fields are internal only */
	struct kmem_cache *dev_cmd_pool;
	size_t dev_cmd_headroom;
	char dev_cmd_pool_name[50];

	struct dentry *dbgfs_dir;

#ifdef CONFIG_LOCKDEP
	struct lockdep_map sync_cmd_lockdep_map;
#endif

	u64 dflt_pwr_limit;

	const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
	const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
	struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
	u8 dbg_dest_reg_num;

	/*
	 * Paging parameters - All of the parameters should be set by the
	 * opmode when paging is enabled
	 */
	u32 paging_req_addr;
	struct iwl_fw_paging *paging_db;
	void *paging_download_buf;

	enum iwl_d0i3_mode d0i3_mode;

	bool wowlan_d0i3;

	/* pointer to trans specific struct */
	/* Ensure that this pointer will always be aligned to sizeof pointer */
	char trans_specific[0] __aligned(sizeof(void *));
};

static inline void iwl_trans_configure(struct iwl_trans *trans,
				       const struct iwl_trans_config *trans_cfg)
{
	trans->op_mode = trans_cfg->op_mode;

	trans->ops->configure(trans, trans_cfg);
}

static inline int _iwl_trans_start_hw(struct iwl_trans *trans, bool low_power)
{
	might_sleep();

	return trans->ops->start_hw(trans, low_power);
}

static inline int iwl_trans_start_hw(struct iwl_trans *trans)
{
	return trans->ops->start_hw(trans, true);
}

static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
{
	might_sleep();

	if (trans->ops->op_mode_leave)
		trans->ops->op_mode_leave(trans);

	trans->op_mode = NULL;

	trans->state = IWL_TRANS_NO_FW;
}

static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	might_sleep();

	trans->state = IWL_TRANS_FW_ALIVE;

	trans->ops->fw_alive(trans, scd_addr);
}

static inline int iwl_trans_start_fw(struct iwl_trans *trans,
				     const struct fw_img *fw,
				     bool run_in_rfkill)
{
	might_sleep();

	WARN_ON_ONCE(!trans->rx_mpdu_cmd);

	clear_bit(STATUS_FW_ERROR, &trans->status);
	return trans->ops->start_fw(trans, fw, run_in_rfkill);
}

static inline int iwl_trans_update_sf(struct iwl_trans *trans,
				      struct iwl_sf_region *st_fwrd_space)
{
	might_sleep();

	if (trans->ops->update_sf)
		return trans->ops->update_sf(trans, st_fwrd_space);

	return 0;
}

static inline void _iwl_trans_stop_device(struct iwl_trans *trans,
					  bool low_power)
{
	might_sleep();

	trans->ops->stop_device(trans, low_power);

	trans->state = IWL_TRANS_NO_FW;
}

static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
	_iwl_trans_stop_device(trans, true);
}

static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test)
{
	might_sleep();
	if (trans->ops->d3_suspend)
		trans->ops->d3_suspend(trans, test);
}

static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
				      enum iwl_d3_status *status,
				      bool test)
{
	might_sleep();
	if (!trans->ops->d3_resume)
		return 0;

	return trans->ops->d3_resume(trans, status, test);
}

static inline void iwl_trans_ref(struct iwl_trans *trans)
{
	if (trans->ops->ref)
		trans->ops->ref(trans);
}

static inline void iwl_trans_unref(struct iwl_trans *trans)
{
	if (trans->ops->unref)
		trans->ops->unref(trans);
}

static inline int iwl_trans_suspend(struct iwl_trans *trans)
{
	if (!trans->ops->suspend)
		return 0;

	return trans->ops->suspend(trans);
}

static inline void iwl_trans_resume(struct iwl_trans *trans)
{
	if (trans->ops->resume)
		trans->ops->resume(trans);
}

static inline struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans,
		    struct iwl_fw_dbg_trigger_tlv *trigger)
{
	if (!trans->ops->dump_data)
		return NULL;
	return trans->ops->dump_data(trans, trigger);
}

static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
				     struct iwl_host_cmd *cmd)
{
	int ret;

	if (unlikely(!(cmd->flags & CMD_SEND_IN_RFKILL) &&
		     test_bit(STATUS_RFKILL, &trans->status)))
		return -ERFKILL;

	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	if (!(cmd->flags & CMD_ASYNC))
		lock_map_acquire_read(&trans->sync_cmd_lockdep_map);

	ret = trans->ops->send_cmd(trans, cmd);

	if (!(cmd->flags & CMD_ASYNC))
		lock_map_release(&trans->sync_cmd_lockdep_map);

	return ret;
}

static inline struct iwl_device_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
	u8 *dev_cmd_ptr = kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);

	if (unlikely(dev_cmd_ptr == NULL))
		return NULL;

	return (struct iwl_device_cmd *)
			(dev_cmd_ptr + trans->dev_cmd_headroom);
}

static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
					 struct iwl_device_cmd *dev_cmd)
{
	u8 *dev_cmd_ptr = (u8 *)dev_cmd - trans->dev_cmd_headroom;

	kmem_cache_free(trans->dev_cmd_pool, dev_cmd_ptr);
}
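
/*
 * Illustrative sketch of the expected pairing (skb and txq_id are
 * assumptions): a Tx command is taken from the pool, handed to
 * iwl_trans_tx(), and is typically returned to the pool only if the
 * transport rejects it; accepted commands are released on reclaim.
 *
 *	struct iwl_device_cmd *dev_cmd = iwl_trans_alloc_tx_cmd(trans);
 *
 *	if (dev_cmd) {
 *		... fill dev_cmd ...
 *		if (iwl_trans_tx(trans, skb, dev_cmd, txq_id))
 *			iwl_trans_free_tx_cmd(trans, dev_cmd);
 *	}
 */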

static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
			       struct iwl_device_cmd *dev_cmd, int queue)
{
	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);

	return trans->ops->tx(trans, skb, dev_cmd, queue);
}

static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
				     int ssn, struct sk_buff_head *skbs)
{
	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);

	trans->ops->reclaim(trans, queue, ssn, skbs);
}

static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
					 bool configure_scd)
{
	trans->ops->txq_disable(trans, queue, configure_scd);
}

static inline void
iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
			 const struct iwl_trans_txq_scd_cfg *cfg,
			 unsigned int queue_wdg_timeout)
{
	might_sleep();

	if (unlikely((trans->state != IWL_TRANS_FW_ALIVE)))
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);

	trans->ops->txq_enable(trans, queue, ssn, cfg, queue_wdg_timeout);
}

static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
					int fifo, int sta_id, int tid,
					int frame_limit, u16 ssn,
					unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = sta_id,
		.tid = tid,
		.frame_limit = frame_limit,
		.aggregate = sta_id >= 0,
	};

	iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
}

static inline
void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
			     unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = -1,
		.tid = IWL_MAX_TID_COUNT,
		.frame_limit = IWL_FRAME_LIMIT,
		.aggregate = false,
	};

	iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
}

static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
					      unsigned long txqs,
					      bool freeze)
{
	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);

	if (trans->ops->freeze_txq_timer)
		trans->ops->freeze_txq_timer(trans, txqs, freeze);
}

static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
					    bool block)
{
	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);

	if (trans->ops->block_txq_ptrs)
		trans->ops->block_txq_ptrs(trans, block);
}

static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans,
						u32 txqs)
{
	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);

	return trans->ops->wait_tx_queue_empty(trans, txqs);
}

static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	trans->ops->write8(trans, ofs, val);
}

static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	trans->ops->write32(trans, ofs, val);
}

static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read32(trans, ofs);
}

static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read_prph(trans, ofs);
}

static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
					u32 val)
{
	return trans->ops->write_prph(trans, ofs, val);
}

static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
				     void *buf, int dwords)
{
	return trans->ops->read_mem(trans, addr, buf, dwords);
}

#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)		      \
	do {								      \
		if (__builtin_constant_p(bufsize))			      \
			BUILD_BUG_ON((bufsize) % sizeof(u32));		      \
		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
	} while (0)

static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
	u32 value;

	if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
		return 0xa5a5a5a5;

	return value;
}

static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
				      const void *buf, int dwords)
{
	return trans->ops->write_mem(trans, addr, buf, dwords);
}

static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
					u32 val)
{
	return iwl_trans_write_mem(trans, addr, &val, 1);
}

static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
	if (trans->ops->set_pmi)
		trans->ops->set_pmi(trans, state);
}

static inline void
iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
{
	trans->ops->set_bits_mask(trans, reg, mask, value);
}

#define iwl_trans_grab_nic_access(trans, silent, flags)	\
	__cond_lock(nic_access,				\
		    likely((trans)->ops->grab_nic_access(trans, silent, flags)))

static inline void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
{
	trans->ops->release_nic_access(trans, flags);
	__release(nic_access);
}
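
/*
 * Illustrative sketch of the usual access pattern (reg_ofs is a
 * placeholder): no sleeping is allowed between grabbing and releasing
 * NIC access, per the constraint documented on @grab_nic_access.
 *
 *	unsigned long flags;
 *
 *	if (iwl_trans_grab_nic_access(trans, false, &flags)) {
 *		u32 val = iwl_trans_read_prph(trans, reg_ofs);
 *		...
 *		iwl_trans_release_nic_access(trans, &flags);
 *	}
 */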

static inline void iwl_trans_fw_error(struct iwl_trans *trans)
{
	if (WARN_ON_ONCE(!trans->op_mode))
		return;

	/* prevent double restarts due to the same erroneous FW */
	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status))
		iwl_op_mode_nic_error(trans->op_mode);
}

/*****************************************************
 * transport helper functions
 *****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
				  const struct iwl_cfg *cfg,
				  const struct iwl_trans_ops *ops,
				  size_t dev_cmd_headroom);
void iwl_trans_free(struct iwl_trans *trans);

/*****************************************************
* driver (transport) register/unregister functions
******************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);

#endif /* __iwl_trans_h__ */