/******************************************************************************
 *
 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"

struct iwl_host_cmd;

/* This file includes the declarations that are internal to the
 * trans_pcie layer */

struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	struct list_head list;
};

/**
 * struct isr_statistics - interrupt statistics
 *
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

/**
 * struct iwl_rxq - Rx queue
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @pool: pool of all the Rx buffers owned by this queue
 * @queue: pointers to the Rx buffers currently given to the device
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @write_actual: last write index actually written to the device
 * @rx_free: list of free SKBs for use
 * @rx_used: List of Rx buffers with no SKB
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the members of this struct
 *
 * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	__le32 *bd;
	dma_addr_t bd_dma;
	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
	u32 read;
	u32 write;
	u32 free_count;
	u32 write_actual;
	struct list_head rx_free;
	struct list_head rx_used;
	int need_update;
	struct iwl_rb_status *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index -- current index
 * @n_bd -- total number of entries in queue (must be power of 2)
 */
static inline int iwl_queue_inc_wrap(int index, int n_bd)
{
	return ++index & (n_bd - 1);
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index -- current index
 * @n_bd -- total number of entries in queue (must be power of 2)
 */
static inline int iwl_queue_dec_wrap(int index, int n_bd)
{
	return --index & (n_bd - 1);
}
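
/*
 * For illustration: since n_bd must be a power of 2, the mask above wraps
 * the index without a branch or modulo, e.g. with n_bd == 256:
 *
 *	iwl_queue_inc_wrap(255, 256) == 0
 *	iwl_queue_dec_wrap(0, 256)   == 255
 */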

struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;

	DEFINE_DMA_UNMAP_ADDR(mapping);
	DEFINE_DMA_UNMAP_LEN(len);

	u32 flags;
};

/*
 * Generic queue structure
 *
 * Contains common data for Rx and Tx queues.
 *
 * Note the difference between n_bd and n_window: the hardware
 * always assumes 256 descriptors, so n_bd is always 256 (unless
 * future hardware changes that). For the normal TX queues,
 * n_window, which is the size of the software queue data, is
 * also 256; however, for the command queue, n_window is only
 * 32 since we don't need that many commands pending. Since the
 * HW still uses 256 BDs for DMA, n_bd stays 256. As a result,
 * the software buffers (@entries in struct iwl_txq) only have
 * 32 entries, while the HW buffers (@tfds in the same struct)
 * have 256.
 * This means that we end up with the following:
 *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *  SW entries:           | 0      | ... | 31          |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_queue {
	int n_bd;              /* number of BDs in this queue */
	int write_ptr;         /* first empty entry (index), host write pointer */
	int read_ptr;          /* last used entry (index), host read pointer */
	/* use for monitoring and recovering the stuck queue */
	dma_addr_t dma_addr;   /* physical addr for BD's */
	int n_window;	       /* safe queue window */
	u32 id;
	int low_mark;	       /* low watermark, resume queue if free
				* space more than this */
	int high_mark;         /* high watermark, stop queue if free
				* space less than this */
};
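
/*
 * For illustration: mapping a HW index into the SW window is just a mask
 * with (n_window - 1), as get_cmd_index() below does. With the command
 * queue's n_window of 32:
 *
 *	sw_idx = hw_idx & (32 - 1);
 *
 * so HW indices 5, 37, 69, ... all land on SW entry 5.
 */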

#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32

struct iwl_pcie_txq_entry {
	struct iwl_device_cmd *cmd;
	struct iwl_device_cmd *copy_cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

/**
 * struct iwl_txq - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: transmit frame descriptors (DMA memory)
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans_pcie: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @active: stores if queue is active
 *
 * A Tx queue consists of a circular buffer of BDs (a.k.a. TFDs, transmit
 * frame descriptors) and required locking structures.
 */
struct iwl_txq {
	struct iwl_queue q;
	struct iwl_tfd *tfds;
	struct iwl_pcie_txq_entry *entries;
	spinlock_t lock;
	struct timer_list stuck_timer;
	struct iwl_trans_pcie *trans_pcie;
	u8 need_update;
	u8 active;
};

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_replenish: work that will be called when buffers need to be allocated
 * @drv: pointer to iwl_drv
 * @trans: pointer to the generic transport area
 * @irq: the irq number for the device
 * @irq_requested: true when the irq has been requested
 * @scd_base_addr: scheduler base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied
 * @ucode_write_waitq: wait queue for uCode load
 * @status: transport specific status flags
 * @cmd_queue: command queue number
 * @rx_buf_size_8k: 8 kB RX buffer size
 * @rx_page_order: page order for receive buffer size
 * @wd_timeout: queue watchdog timeout (jiffies)
 */
struct iwl_trans_pcie {
	struct iwl_rxq rxq;
	struct work_struct rx_replenish;
	struct iwl_trans *trans;
	struct iwl_drv *drv;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	u32 inta;
	bool use_ict;
	bool irq_requested;
	struct tasklet_struct irq_tasklet;
	struct isr_statistics isr_stats;

	unsigned int irq;
	spinlock_t irq_lock;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	struct iwl_txq *txq;
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;

	unsigned long status;
	u8 cmd_queue;
	u8 cmd_fifo;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];

	bool rx_buf_size_8k;
	u32 rx_page_order;

	const char **command_names;

	/* queue watchdog */
	unsigned long wd_timeout;
};

/**
 * enum iwl_pcie_status: status of the PCIe transport
 * @STATUS_HCMD_ACTIVE: a SYNC command is being processed
 * @STATUS_DEVICE_ENABLED: APM is enabled
 * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
 * @STATUS_INT_ENABLED: interrupts are enabled
 * @STATUS_RFKILL: the HW RFkill switch is in KILL position
 * @STATUS_FW_ERROR: the fw is in error state
 */
enum iwl_pcie_status {
	STATUS_HCMD_ACTIVE,
	STATUS_DEVICE_ENABLED,
	STATUS_TPOWER_PMI,
	STATUS_INT_ENABLED,
	STATUS_RFKILL,
	STATUS_FW_ERROR,
};

#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
	((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific))

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}
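
/*
 * For illustration: the macro and the helper above are inverses of each
 * other, since trans_specific is embedded inside struct iwl_trans:
 *
 *	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 *	WARN_ON(iwl_trans_pcie_get_trans(trans_pcie) != trans);
 */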

struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
* RX
******************************************************/
void iwl_pcie_rx_replenish_work(struct work_struct *data);
void iwl_pcie_rx_replenish(struct iwl_trans *trans);
void iwl_pcie_tasklet(struct iwl_trans *trans);
void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_rxq *q);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr_ict(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_pcie_tx_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
			  dma_addr_t addr, u16 len, u8 reset);
int iwl_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb, int handler_status);
void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
				      struct iwl_txq *txq, u16 byte_cnt);
void iwl_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
			 int sta_id, int tid, int frame_limit, u16 ssn);
void iwl_pcie_txq_disable(struct iwl_trans *trans, int queue);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
			   enum dma_data_direction dma_dir);
int iwl_pcie_txq_reclaim(struct iwl_trans *trans, int txq_id, int index,
			 struct sk_buff_head *skbs);
void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id);
int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id);
int iwl_queue_space(const struct iwl_queue *q);

/*****************************************************
* Error handling
******************************************************/
int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf);
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);

	/* disable interrupts from uCode/NIC to host */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* acknowledge/clear/reset any interrupts still pending
	 * from uCode or flow handler (Rx/Tx DMA) */
	iwl_write32(trans, CSR_INT, 0xffffffff);
	iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans_pcie->status);
	iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	iwl_write32(trans, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
}

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id);
	}
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->q.id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id);
	} else
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->q.id);
}

static inline int iwl_queue_used(const struct iwl_queue *q, int i)
{
	return q->write_ptr >= q->read_ptr ?
		(i >= q->read_ptr && i < q->write_ptr) :
		!(i < q->read_ptr && i >= q->write_ptr);
}

static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
{
	return index & (q->n_window - 1);
}
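
/*
 * For illustration: with read_ptr == 4 and write_ptr == 7, only indices
 * 4, 5 and 6 are "used"; the second branch of iwl_queue_used() handles the
 * wrapped case where write_ptr < read_ptr.
 */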

static inline const char *get_cmd_string(struct iwl_trans_pcie *trans_pcie,
					 u8 cmd)
{
	if (!trans_pcie->command_names || !trans_pcie->command_names[cmd])
		return "UNKNOWN";
	return trans_pcie->command_names[cmd];
}

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

#endif /* __iwl_trans_int_pcie_h__ */