Commit 8d86422a authored by Tomas Winkler, committed by John W. Linville

iwlwifi: move rx queue read pointer into rxq

This patch moves the rx status/read registers into the iwl_rx_queue
structure. This solution is more memory-hungry, but it is more structured
and provides the needed RX/TX separation.

Signed-off-by: Tomas Winkler <tomas.winkler@intel.com>
Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Parent: 5c5aa3f1
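
In short, the read-index lookup changes as follows: before this patch, a per-device lib op (shared_mem_rx_idx) read the rb_closed word out of the device-specific shared block; after it, every RX queue owns a small DMA-coherent iwl_rb_status block and the index is read directly from rxq->rb_stts->closed_rb_num. A minimal user-space sketch of the new lookup (not driver code; uint16_t and the identity le16_to_cpu() below are simplified stand-ins for the kernel's __le16 and byte-order helpers on a little-endian host):

#include <stdint.h>
#include <stdio.h>

struct iwl_rb_status {              /* same layout as the struct added in iwl-fh.h */
	uint16_t closed_rb_num;     /* [0:11] index of the last RB closed by uCode */
	uint16_t closed_fr_num;
	uint16_t finished_rb_num;
	uint16_t finished_fr_num;
} __attribute__((packed));

static uint16_t le16_to_cpu(uint16_t v)
{
	return v;                   /* no-op stand-in on a little-endian host */
}

/* New path: mirrors iwl_rx_handle(), which masks bits [0:11]. */
static unsigned int rx_read_index(const struct iwl_rb_status *rb_stts)
{
	return le16_to_cpu(rb_stts->closed_rb_num) & 0x0FFF;
}

int main(void)
{
	struct iwl_rb_status stts = { .closed_rb_num = 0xF042 };

	/* only bits [0:11] carry the index, so 0xF042 yields 0x042 */
	printf("uCode read index: 0x%03x\n", rx_read_index(&stts));
	return 0;
}
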
@@ -927,33 +927,6 @@ struct iwl4965_schedq_bc_tbl {
  */
 struct iwl4965_shared {
 	struct iwl4965_schedq_bc_tbl queues_bc_tbls[IWL49_NUM_QUEUES];
-	__le32 rb_closed;
-
-	/* __le32 rb_closed_stts_rb_num:12; */
-#define IWL_rb_closed_stts_rb_num_POS 0
-#define IWL_rb_closed_stts_rb_num_LEN 12
-#define IWL_rb_closed_stts_rb_num_SYM rb_closed
-	/* __le32 rsrv1:4; */
-	/* __le32 rb_closed_stts_rx_frame_num:12; */
-#define IWL_rb_closed_stts_rx_frame_num_POS 16
-#define IWL_rb_closed_stts_rx_frame_num_LEN 12
-#define IWL_rb_closed_stts_rx_frame_num_SYM rb_closed
-	/* __le32 rsrv2:4; */
-
-	__le32 frm_finished;
-	/* __le32 frame_finished_stts_rb_num:12; */
-#define IWL_frame_finished_stts_rb_num_POS 0
-#define IWL_frame_finished_stts_rb_num_LEN 12
-#define IWL_frame_finished_stts_rb_num_SYM frm_finished
-	/* __le32 rsrv3:4; */
-	/* __le32 frame_finished_stts_rx_frame_num:12; */
-#define IWL_frame_finished_stts_rx_frame_num_POS 16
-#define IWL_frame_finished_stts_rx_frame_num_LEN 12
-#define IWL_frame_finished_stts_rx_frame_num_SYM frm_finished
-	/* __le32 rsrv4:4; */
-
-	__le32 padding1;  /* so that allocation will be aligned to 16B */
-	__le32 padding2;
 } __attribute__ ((packed));

 #endif /* __iwl4965_4965_hw_h__ */
@@ -1631,12 +1631,6 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
 }
 #endif

-static int iwl4965_shared_mem_rx_idx(struct iwl_priv *priv)
-{
-	struct iwl4965_shared *s = priv->shared_virt;
-	return le32_to_cpu(s->rb_closed) & 0xFFF;
-}
-
 static int iwl4965_alloc_shared_mem(struct iwl_priv *priv)
 {
 	priv->shared_virt = pci_alloc_consistent(priv->pci_dev,
@@ -1647,8 +1641,6 @@ static int iwl4965_alloc_shared_mem(struct iwl_priv *priv)
 	memset(priv->shared_virt, 0, sizeof(struct iwl4965_shared));

-	priv->rb_closed_offset = offsetof(struct iwl4965_shared, rb_closed);
-
 	return 0;
 }
@@ -2306,7 +2298,6 @@ static struct iwl_lib_ops iwl4965_lib = {
 	.set_hw_params = iwl4965_hw_set_hw_params,
 	.alloc_shared_mem = iwl4965_alloc_shared_mem,
 	.free_shared_mem = iwl4965_free_shared_mem,
-	.shared_mem_rx_idx = iwl4965_shared_mem_rx_idx,
 	.txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
 	.txq_set_sched = iwl4965_txq_set_sched,
 	.txq_agg_enable = iwl4965_txq_agg_enable,
...
@@ -96,38 +96,9 @@ struct iwl5000_schedq_bc_tbl {
 /**
  * struct iwl5000_shared
- * @rb_closed
- * address is provided to FH_RSCSR_CHNL0_STTS_WPTR_REG
  */
 struct iwl5000_shared {
 	struct iwl5000_schedq_bc_tbl queues_bc_tbls[IWL50_NUM_QUEUES];
-	__le32 rb_closed;
-
-	/* __le32 rb_closed_stts_rb_num:12; */
-#define IWL_rb_closed_stts_rb_num_POS 0
-#define IWL_rb_closed_stts_rb_num_LEN 12
-#define IWL_rb_closed_stts_rb_num_SYM rb_closed
-	/* __le32 rsrv1:4; */
-	/* __le32 rb_closed_stts_rx_frame_num:12; */
-#define IWL_rb_closed_stts_rx_frame_num_POS 16
-#define IWL_rb_closed_stts_rx_frame_num_LEN 12
-#define IWL_rb_closed_stts_rx_frame_num_SYM rb_closed
-	/* __le32 rsrv2:4; */
-
-	__le32 frm_finished;
-	/* __le32 frame_finished_stts_rb_num:12; */
-#define IWL_frame_finished_stts_rb_num_POS 0
-#define IWL_frame_finished_stts_rb_num_LEN 12
-#define IWL_frame_finished_stts_rb_num_SYM frm_finished
-	/* __le32 rsrv3:4; */
-	/* __le32 frame_finished_stts_rx_frame_num:12; */
-#define IWL_frame_finished_stts_rx_frame_num_POS 16
-#define IWL_frame_finished_stts_rx_frame_num_LEN 12
-#define IWL_frame_finished_stts_rx_frame_num_SYM frm_finished
-	/* __le32 rsrv4:4; */
-
-	__le32 padding1;  /* so that allocation will be aligned to 16B */
-	__le32 padding2;
 } __attribute__ ((packed));

 #endif /* __iwl_5000_hw_h__ */
...
@@ -863,8 +863,6 @@ static int iwl5000_alloc_shared_mem(struct iwl_priv *priv)
 	memset(priv->shared_virt, 0, sizeof(struct iwl5000_shared));

-	priv->rb_closed_offset = offsetof(struct iwl5000_shared, rb_closed);
-
 	return 0;
 }
@@ -877,12 +875,6 @@ static void iwl5000_free_shared_mem(struct iwl_priv *priv)
 			    priv->shared_phys);
 }

-static int iwl5000_shared_mem_rx_idx(struct iwl_priv *priv)
-{
-	struct iwl5000_shared *s = priv->shared_virt;
-	return le32_to_cpu(s->rb_closed) & 0xFFF;
-}
-
 /**
  * iwl5000_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
  */
@@ -1460,7 +1452,6 @@ static struct iwl_lib_ops iwl5000_lib = {
 	.set_hw_params = iwl5000_hw_set_hw_params,
 	.alloc_shared_mem = iwl5000_alloc_shared_mem,
 	.free_shared_mem = iwl5000_free_shared_mem,
-	.shared_mem_rx_idx = iwl5000_shared_mem_rx_idx,
 	.txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
 	.txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
 	.txq_set_sched = iwl5000_txq_set_sched,
...
@@ -1359,7 +1359,7 @@ void iwl_rx_handle(struct iwl_priv *priv)
 	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
-	r = priv->cfg->ops->lib->shared_mem_rx_idx(priv);
+	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
 	i = rxq->read;

 	/* Rx interrupt, but nothing sent from uCode */
...
@@ -105,7 +105,6 @@ struct iwl_lib_ops {
 	/* ucode shared memory */
 	int (*alloc_shared_mem)(struct iwl_priv *priv);
 	void (*free_shared_mem)(struct iwl_priv *priv);
-	int (*shared_mem_rx_idx)(struct iwl_priv *priv);
 	/* Handling TX */
 	void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv,
 					struct iwl_tx_queue *txq,
...
@@ -301,7 +301,6 @@ struct iwl_host_cmd {
 /**
  * struct iwl_rx_queue - Rx queue
- * @processed: Internal index to last handled Rx packet
  * @read: Shared index to newest available Rx buffer
  * @write: Shared index to oldest written Rx packet
  * @free_count: Number of pre-allocated buffers in rx_free
@@ -316,13 +315,14 @@ struct iwl_rx_queue {
 	dma_addr_t dma_addr;
 	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
 	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
-	u32 processed;
 	u32 read;
 	u32 write;
 	u32 free_count;
 	struct list_head rx_free;
 	struct list_head rx_used;
 	int need_update;
+	struct iwl_rb_status *rb_stts;
+	dma_addr_t rb_stts_dma;
 	spinlock_t lock;
 };
@@ -967,10 +967,9 @@ struct iwl_priv {
 	struct ieee80211_vif *vif;

 	struct iwl_hw_params hw_params;
-	/* driver/uCode shared Tx Byte Counts and Rx status */
+	/* driver/uCode shared Tx Byte Counts */
 	void *shared_virt;
-	int rb_closed_offset;
-	/* Physical Pointer to Tx Byte Counts and Rx status */
+	/* Physical Pointer to Tx Byte Counts */
 	dma_addr_t shared_phys;

 	/* Current association information needed to configure the
...
@@ -403,5 +403,21 @@
 #define TFD_QUEUE_SIZE_BC_DUP	(64)
 #define TFD_QUEUE_BC_SIZE	(TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)

+/**
+ * struct iwl_rb_status - reserve buffer status
+ *	host memory mapped FH registers
+ * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
+ * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
+ * @finished_rb_num [0:11] - Indicates the index of the current RB
+ *	in which the last frame was written
+ * @finished_fr_num [0:11] - Indicates the index of the RX Frame
+ *	which was transferred
+ */
+struct iwl_rb_status {
+	__le16 closed_rb_num;
+	__le16 closed_fr_num;
+	__le16 finished_rb_num;
+	__le16 finished_fr_num;
+} __attribute__ ((packed));
+
 #endif /* !__iwl_fh_h__ */
@@ -317,7 +317,10 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
 	pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
 			    rxq->dma_addr);
+	pci_free_consistent(priv->pci_dev, sizeof(struct iwl_rb_status),
+			    rxq->rb_stts, rxq->rb_stts_dma);
 	rxq->bd = NULL;
+	rxq->rb_stts = NULL;
 }
 EXPORT_SYMBOL(iwl_rx_queue_free);
@@ -334,7 +337,12 @@ int iwl_rx_queue_alloc(struct iwl_priv *priv)
 	/* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
 	rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
 	if (!rxq->bd)
-		return -ENOMEM;
+		goto err_bd;
+
+	rxq->rb_stts = pci_alloc_consistent(dev, sizeof(struct iwl_rb_status),
+					    &rxq->rb_stts_dma);
+	if (!rxq->rb_stts)
+		goto err_rb;

 	/* Fill the rx_used queue with _all_ of the Rx buffers */
 	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
@@ -346,6 +354,12 @@ int iwl_rx_queue_alloc(struct iwl_priv *priv)
 	rxq->free_count = 0;
 	rxq->need_update = 0;
 	return 0;
+
+err_rb:
+	pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+			    rxq->dma_addr);
+err_bd:
+	return -ENOMEM;
 }
 EXPORT_SYMBOL(iwl_rx_queue_alloc);
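
A note on the two-stage allocation added above: iwl_rx_queue_alloc() now unwinds with goto labels so that a failed rb_stts allocation releases the already-allocated RBD ring. A minimal user-space sketch of the same pattern, with malloc/free and made-up sizes standing in for pci_alloc_consistent/pci_free_consistent, purely for illustration:

#include <stdlib.h>

struct rxq {                        /* simplified stand-in for struct iwl_rx_queue */
	void *bd;                   /* RBD ring */
	void *rb_stts;              /* per-queue read/status block */
};

/* Allocate both buffers; on failure free whatever was already obtained. */
static int rxq_alloc(struct rxq *q)
{
	q->bd = malloc(4 * 256);
	if (!q->bd)
		goto err_bd;

	q->rb_stts = malloc(8);
	if (!q->rb_stts)
		goto err_rb;

	return 0;

err_rb:
	free(q->bd);                /* undo the first allocation */
	q->bd = NULL;
err_bd:
	return -1;                  /* the driver returns -ENOMEM here */
}
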
@@ -412,7 +426,7 @@ int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
 	/* Tell device where in DRAM to update its Rx status */
 	iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
-			   (priv->shared_phys + priv->rb_closed_offset) >> 4);
+			   rxq->rb_stts_dma >> 4);

 	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
...
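
One detail worth noting in the iwl_rx_init() change above: the status write-pointer register is given the DMA address shifted right by four bits, i.e. the register effectively holds the address in 16-byte units (which is also why the old shared-memory layout carried padding "so that allocation will be aligned to 16B"). A tiny sketch of that conversion, assuming the coherent buffer is 16-byte aligned; the register name appears only as a comment label here:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* Convert a 16-byte-aligned DMA address to the value the driver writes to
 * FH_RSCSR_CHNL0_STTS_WPTR_REG (it does rxq->rb_stts_dma >> 4). */
static uint32_t stts_wptr_reg_val(uint64_t rb_stts_dma)
{
	assert((rb_stts_dma & 0xF) == 0);   /* must be 16-byte aligned */
	return (uint32_t)(rb_stts_dma >> 4);
}

int main(void)
{
	uint64_t rb_stts_dma = 0x1f3c0ull;  /* example bus address */

	printf("reg value: 0x%x\n", stts_wptr_reg_val(rb_stts_dma));
	return 0;
}
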