Commit 13d866a9 authored by Joe Perches, committed by David S. Miller

s2io.c: Shorten code line length by using intermediate pointers

Repeated variable use and line wrapping are hard to read.
Use temp variables instead of direct references.

struct fifo_info *fifo = &mac_control->fifos[i];
struct ring_info *ring = &mac_control->rings[i];
struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
Signed-off-by: Joe Perches <joe@perches.com>
Acked-by: Sreenivasa Honnur <sreenivasa.honnur@neterion.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 6fce365d
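To show the pattern in isolation, here is a minimal standalone sketch; the struct and field names below are made-up stand-ins for the driver's types, not code from this patch. An intermediate pointer taken once per loop iteration replaces the repeated indexed accesses that forced line wrapping.

/*
 * Illustrative only: demo_fifo_config and its fields are hypothetical
 * stand-ins, used to show why an intermediate pointer shortens lines.
 */
#include <stdio.h>

struct demo_fifo_config {
	int fifo_len;
	int fifo_priority;
};

static struct demo_fifo_config cfg[4];

int main(void)
{
	int i;

	/* Before: every access repeats the full array expression. */
	for (i = 0; i < 4; i++) {
		cfg[i].fifo_len = 64;
		cfg[i].fifo_priority = i;
	}

	/* After: one intermediate pointer per iteration, shorter lines. */
	for (i = 0; i < 4; i++) {
		struct demo_fifo_config *tx_cfg = &cfg[i];

		tx_cfg->fifo_len = 64;
		tx_cfg->fifo_priority = i;
		printf("fifo %d: len=%d prio=%d\n",
		       i, tx_cfg->fifo_len, tx_cfg->fifo_priority);
	}
	return 0;
}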
@@ -364,13 +364,19 @@ static void s2io_vlan_rx_register(struct net_device *dev,
 	struct mac_info *mac_control = &nic->mac_control;
 	struct config_param *config = &nic->config;
 
-	for (i = 0; i < config->tx_fifo_num; i++)
-		spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);
+	for (i = 0; i < config->tx_fifo_num; i++) {
+		struct fifo_info *fifo = &mac_control->fifos[i];
+
+		spin_lock_irqsave(&fifo->tx_lock, flags[i]);
+	}
 
 	nic->vlgrp = grp;
-	for (i = config->tx_fifo_num - 1; i >= 0; i--)
-		spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
-				flags[i]);
+
+	for (i = config->tx_fifo_num - 1; i >= 0; i--) {
+		struct fifo_info *fifo = &mac_control->fifos[i];
+
+		spin_unlock_irqrestore(&fifo->tx_lock, flags[i]);
+	}
 }
 
 /* Unregister the vlan */
@@ -382,15 +388,20 @@ static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 	struct mac_info *mac_control = &nic->mac_control;
 	struct config_param *config = &nic->config;
 
-	for (i = 0; i < config->tx_fifo_num; i++)
-		spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);
+	for (i = 0; i < config->tx_fifo_num; i++) {
+		struct fifo_info *fifo = &mac_control->fifos[i];
+
+		spin_lock_irqsave(&fifo->tx_lock, flags[i]);
+	}
 
 	if (nic->vlgrp)
 		vlan_group_set_device(nic->vlgrp, vid, NULL);
 
-	for (i = config->tx_fifo_num - 1; i >= 0; i--)
-		spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
-				flags[i]);
+	for (i = config->tx_fifo_num - 1; i >= 0; i--) {
+		struct fifo_info *fifo = &mac_control->fifos[i];
+
+		spin_unlock_irqrestore(&fifo->tx_lock, flags[i]);
+	}
 }
 
 /*
@@ -635,11 +646,12 @@ static int init_shared_mem(struct s2io_nic *nic)
 	mac_control = &nic->mac_control;
 	config = &nic->config;
 
-	/* Allocation and initialization of TXDLs in FIOFs */
+	/* Allocation and initialization of TXDLs in FIFOs */
 	size = 0;
 	for (i = 0; i < config->tx_fifo_num; i++) {
-		size += config->tx_cfg[i].fifo_len;
+		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
+		size += tx_cfg->fifo_len;
 	}
 	if (size > MAX_AVAILABLE_TXDS) {
 		DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
@@ -649,7 +661,9 @@ static int init_shared_mem(struct s2io_nic *nic)
 	size = 0;
 	for (i = 0; i < config->tx_fifo_num; i++) {
-		size = config->tx_cfg[i].fifo_len;
+		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
+		size = tx_cfg->fifo_len;
 		/*
 		 * Legal values are from 2 to 8192
 		 */
@@ -666,11 +680,13 @@ static int init_shared_mem(struct s2io_nic *nic)
 	lst_per_page = PAGE_SIZE / lst_size;
 
 	for (i = 0; i < config->tx_fifo_num; i++) {
-		int fifo_len = config->tx_cfg[i].fifo_len;
+		struct fifo_info *fifo = &mac_control->fifos[i];
+		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+		int fifo_len = tx_cfg->fifo_len;
 		int list_holder_size = fifo_len * sizeof(struct list_info_hold);
-		mac_control->fifos[i].list_info = kzalloc(list_holder_size,
-				GFP_KERNEL);
-		if (!mac_control->fifos[i].list_info) {
+
+		fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
+		if (!fifo->list_info) {
 			DBG_PRINT(INFO_DBG,
 				  "Malloc failed for list_info\n");
 			return -ENOMEM;
@@ -680,16 +696,17 @@ static int init_shared_mem(struct s2io_nic *nic)
 	for (i = 0; i < config->tx_fifo_num; i++) {
 		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
 						lst_per_page);
-		mac_control->fifos[i].tx_curr_put_info.offset = 0;
-		mac_control->fifos[i].tx_curr_put_info.fifo_len =
-			config->tx_cfg[i].fifo_len - 1;
-		mac_control->fifos[i].tx_curr_get_info.offset = 0;
-		mac_control->fifos[i].tx_curr_get_info.fifo_len =
-			config->tx_cfg[i].fifo_len - 1;
-		mac_control->fifos[i].fifo_no = i;
-		mac_control->fifos[i].nic = nic;
-		mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
-		mac_control->fifos[i].dev = dev;
+		struct fifo_info *fifo = &mac_control->fifos[i];
+		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
+		fifo->tx_curr_put_info.offset = 0;
+		fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
+		fifo->tx_curr_get_info.offset = 0;
+		fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
+		fifo->fifo_no = i;
+		fifo->nic = nic;
+		fifo->max_txds = MAX_SKB_FRAGS + 2;
+		fifo->dev = dev;
 
 		for (j = 0; j < page_num; j++) {
 			int k = 0;
@@ -726,11 +743,11 @@ static int init_shared_mem(struct s2io_nic *nic)
 			}
 			while (k < lst_per_page) {
 				int l = (j * lst_per_page) + k;
-				if (l == config->tx_cfg[i].fifo_len)
+				if (l == tx_cfg->fifo_len)
 					break;
-				mac_control->fifos[i].list_info[l].list_virt_addr =
+				fifo->list_info[l].list_virt_addr =
 					tmp_v + (k * lst_size);
-				mac_control->fifos[i].list_info[l].list_phy_addr =
+				fifo->list_info[l].list_phy_addr =
 					tmp_p + (k * lst_size);
 				k++;
 			}
@@ -738,10 +755,12 @@ static int init_shared_mem(struct s2io_nic *nic)
 	}
 
 	for (i = 0; i < config->tx_fifo_num; i++) {
-		size = config->tx_cfg[i].fifo_len;
-		mac_control->fifos[i].ufo_in_band_v
-			= kcalloc(size, sizeof(u64), GFP_KERNEL);
-		if (!mac_control->fifos[i].ufo_in_band_v)
+		struct fifo_info *fifo = &mac_control->fifos[i];
+		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
+		size = tx_cfg->fifo_len;
+		fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
+		if (!fifo->ufo_in_band_v)
 			return -ENOMEM;
 		mem_allocated += (size * sizeof(u64));
 	}
@@ -749,20 +768,19 @@ static int init_shared_mem(struct s2io_nic *nic)
 	/* Allocation and initialization of RXDs in Rings */
 	size = 0;
 	for (i = 0; i < config->rx_ring_num; i++) {
-		if (config->rx_cfg[i].num_rxd %
-		    (rxd_count[nic->rxd_mode] + 1)) {
+		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
+		struct ring_info *ring = &mac_control->rings[i];
+
+		if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
 			DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
-			DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
-				  i);
+			DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ", i);
 			DBG_PRINT(ERR_DBG, "RxDs per Block");
 			return FAILURE;
 		}
-		size += config->rx_cfg[i].num_rxd;
-		mac_control->rings[i].block_count =
-			config->rx_cfg[i].num_rxd /
-			(rxd_count[nic->rxd_mode] + 1 );
-		mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
-			mac_control->rings[i].block_count;
+		size += rx_cfg->num_rxd;
+		ring->block_count = rx_cfg->num_rxd /
+			(rxd_count[nic->rxd_mode] + 1 );
+		ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
 	}
 	if (nic->rxd_mode == RXD_MODE_1)
 		size = (size * (sizeof(struct RxD1)));
@@ -770,26 +788,26 @@ static int init_shared_mem(struct s2io_nic *nic)
 		size = (size * (sizeof(struct RxD3)));
 
 	for (i = 0; i < config->rx_ring_num; i++) {
-		mac_control->rings[i].rx_curr_get_info.block_index = 0;
-		mac_control->rings[i].rx_curr_get_info.offset = 0;
-		mac_control->rings[i].rx_curr_get_info.ring_len =
-			config->rx_cfg[i].num_rxd - 1;
-		mac_control->rings[i].rx_curr_put_info.block_index = 0;
-		mac_control->rings[i].rx_curr_put_info.offset = 0;
-		mac_control->rings[i].rx_curr_put_info.ring_len =
-			config->rx_cfg[i].num_rxd - 1;
-		mac_control->rings[i].nic = nic;
-		mac_control->rings[i].ring_no = i;
-		mac_control->rings[i].lro = lro_enable;
-
-		blk_cnt = config->rx_cfg[i].num_rxd /
-			(rxd_count[nic->rxd_mode] + 1);
+		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
+		struct ring_info *ring = &mac_control->rings[i];
+
+		ring->rx_curr_get_info.block_index = 0;
+		ring->rx_curr_get_info.offset = 0;
+		ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
+		ring->rx_curr_put_info.block_index = 0;
+		ring->rx_curr_put_info.offset = 0;
+		ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
+		ring->nic = nic;
+		ring->ring_no = i;
+		ring->lro = lro_enable;
+
+		blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
 		/* Allocating all the Rx blocks */
 		for (j = 0; j < blk_cnt; j++) {
 			struct rx_block_info *rx_blocks;
 			int l;
 
-			rx_blocks = &mac_control->rings[i].rx_blocks[j];
+			rx_blocks = &ring->rx_blocks[j];
 			size = SIZE_OF_BLOCK;	//size is always page size
 			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
 							  &tmp_p_addr);
@@ -825,16 +843,11 @@ static int init_shared_mem(struct s2io_nic *nic)
 		}
 		/* Interlinking all Rx Blocks */
 		for (j = 0; j < blk_cnt; j++) {
-			tmp_v_addr =
-				mac_control->rings[i].rx_blocks[j].block_virt_addr;
-			tmp_v_addr_next =
-				mac_control->rings[i].rx_blocks[(j + 1) %
-				blk_cnt].block_virt_addr;
-			tmp_p_addr =
-				mac_control->rings[i].rx_blocks[j].block_dma_addr;
-			tmp_p_addr_next =
-				mac_control->rings[i].rx_blocks[(j + 1) %
-				blk_cnt].block_dma_addr;
+			int next = (j + 1) % blk_cnt;
+			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
+			tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
+			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
+			tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;
 
 			pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
 			pre_rxd_blk->reserved_2_pNext_RxD_block =
@@ -849,26 +862,28 @@ static int init_shared_mem(struct s2io_nic *nic)
 	 * and the buffers as well.
 	 */
 	for (i = 0; i < config->rx_ring_num; i++) {
-		blk_cnt = config->rx_cfg[i].num_rxd /
+		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
+		struct ring_info *ring = &mac_control->rings[i];
+
+		blk_cnt = rx_cfg->num_rxd /
 			(rxd_count[nic->rxd_mode]+ 1);
-		mac_control->rings[i].ba =
-			kmalloc((sizeof(struct buffAdd *) * blk_cnt),
+		ring->ba = kmalloc((sizeof(struct buffAdd *) * blk_cnt),
 				GFP_KERNEL);
-		if (!mac_control->rings[i].ba)
+		if (!ring->ba)
 			return -ENOMEM;
 		mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
 		for (j = 0; j < blk_cnt; j++) {
 			int k = 0;
-			mac_control->rings[i].ba[j] =
+			ring->ba[j] =
 				kmalloc((sizeof(struct buffAdd) *
 					(rxd_count[nic->rxd_mode] + 1)),
 					GFP_KERNEL);
-			if (!mac_control->rings[i].ba[j])
+			if (!ring->ba[j])
 				return -ENOMEM;
 			mem_allocated += (sizeof(struct buffAdd) * \
 				(rxd_count[nic->rxd_mode] + 1));
 			while (k != rxd_count[nic->rxd_mode]) {
-				ba = &mac_control->rings[i].ba[j][k];
+				ba = &ring->ba[j][k];
 
 				ba->ba_0_org = (void *) kmalloc
 					(BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
@@ -952,22 +967,23 @@ static void free_shared_mem(struct s2io_nic *nic)
 	lst_per_page = PAGE_SIZE / lst_size;
 
 	for (i = 0; i < config->tx_fifo_num; i++) {
-		page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
-					    lst_per_page);
+		struct fifo_info *fifo = &mac_control->fifos[i];
+		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
+		page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
 		for (j = 0; j < page_num; j++) {
 			int mem_blks = (j * lst_per_page);
-			if (!mac_control->fifos[i].list_info)
+			struct list_info_hold *fli;
+
+			if (!fifo->list_info)
 				return;
-			if (!mac_control->fifos[i].list_info[mem_blks].
-			    list_virt_addr)
+
+			fli = &fifo->list_info[mem_blks];
+			if (!fli->list_virt_addr)
 				break;
 			pci_free_consistent(nic->pdev, PAGE_SIZE,
-					    mac_control->fifos[i].
-					    list_info[mem_blks].
-					    list_virt_addr,
-					    mac_control->fifos[i].
-					    list_info[mem_blks].
-					    list_phy_addr);
+					    fli->list_virt_addr,
+					    fli->list_phy_addr);
 			nic->mac_control.stats_info->sw_stat.mem_freed
 				+= PAGE_SIZE;
 		}
@@ -986,25 +1002,25 @@ static void free_shared_mem(struct s2io_nic *nic)
 			nic->mac_control.stats_info->sw_stat.mem_freed
 				+= PAGE_SIZE;
 		}
-		kfree(mac_control->fifos[i].list_info);
+		kfree(fifo->list_info);
 		nic->mac_control.stats_info->sw_stat.mem_freed +=
 			(nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
 	}
 
 	size = SIZE_OF_BLOCK;
 	for (i = 0; i < config->rx_ring_num; i++) {
-		blk_cnt = mac_control->rings[i].block_count;
+		struct ring_info *ring = &mac_control->rings[i];
+
+		blk_cnt = ring->block_count;
 		for (j = 0; j < blk_cnt; j++) {
-			tmp_v_addr = mac_control->rings[i].rx_blocks[j].
-				block_virt_addr;
-			tmp_p_addr = mac_control->rings[i].rx_blocks[j].
-				block_dma_addr;
+			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
+			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
 			if (tmp_v_addr == NULL)
 				break;
 			pci_free_consistent(nic->pdev, size,
 					    tmp_v_addr, tmp_p_addr);
 			nic->mac_control.stats_info->sw_stat.mem_freed += size;
-			kfree(mac_control->rings[i].rx_blocks[j].rxds);
+			kfree(ring->rx_blocks[j].rxds);
 			nic->mac_control.stats_info->sw_stat.mem_freed +=
 				( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
 		}
@@ -1013,15 +1029,17 @@ static void free_shared_mem(struct s2io_nic *nic)
 	if (nic->rxd_mode == RXD_MODE_3B) {
 		/* Freeing buffer storage addresses in 2BUFF mode. */
 		for (i = 0; i < config->rx_ring_num; i++) {
-			blk_cnt = config->rx_cfg[i].num_rxd /
+			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
+			struct ring_info *ring = &mac_control->rings[i];
+
+			blk_cnt = rx_cfg->num_rxd /
 				(rxd_count[nic->rxd_mode] + 1);
 			for (j = 0; j < blk_cnt; j++) {
 				int k = 0;
-				if (!mac_control->rings[i].ba[j])
+				if (!ring->ba[j])
 					continue;
 				while (k != rxd_count[nic->rxd_mode]) {
-					struct buffAdd *ba =
-						&mac_control->rings[i].ba[j][k];
+					struct buffAdd *ba = &ring->ba[j][k];
 					kfree(ba->ba_0_org);
 					nic->mac_control.stats_info->sw_stat.\
 						mem_freed += (BUF0_LEN + ALIGN_SIZE);
@@ -1030,22 +1048,25 @@ static void free_shared_mem(struct s2io_nic *nic)
 						mem_freed += (BUF1_LEN + ALIGN_SIZE);
 					k++;
 				}
-				kfree(mac_control->rings[i].ba[j]);
+				kfree(ring->ba[j]);
 				nic->mac_control.stats_info->sw_stat.mem_freed +=
 					(sizeof(struct buffAdd) *
 					(rxd_count[nic->rxd_mode] + 1));
 			}
-			kfree(mac_control->rings[i].ba);
+			kfree(ring->ba);
 			nic->mac_control.stats_info->sw_stat.mem_freed +=
 				(sizeof(struct buffAdd *) * blk_cnt);
 		}
 	}
 
 	for (i = 0; i < nic->config.tx_fifo_num; i++) {
-		if (mac_control->fifos[i].ufo_in_band_v) {
+		struct fifo_info *fifo = &mac_control->fifos[i];
+		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
+		if (fifo->ufo_in_band_v) {
 			nic->mac_control.stats_info->sw_stat.mem_freed
-				+= (config->tx_cfg[i].fifo_len * sizeof(u64));
-			kfree(mac_control->fifos[i].ufo_in_band_v);
+				+= (tx_cfg->fifo_len * sizeof(u64));
+			kfree(fifo->ufo_in_band_v);
 		}
 	}
@@ -1339,10 +1360,10 @@ static int init_nic(struct s2io_nic *nic)
 	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
-		val64 |=
-		    vBIT(config->tx_cfg[i].fifo_len - 1, ((j * 32) + 19),
-			 13) | vBIT(config->tx_cfg[i].fifo_priority,
-				    ((j * 32) + 5), 3);
+		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
+		val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
+			vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);
 
 		if (i == (config->tx_fifo_num - 1)) {
 			if (i % 2 == 0)
@@ -1400,9 +1421,9 @@ static int init_nic(struct s2io_nic *nic)
 	/* Rx DMA intialization. */
 	val64 = 0;
 	for (i = 0; i < config->rx_ring_num; i++) {
-		val64 |=
-		    vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
-			 3);
+		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
+
+		val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
 	}
 	writeq(val64, &bar0->rx_queue_priority);
@@ -2276,7 +2297,9 @@ static int start_nic(struct s2io_nic *nic)
 	/* PRC Initialization and configuration */
 	for (i = 0; i < config->rx_ring_num; i++) {
-		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
+		struct ring_info *ring = &mac_control->rings[i];
+
+		writeq((u64) ring->rx_blocks[0].block_dma_addr,
 		       &bar0->prc_rxd0_n[i]);
 
 		val64 = readq(&bar0->prc_ctrl_n[i]);
@@ -2434,11 +2457,13 @@ static void free_tx_buffers(struct s2io_nic *nic)
 	config = &nic->config;
 
 	for (i = 0; i < config->tx_fifo_num; i++) {
+		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+		struct fifo_info *fifo = &mac_control->fifos[i];
 		unsigned long flags;
-		spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags);
-		for (j = 0; j < config->tx_cfg[i].fifo_len; j++) {
-			txdp = (struct TxD *) \
-				mac_control->fifos[i].list_info[j].list_virt_addr;
+
+		spin_lock_irqsave(&fifo->tx_lock, flags);
+		for (j = 0; j < tx_cfg->fifo_len; j++) {
+			txdp = (struct TxD *)fifo->list_info[j].list_virt_addr;
 			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
 			if (skb) {
 				nic->mac_control.stats_info->sw_stat.mem_freed
@@ -2450,9 +2475,9 @@ static void free_tx_buffers(struct s2io_nic *nic)
 		DBG_PRINT(INTR_DBG,
 			  "%s:forcibly freeing %d skbs on FIFO%d\n",
 			  dev->name, cnt, i);
-		mac_control->fifos[i].tx_curr_get_info.offset = 0;
-		mac_control->fifos[i].tx_curr_put_info.offset = 0;
-		spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock, flags);
+		fifo->tx_curr_get_info.offset = 0;
+		fifo->tx_curr_put_info.offset = 0;
+		spin_unlock_irqrestore(&fifo->tx_lock, flags);
 	}
 }
@@ -2795,14 +2820,16 @@ static void free_rx_buffers(struct s2io_nic *sp)
 	config = &sp->config;
 
 	for (i = 0; i < config->rx_ring_num; i++) {
+		struct ring_info *ring = &mac_control->rings[i];
+
 		for (blk = 0; blk < rx_ring_sz[i]; blk++)
 			free_rxd_blk(sp,i,blk);
-		mac_control->rings[i].rx_curr_put_info.block_index = 0;
-		mac_control->rings[i].rx_curr_get_info.block_index = 0;
-		mac_control->rings[i].rx_curr_put_info.offset = 0;
-		mac_control->rings[i].rx_curr_get_info.offset = 0;
-		mac_control->rings[i].rx_bufs_left = 0;
+		ring->rx_curr_put_info.block_index = 0;
+		ring->rx_curr_get_info.block_index = 0;
+		ring->rx_curr_put_info.offset = 0;
+		ring->rx_curr_get_info.offset = 0;
+		ring->rx_bufs_left = 0;
 		DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
 			  dev->name, buf_cnt, i);
 	}
@@ -2866,7 +2893,6 @@ static int s2io_poll_msix(struct napi_struct *napi, int budget)
 static int s2io_poll_inta(struct napi_struct *napi, int budget)
 {
 	struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
-	struct ring_info *ring;
 	struct config_param *config;
 	struct mac_info *mac_control;
 	int pkts_processed = 0;
@@ -2881,7 +2907,7 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget)
 		return 0;
 
 	for (i = 0; i < config->rx_ring_num; i++) {
-		ring = &mac_control->rings[i];
+		struct ring_info *ring = &mac_control->rings[i];
 		ring_pkts_processed = rx_intr_handler(ring, budget);
 		s2io_chk_rx_buffers(nic, ring);
 		pkts_processed += ring_pkts_processed;
@@ -2936,12 +2962,16 @@ static void s2io_netpoll(struct net_device *dev)
 		tx_intr_handler(&mac_control->fifos[i]);
 
 	/* check for received packet and indicate up to network */
-	for (i = 0; i < config->rx_ring_num; i++)
-		rx_intr_handler(&mac_control->rings[i], 0);
+	for (i = 0; i < config->rx_ring_num; i++) {
+		struct ring_info *ring = &mac_control->rings[i];
+
+		rx_intr_handler(ring, 0);
+	}
 
 	for (i = 0; i < config->rx_ring_num; i++) {
-		if (fill_rx_buffers(nic, &mac_control->rings[i], 0) ==
-				-ENOMEM) {
+		struct ring_info *ring = &mac_control->rings[i];
+
+		if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
 			DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
 			DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
 			break;
@@ -4803,8 +4833,11 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
 		if (reason & GEN_INTR_RXTRAFFIC)
 			writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
 
-		for (i = 0; i < config->rx_ring_num; i++)
-			rx_intr_handler(&mac_control->rings[i], 0);
+		for (i = 0; i < config->rx_ring_num; i++) {
+			struct ring_info *ring = &mac_control->rings[i];
+
+			rx_intr_handler(ring, 0);
+		}
 	}
 
 	/*
@@ -4825,8 +4858,11 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
 		 * Reallocate the buffers from the interrupt handler itself.
 		 */
 		if (!config->napi) {
-			for (i = 0; i < config->rx_ring_num; i++)
-				s2io_chk_rx_buffers(sp, &mac_control->rings[i]);
+			for (i = 0; i < config->rx_ring_num; i++) {
+				struct ring_info *ring = &mac_control->rings[i];
+
+				s2io_chk_rx_buffers(sp, ring);
+			}
 		}
 		writeq(sp->general_int_mask, &bar0->general_int_mask);
 		readl(&bar0->general_int_status);
@@ -4923,8 +4959,10 @@ static struct net_device_stats *s2io_get_stats(struct net_device *dev)
 	/* collect per-ring rx_packets and rx_bytes */
 	dev->stats.rx_packets = dev->stats.rx_bytes = 0;
 	for (i = 0; i < config->rx_ring_num; i++) {
-		dev->stats.rx_packets += mac_control->rings[i].rx_packets;
-		dev->stats.rx_bytes += mac_control->rings[i].rx_bytes;
+		struct ring_info *ring = &mac_control->rings[i];
+
+		dev->stats.rx_packets += ring->rx_packets;
+		dev->stats.rx_bytes += ring->rx_bytes;
 	}
 
 	return (&dev->stats);
@@ -6974,15 +7012,16 @@ static int rxd_owner_bit_reset(struct s2io_nic *sp)
 	size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
 
 	for (i = 0; i < config->rx_ring_num; i++) {
-		blk_cnt = config->rx_cfg[i].num_rxd /
-			(rxd_count[sp->rxd_mode] +1);
+		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
+		struct ring_info *ring = &mac_control->rings[i];
+
+		blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] +1);
 
 		for (j = 0; j < blk_cnt; j++) {
 			for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
-				rxdp = mac_control->rings[i].
-					rx_blocks[j].rxds[k].virt_addr;
+				rxdp = ring-> rx_blocks[j].rxds[k].virt_addr;
 				if(sp->rxd_mode == RXD_MODE_3B)
-					ba = &mac_control->rings[i].ba[j][k];
+					ba = &ring->ba[j][k];
 				if (set_rxd_buffer_pointer(sp, rxdp, ba,
 					&skb,(u64 *)&temp0_64,
 					(u64 *)&temp1_64,
@@ -7205,8 +7244,10 @@ static int s2io_card_up(struct s2io_nic * sp)
 	config = &sp->config;
 
 	for (i = 0; i < config->rx_ring_num; i++) {
-		mac_control->rings[i].mtu = dev->mtu;
-		ret = fill_rx_buffers(sp, &mac_control->rings[i], 1);
+		struct ring_info *ring = &mac_control->rings[i];
+
+		ring->mtu = dev->mtu;
+		ret = fill_rx_buffers(sp, ring, 1);
 		if (ret) {
 			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
 				  dev->name);
@@ -7215,7 +7256,7 @@ static int s2io_card_up(struct s2io_nic * sp)
 			return -ENOMEM;
 		}
 		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
-			  mac_control->rings[i].rx_bufs_left);
+			  ring->rx_bufs_left);
 	}
 
 	/* Initialise napi */
@@ -7875,8 +7916,10 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 	config->multiq = dev_multiq;
 
 	for (i = 0; i < config->tx_fifo_num; i++) {
-		config->tx_cfg[i].fifo_len = tx_fifo_len[i];
-		config->tx_cfg[i].fifo_priority = i;
+		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
+		tx_cfg->fifo_len = tx_fifo_len[i];
+		tx_cfg->fifo_priority = i;
 	}
 
 	/* mapping the QoS priority to the configured fifos */
@@ -7890,9 +7933,10 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
 
 	for (i = 0; i < config->tx_fifo_num; i++) {
-		config->tx_cfg[i].f_no_snoop =
-		    (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
-		if (config->tx_cfg[i].fifo_len < 65) {
+		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
+		tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
+		if (tx_cfg->fifo_len < 65) {
 			config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
 			break;
 		}
@@ -7903,20 +7947,23 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 	/* Rx side parameters. */
 	config->rx_ring_num = rx_ring_num;
 	for (i = 0; i < config->rx_ring_num; i++) {
-		config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
-		    (rxd_count[sp->rxd_mode] + 1);
-		config->rx_cfg[i].ring_priority = i;
-		mac_control->rings[i].rx_bufs_left = 0;
-		mac_control->rings[i].rxd_mode = sp->rxd_mode;
-		mac_control->rings[i].rxd_count = rxd_count[sp->rxd_mode];
-		mac_control->rings[i].pdev = sp->pdev;
-		mac_control->rings[i].dev = sp->dev;
+		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
+		struct ring_info *ring = &mac_control->rings[i];
+
+		rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
+		rx_cfg->ring_priority = i;
+		ring->rx_bufs_left = 0;
+		ring->rxd_mode = sp->rxd_mode;
+		ring->rxd_count = rxd_count[sp->rxd_mode];
+		ring->pdev = sp->pdev;
+		ring->dev = sp->dev;
 	}
 
 	for (i = 0; i < rx_ring_num; i++) {
-		config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
-		config->rx_cfg[i].f_no_snoop =
-		    (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
+		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
+
+		rx_cfg->ring_org = RING_ORG_BUFF1;
+		rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
 	}
 
 	/* Setting Mac Control parameters */
@@ -8015,9 +8062,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 	}
 
 	if (config->intr_type == MSI_X) {
-		for (i = 0; i < config->rx_ring_num ; i++)
-			netif_napi_add(dev, &mac_control->rings[i].napi,
-				s2io_poll_msix, 64);
+		for (i = 0; i < config->rx_ring_num ; i++) {
+			struct ring_info *ring = &mac_control->rings[i];
+
+			netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
+		}
 	} else {
 		netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
 	}
@@ -8089,8 +8138,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 	sp->state = 0;
 
 	/* Initialize spinlocks */
-	for (i = 0; i < sp->config.tx_fifo_num; i++)
-		spin_lock_init(&mac_control->fifos[i].tx_lock);
+	for (i = 0; i < sp->config.tx_fifo_num; i++) {
+		struct fifo_info *fifo = &mac_control->fifos[i];
+
+		spin_lock_init(&fifo->tx_lock);
+	}
 
 	/*
 	 * SXE-002: Configure link and activity LED to init state
@@ -8165,8 +8217,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 		break;
 	}
 
 	if (sp->config.multiq) {
-		for (i = 0; i < sp->config.tx_fifo_num; i++)
-			mac_control->fifos[i].multiq = config->multiq;
+		for (i = 0; i < sp->config.tx_fifo_num; i++) {
+			struct fifo_info *fifo = &mac_control->fifos[i];
+
+			fifo->multiq = config->multiq;
+		}
 		DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
 			  dev->name);
 	} else
...