Commit 528f7272 authored by Jon Mason, committed by David S. Miller

vxge: code cleanup and reorganization

Move function locations to remove the need for internal declarations and
other misc clean-ups.
Signed-off-by: Jon Mason <jon.mason@exar.com>
Signed-off-by: Arpit Patel <arpit.patel@exar.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent: deef4b52
The source diff is too large to display in full; you can view the blob instead.
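For readers skimming the diff below: most of the patch follows the pattern named in the commit description, moving a static function's definition above its first caller so the file no longer needs a separate forward declaration. The following is a minimal, hypothetical sketch of that pattern (illustrative names only, not code taken from the vxge driver):

```c
#include <stdio.h>

/*
 * Because helper() is defined before its only caller, no separate
 * "static void helper(void);" forward declaration is needed at the
 * top of the file -- the declaration the old layout required can be
 * deleted once the definition is moved up.
 */
static void helper(void)
{
	puts("doing the work");
}

static void caller(void)
{
	helper();	/* helper() is already visible here */
}

int main(void)
{
	caller();
	return 0;
}
```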
...@@ -700,7 +700,7 @@ struct __vxge_hw_virtualpath { ...@@ -700,7 +700,7 @@ struct __vxge_hw_virtualpath {
* *
* This structure is used to store the callback information. * This structure is used to store the callback information.
*/ */
struct __vxge_hw_vpath_handle{ struct __vxge_hw_vpath_handle {
struct list_head item; struct list_head item;
struct __vxge_hw_virtualpath *vpath; struct __vxge_hw_virtualpath *vpath;
}; };
...@@ -815,8 +815,8 @@ struct vxge_hw_device_hw_info { ...@@ -815,8 +815,8 @@ struct vxge_hw_device_hw_info {
u8 serial_number[VXGE_HW_INFO_LEN]; u8 serial_number[VXGE_HW_INFO_LEN];
u8 part_number[VXGE_HW_INFO_LEN]; u8 part_number[VXGE_HW_INFO_LEN];
u8 product_desc[VXGE_HW_INFO_LEN]; u8 product_desc[VXGE_HW_INFO_LEN];
u8 (mac_addrs)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN]; u8 mac_addrs[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
u8 (mac_addr_masks)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN]; u8 mac_addr_masks[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
}; };
/** /**
...@@ -862,16 +862,6 @@ struct vxge_hw_device_attr { ...@@ -862,16 +862,6 @@ struct vxge_hw_device_attr {
VXGE_HW_STATS_OP_READ, \ VXGE_HW_STATS_OP_READ, \
loc, \ loc, \
offset, \ offset, \
&val64); \
\
if (status != VXGE_HW_OK) \
return status; \
}
#define VXGE_HW_VPATH_STATS_PIO_READ(offset) { \
status = __vxge_hw_vpath_stats_access(vpath, \
VXGE_HW_STATS_OP_READ, \
offset, \
&val64); \ &val64); \
if (status != VXGE_HW_OK) \ if (status != VXGE_HW_OK) \
return status; \ return status; \
...@@ -1927,6 +1917,15 @@ static inline void *vxge_os_dma_malloc(struct pci_dev *pdev, ...@@ -1927,6 +1917,15 @@ static inline void *vxge_os_dma_malloc(struct pci_dev *pdev,
return vaddr; return vaddr;
} }
static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
struct pci_dev **p_dma_acch)
{
unsigned long misaligned = *(unsigned long *)p_dma_acch;
u8 *tmp = (u8 *)vaddr;
tmp -= misaligned;
kfree((void *)tmp);
}
/* /*
* __vxge_hw_mempool_item_priv - will return pointer on per item private space * __vxge_hw_mempool_item_priv - will return pointer on per item private space
*/ */
...@@ -1996,7 +1995,6 @@ enum vxge_hw_status vxge_hw_vpath_mtu_set( ...@@ -1996,7 +1995,6 @@ enum vxge_hw_status vxge_hw_vpath_mtu_set(
void void
vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp); vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp);
#ifndef readq #ifndef readq
static inline u64 readq(void __iomem *addr) static inline u64 readq(void __iomem *addr)
{ {
......
...@@ -84,15 +84,6 @@ module_param_array(bw_percentage, uint, NULL, 0); ...@@ -84,15 +84,6 @@ module_param_array(bw_percentage, uint, NULL, 0);
static struct vxge_drv_config *driver_config; static struct vxge_drv_config *driver_config;
static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
struct macInfo *mac);
static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
struct macInfo *mac);
static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac);
static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
static inline int is_vxge_card_up(struct vxgedev *vdev) static inline int is_vxge_card_up(struct vxgedev *vdev)
{ {
return test_bit(__VXGE_STATE_CARD_UP, &vdev->state); return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
...@@ -149,8 +140,7 @@ static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev) ...@@ -149,8 +140,7 @@ static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
* This function is called during interrupt context to notify link up state * This function is called during interrupt context to notify link up state
* change. * change.
*/ */
static void static void vxge_callback_link_up(struct __vxge_hw_device *hldev)
vxge_callback_link_up(struct __vxge_hw_device *hldev)
{ {
struct net_device *dev = hldev->ndev; struct net_device *dev = hldev->ndev;
struct vxgedev *vdev = netdev_priv(dev); struct vxgedev *vdev = netdev_priv(dev);
...@@ -173,8 +163,7 @@ vxge_callback_link_up(struct __vxge_hw_device *hldev) ...@@ -173,8 +163,7 @@ vxge_callback_link_up(struct __vxge_hw_device *hldev)
* This function is called during interrupt context to notify link down state * This function is called during interrupt context to notify link down state
* change. * change.
*/ */
static void static void vxge_callback_link_down(struct __vxge_hw_device *hldev)
vxge_callback_link_down(struct __vxge_hw_device *hldev)
{ {
struct net_device *dev = hldev->ndev; struct net_device *dev = hldev->ndev;
struct vxgedev *vdev = netdev_priv(dev); struct vxgedev *vdev = netdev_priv(dev);
...@@ -196,7 +185,7 @@ vxge_callback_link_down(struct __vxge_hw_device *hldev) ...@@ -196,7 +185,7 @@ vxge_callback_link_down(struct __vxge_hw_device *hldev)
* *
* Allocate SKB. * Allocate SKB.
*/ */
static struct sk_buff* static struct sk_buff *
vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size) vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
{ {
struct net_device *dev; struct net_device *dev;
...@@ -414,7 +403,6 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr, ...@@ -414,7 +403,6 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
prefetch((char *)skb + L1_CACHE_BYTES); prefetch((char *)skb + L1_CACHE_BYTES);
if (unlikely(t_code)) { if (unlikely(t_code)) {
if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) != if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
VXGE_HW_OK) { VXGE_HW_OK) {
...@@ -437,9 +425,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr, ...@@ -437,9 +425,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
} }
if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) { if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {
if (vxge_rx_alloc(dtr, ring, data_size) != NULL) { if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {
if (!vxge_rx_map(dtr, ring)) { if (!vxge_rx_map(dtr, ring)) {
skb_put(skb, pkt_length); skb_put(skb, pkt_length);
...@@ -678,6 +664,65 @@ static enum vxge_hw_status vxge_search_mac_addr_in_list( ...@@ -678,6 +664,65 @@ static enum vxge_hw_status vxge_search_mac_addr_in_list(
return FALSE; return FALSE;
} }
static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
{
struct vxge_mac_addrs *new_mac_entry;
u8 *mac_address = NULL;
if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
return TRUE;
new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
if (!new_mac_entry) {
vxge_debug_mem(VXGE_ERR,
"%s: memory allocation failed",
VXGE_DRIVER_NAME);
return FALSE;
}
list_add(&new_mac_entry->item, &vpath->mac_addr_list);
/* Copy the new mac address to the list */
mac_address = (u8 *)&new_mac_entry->macaddr;
memcpy(mac_address, mac->macaddr, ETH_ALEN);
new_mac_entry->state = mac->state;
vpath->mac_addr_cnt++;
/* Is this a multicast address */
if (0x01 & mac->macaddr[0])
vpath->mcast_addr_cnt++;
return TRUE;
}
/* Add a mac address to DA table */
static enum vxge_hw_status
vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath;
enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;
if (0x01 & mac->macaddr[0]) /* multicast address */
duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
else
duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;
vpath = &vdev->vpaths[mac->vpath_no];
status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
mac->macmask, duplicate_mode);
if (status != VXGE_HW_OK) {
vxge_debug_init(VXGE_ERR,
"DA config add entry failed for vpath:%d",
vpath->device_id);
} else
if (FALSE == vxge_mac_list_add(vpath, mac))
status = -EPERM;
return status;
}
static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header) static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
{ {
struct macInfo mac_info; struct macInfo mac_info;
...@@ -1023,6 +1068,50 @@ vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata) ...@@ -1023,6 +1068,50 @@ vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
"%s:%d Exiting...", __func__, __LINE__); "%s:%d Exiting...", __func__, __LINE__);
} }
static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
{
struct list_head *entry, *next;
u64 del_mac = 0;
u8 *mac_address = (u8 *) (&del_mac);
/* Copy the mac address to delete from the list */
memcpy(mac_address, mac->macaddr, ETH_ALEN);
list_for_each_safe(entry, next, &vpath->mac_addr_list) {
if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
list_del(entry);
kfree((struct vxge_mac_addrs *)entry);
vpath->mac_addr_cnt--;
/* Is this a multicast address */
if (0x01 & mac->macaddr[0])
vpath->mcast_addr_cnt--;
return TRUE;
}
}
return FALSE;
}
/* delete a mac address from DA table */
static enum vxge_hw_status
vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath;
vpath = &vdev->vpaths[mac->vpath_no];
status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
mac->macmask);
if (status != VXGE_HW_OK) {
vxge_debug_init(VXGE_ERR,
"DA config delete entry failed for vpath:%d",
vpath->device_id);
} else
vxge_mac_list_del(vpath, mac);
return status;
}
/** /**
* vxge_set_multicast * vxge_set_multicast
* @dev: pointer to the device structure * @dev: pointer to the device structure
...@@ -1333,6 +1422,95 @@ static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id) ...@@ -1333,6 +1422,95 @@ static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
} }
} }
/* list all mac addresses from DA table */
static enum vxge_hw_status
vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, struct macInfo *mac)
{
enum vxge_hw_status status = VXGE_HW_OK;
unsigned char macmask[ETH_ALEN];
unsigned char macaddr[ETH_ALEN];
status = vxge_hw_vpath_mac_addr_get(vpath->handle,
macaddr, macmask);
if (status != VXGE_HW_OK) {
vxge_debug_init(VXGE_ERR,
"DA config list entry failed for vpath:%d",
vpath->device_id);
return status;
}
while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
macaddr, macmask);
if (status != VXGE_HW_OK)
break;
}
return status;
}
/* Store all mac addresses from the list to the DA table */
static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct macInfo mac_info;
u8 *mac_address = NULL;
struct list_head *entry, *next;
memset(&mac_info, 0, sizeof(struct macInfo));
if (vpath->is_open) {
list_for_each_safe(entry, next, &vpath->mac_addr_list) {
mac_address =
(u8 *)&
((struct vxge_mac_addrs *)entry)->macaddr;
memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
((struct vxge_mac_addrs *)entry)->state =
VXGE_LL_MAC_ADDR_IN_DA_TABLE;
/* does this mac address already exist in da table? */
status = vxge_search_mac_addr_in_da_table(vpath,
&mac_info);
if (status != VXGE_HW_OK) {
/* Add this mac address to the DA table */
status = vxge_hw_vpath_mac_addr_add(
vpath->handle, mac_info.macaddr,
mac_info.macmask,
VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
if (status != VXGE_HW_OK) {
vxge_debug_init(VXGE_ERR,
"DA add entry failed for vpath:%d",
vpath->device_id);
((struct vxge_mac_addrs *)entry)->state
= VXGE_LL_MAC_ADDR_IN_LIST;
}
}
}
}
return status;
}
/* Store all vlan ids from the list to the vid table */
static enum vxge_hw_status
vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxgedev *vdev = vpath->vdev;
u16 vid;
if (vdev->vlgrp && vpath->is_open) {
for (vid = 0; vid < VLAN_N_VID; vid++) {
if (!vlan_group_get_device(vdev->vlgrp, vid))
continue;
/* Add these vlan to the vid table */
status = vxge_hw_vpath_vid_add(vpath->handle, vid);
}
}
return status;
}
/* /*
* vxge_reset_vpath * vxge_reset_vpath
* @vdev: pointer to vdev * @vdev: pointer to vdev
...@@ -1745,7 +1923,6 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev) ...@@ -1745,7 +1923,6 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
vdev->config.rth_algorithm, vdev->config.rth_algorithm,
&hash_types, &hash_types,
vdev->config.rth_bkt_sz); vdev->config.rth_bkt_sz);
if (status != VXGE_HW_OK) { if (status != VXGE_HW_OK) {
vxge_debug_init(VXGE_ERR, vxge_debug_init(VXGE_ERR,
"RTH configuration failed for vpath:%d", "RTH configuration failed for vpath:%d",
...@@ -1757,199 +1934,6 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev) ...@@ -1757,199 +1934,6 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
return status; return status;
} }
static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
{
struct vxge_mac_addrs *new_mac_entry;
u8 *mac_address = NULL;
if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
return TRUE;
new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
if (!new_mac_entry) {
vxge_debug_mem(VXGE_ERR,
"%s: memory allocation failed",
VXGE_DRIVER_NAME);
return FALSE;
}
list_add(&new_mac_entry->item, &vpath->mac_addr_list);
/* Copy the new mac address to the list */
mac_address = (u8 *)&new_mac_entry->macaddr;
memcpy(mac_address, mac->macaddr, ETH_ALEN);
new_mac_entry->state = mac->state;
vpath->mac_addr_cnt++;
/* Is this a multicast address */
if (0x01 & mac->macaddr[0])
vpath->mcast_addr_cnt++;
return TRUE;
}
/* Add a mac address to DA table */
static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
struct macInfo *mac)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath;
enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;
if (0x01 & mac->macaddr[0]) /* multicast address */
duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
else
duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;
vpath = &vdev->vpaths[mac->vpath_no];
status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
mac->macmask, duplicate_mode);
if (status != VXGE_HW_OK) {
vxge_debug_init(VXGE_ERR,
"DA config add entry failed for vpath:%d",
vpath->device_id);
} else
if (FALSE == vxge_mac_list_add(vpath, mac))
status = -EPERM;
return status;
}
static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
{
struct list_head *entry, *next;
u64 del_mac = 0;
u8 *mac_address = (u8 *)(&del_mac);
/* Copy the mac address to delete from the list */
memcpy(mac_address, mac->macaddr, ETH_ALEN);
list_for_each_safe(entry, next, &vpath->mac_addr_list) {
if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
list_del(entry);
kfree((struct vxge_mac_addrs *)entry);
vpath->mac_addr_cnt--;
/* Is this a multicast address */
if (0x01 & mac->macaddr[0])
vpath->mcast_addr_cnt--;
return TRUE;
}
}
return FALSE;
}
/* delete a mac address from DA table */
static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
struct macInfo *mac)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath;
vpath = &vdev->vpaths[mac->vpath_no];
status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
mac->macmask);
if (status != VXGE_HW_OK) {
vxge_debug_init(VXGE_ERR,
"DA config delete entry failed for vpath:%d",
vpath->device_id);
} else
vxge_mac_list_del(vpath, mac);
return status;
}
/* list all mac addresses from DA table */
enum vxge_hw_status
static vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath,
struct macInfo *mac)
{
enum vxge_hw_status status = VXGE_HW_OK;
unsigned char macmask[ETH_ALEN];
unsigned char macaddr[ETH_ALEN];
status = vxge_hw_vpath_mac_addr_get(vpath->handle,
macaddr, macmask);
if (status != VXGE_HW_OK) {
vxge_debug_init(VXGE_ERR,
"DA config list entry failed for vpath:%d",
vpath->device_id);
return status;
}
while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
macaddr, macmask);
if (status != VXGE_HW_OK)
break;
}
return status;
}
/* Store all vlan ids from the list to the vid table */
static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxgedev *vdev = vpath->vdev;
u16 vid;
if (vdev->vlgrp && vpath->is_open) {
for (vid = 0; vid < VLAN_N_VID; vid++) {
if (!vlan_group_get_device(vdev->vlgrp, vid))
continue;
/* Add these vlan to the vid table */
status = vxge_hw_vpath_vid_add(vpath->handle, vid);
}
}
return status;
}
/* Store all mac addresses from the list to the DA table */
static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct macInfo mac_info;
u8 *mac_address = NULL;
struct list_head *entry, *next;
memset(&mac_info, 0, sizeof(struct macInfo));
if (vpath->is_open) {
list_for_each_safe(entry, next, &vpath->mac_addr_list) {
mac_address =
(u8 *)&
((struct vxge_mac_addrs *)entry)->macaddr;
memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
((struct vxge_mac_addrs *)entry)->state =
VXGE_LL_MAC_ADDR_IN_DA_TABLE;
/* does this mac address already exist in da table? */
status = vxge_search_mac_addr_in_da_table(vpath,
&mac_info);
if (status != VXGE_HW_OK) {
/* Add this mac address to the DA table */
status = vxge_hw_vpath_mac_addr_add(
vpath->handle, mac_info.macaddr,
mac_info.macmask,
VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
if (status != VXGE_HW_OK) {
vxge_debug_init(VXGE_ERR,
"DA add entry failed for vpath:%d",
vpath->device_id);
((struct vxge_mac_addrs *)entry)->state
= VXGE_LL_MAC_ADDR_IN_LIST;
}
}
}
}
return status;
}
/* reset vpaths */ /* reset vpaths */
enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev) enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
{ {
...@@ -2042,6 +2026,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev) ...@@ -2042,6 +2026,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
vpath->ring.ndev = vdev->ndev; vpath->ring.ndev = vdev->ndev;
vpath->ring.pdev = vdev->pdev; vpath->ring.pdev = vdev->pdev;
status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle); status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle);
if (status == VXGE_HW_OK) { if (status == VXGE_HW_OK) {
vpath->fifo.handle = vpath->fifo.handle =
...@@ -2070,9 +2055,8 @@ static int vxge_open_vpaths(struct vxgedev *vdev) ...@@ -2070,9 +2055,8 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
vdev->stats.vpaths_open++; vdev->stats.vpaths_open++;
} else { } else {
vdev->stats.vpath_open_fail++; vdev->stats.vpath_open_fail++;
vxge_debug_init(VXGE_ERR, vxge_debug_init(VXGE_ERR, "%s: vpath: %d failed to "
"%s: vpath: %d failed to open " "open with status: %d",
"with status: %d",
vdev->ndev->name, vpath->device_id, vdev->ndev->name, vpath->device_id,
status); status);
vxge_close_vpaths(vdev, 0); vxge_close_vpaths(vdev, 0);
...@@ -2082,6 +2066,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev) ...@@ -2082,6 +2066,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
vp_id = vpath->handle->vpath->vp_id; vp_id = vpath->handle->vpath->vp_id;
vdev->vpaths_deployed |= vxge_mBIT(vp_id); vdev->vpaths_deployed |= vxge_mBIT(vp_id);
} }
return VXGE_HW_OK; return VXGE_HW_OK;
} }
...@@ -2114,8 +2099,7 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id) ...@@ -2114,8 +2099,7 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
if (unlikely(!is_vxge_card_up(vdev))) if (unlikely(!is_vxge_card_up(vdev)))
return IRQ_HANDLED; return IRQ_HANDLED;
status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, &reason);
&reason);
if (status == VXGE_HW_OK) { if (status == VXGE_HW_OK) {
vxge_hw_device_mask_all(hldev); vxge_hw_device_mask_all(hldev);
...@@ -2568,8 +2552,7 @@ static void vxge_poll_vp_lockup(unsigned long data) ...@@ -2568,8 +2552,7 @@ static void vxge_poll_vp_lockup(unsigned long data)
* Return value: '0' on success and an appropriate (-)ve integer as * Return value: '0' on success and an appropriate (-)ve integer as
* defined in errno.h file on failure. * defined in errno.h file on failure.
*/ */
static int static int vxge_open(struct net_device *dev)
vxge_open(struct net_device *dev)
{ {
enum vxge_hw_status status; enum vxge_hw_status status;
struct vxgedev *vdev; struct vxgedev *vdev;
...@@ -2578,6 +2561,7 @@ vxge_open(struct net_device *dev) ...@@ -2578,6 +2561,7 @@ vxge_open(struct net_device *dev)
int ret = 0; int ret = 0;
int i; int i;
u64 val64, function_mode; u64 val64, function_mode;
vxge_debug_entryexit(VXGE_TRACE, vxge_debug_entryexit(VXGE_TRACE,
"%s: %s:%d", dev->name, __func__, __LINE__); "%s: %s:%d", dev->name, __func__, __LINE__);
...@@ -2830,7 +2814,6 @@ static int do_vxge_close(struct net_device *dev, int do_io) ...@@ -2830,7 +2814,6 @@ static int do_vxge_close(struct net_device *dev, int do_io)
struct vxge_hw_mrpcim_reg, struct vxge_hw_mrpcim_reg,
rts_mgr_cbasin_cfg), rts_mgr_cbasin_cfg),
&val64); &val64);
if (status == VXGE_HW_OK) { if (status == VXGE_HW_OK) {
val64 &= ~vpath_vector; val64 &= ~vpath_vector;
status = vxge_hw_mgmt_reg_write(vdev->devh, status = vxge_hw_mgmt_reg_write(vdev->devh,
...@@ -2914,8 +2897,7 @@ static int do_vxge_close(struct net_device *dev, int do_io) ...@@ -2914,8 +2897,7 @@ static int do_vxge_close(struct net_device *dev, int do_io)
* Return value: '0' on success and an appropriate (-)ve integer as * Return value: '0' on success and an appropriate (-)ve integer as
* defined in errno.h file on failure. * defined in errno.h file on failure.
*/ */
static int static int vxge_close(struct net_device *dev)
vxge_close(struct net_device *dev)
{ {
do_vxge_close(dev, 1); do_vxge_close(dev, 1);
return 0; return 0;
...@@ -2989,9 +2971,7 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats) ...@@ -2989,9 +2971,7 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes; net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes;
net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors; net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors;
net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast; net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast;
net_stats->rx_dropped += net_stats->rx_dropped += vdev->vpaths[k].ring.stats.rx_dropped;
vdev->vpaths[k].ring.stats.rx_dropped;
net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms; net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms;
net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes; net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes;
net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors; net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors;
...@@ -3264,15 +3244,12 @@ static const struct net_device_ops vxge_netdev_ops = { ...@@ -3264,15 +3244,12 @@ static const struct net_device_ops vxge_netdev_ops = {
.ndo_start_xmit = vxge_xmit, .ndo_start_xmit = vxge_xmit,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_set_multicast_list = vxge_set_multicast, .ndo_set_multicast_list = vxge_set_multicast,
.ndo_do_ioctl = vxge_ioctl, .ndo_do_ioctl = vxge_ioctl,
.ndo_set_mac_address = vxge_set_mac_addr, .ndo_set_mac_address = vxge_set_mac_addr,
.ndo_change_mtu = vxge_change_mtu, .ndo_change_mtu = vxge_change_mtu,
.ndo_vlan_rx_register = vxge_vlan_rx_register, .ndo_vlan_rx_register = vxge_vlan_rx_register,
.ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid, .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid,
.ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid, .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid,
.ndo_tx_timeout = vxge_tx_watchdog, .ndo_tx_timeout = vxge_tx_watchdog,
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = vxge_netpoll, .ndo_poll_controller = vxge_netpoll,
...@@ -3698,9 +3675,9 @@ static int __devinit vxge_config_vpaths( ...@@ -3698,9 +3675,9 @@ static int __devinit vxge_config_vpaths(
device_config->vp_config[i].tti.timer_ac_en = device_config->vp_config[i].tti.timer_ac_en =
VXGE_HW_TIM_TIMER_AC_ENABLE; VXGE_HW_TIM_TIMER_AC_ENABLE;
/* For msi-x with napi (each vector /* For msi-x with napi (each vector has a handler of its own) -
has a handler of its own) - * Set CI to OFF for all vpaths
Set CI to OFF for all vpaths */ */
device_config->vp_config[i].tti.timer_ci_en = device_config->vp_config[i].tti.timer_ci_en =
VXGE_HW_TIM_TIMER_CI_DISABLE; VXGE_HW_TIM_TIMER_CI_DISABLE;
...@@ -3730,10 +3707,13 @@ static int __devinit vxge_config_vpaths( ...@@ -3730,10 +3707,13 @@ static int __devinit vxge_config_vpaths(
device_config->vp_config[i].ring.ring_blocks = device_config->vp_config[i].ring.ring_blocks =
VXGE_HW_DEF_RING_BLOCKS; VXGE_HW_DEF_RING_BLOCKS;
device_config->vp_config[i].ring.buffer_mode = device_config->vp_config[i].ring.buffer_mode =
VXGE_HW_RING_RXD_BUFFER_MODE_1; VXGE_HW_RING_RXD_BUFFER_MODE_1;
device_config->vp_config[i].ring.rxds_limit = device_config->vp_config[i].ring.rxds_limit =
VXGE_HW_DEF_RING_RXDS_LIMIT; VXGE_HW_DEF_RING_RXDS_LIMIT;
device_config->vp_config[i].ring.scatter_mode = device_config->vp_config[i].ring.scatter_mode =
VXGE_HW_RING_SCATTER_MODE_A; VXGE_HW_RING_SCATTER_MODE_A;
...@@ -3813,6 +3793,7 @@ static void __devinit vxge_device_config_init( ...@@ -3813,6 +3793,7 @@ static void __devinit vxge_device_config_init(
device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX; device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX;
break; break;
} }
/* Timer period between device poll */ /* Timer period between device poll */
device_config->device_poll_millis = VXGE_TIMER_DELAY; device_config->device_poll_millis = VXGE_TIMER_DELAY;
...@@ -3824,16 +3805,10 @@ static void __devinit vxge_device_config_init( ...@@ -3824,16 +3805,10 @@ static void __devinit vxge_device_config_init(
vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ", vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
__func__); __func__);
vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_initial : %d",
device_config->dma_blockpool_initial);
vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_max : %d",
device_config->dma_blockpool_max);
vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d", vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
device_config->intr_mode); device_config->intr_mode);
vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d", vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
device_config->device_poll_millis); device_config->device_poll_millis);
vxge_debug_ll_config(VXGE_TRACE, "rts_mac_en : %d",
device_config->rts_mac_en);
vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d", vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
device_config->rth_en); device_config->rth_en);
vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d", vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
...@@ -4013,7 +3988,7 @@ static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev) ...@@ -4013,7 +3988,7 @@ static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
} }
pci_set_master(pdev); pci_set_master(pdev);
vxge_reset(vdev); do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
return PCI_ERS_RESULT_RECOVERED; return PCI_ERS_RESULT_RECOVERED;
} }
...@@ -4244,9 +4219,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) ...@@ -4244,9 +4219,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
attr.pdev = pdev; attr.pdev = pdev;
/* In SRIOV-17 mode, functions of the same adapter /* In SRIOV-17 mode, functions of the same adapter
* can be deployed on different buses */ * can be deployed on different buses
if ((!pdev->is_virtfn) && ((bus != pdev->bus->number) || */
(device != PCI_SLOT(pdev->devfn)))) if (((bus != pdev->bus->number) || (device != PCI_SLOT(pdev->devfn))) &&
!pdev->is_virtfn)
new_device = 1; new_device = 1;
bus = pdev->bus->number; bus = pdev->bus->number;
...@@ -4264,6 +4240,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) ...@@ -4264,6 +4240,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
driver_config->config_dev_cnt = 0; driver_config->config_dev_cnt = 0;
driver_config->total_dev_cnt = 0; driver_config->total_dev_cnt = 0;
} }
/* Now making the CPU based no of vpath calculation /* Now making the CPU based no of vpath calculation
* applicable for individual functions as well. * applicable for individual functions as well.
*/ */
...@@ -4286,11 +4263,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) ...@@ -4286,11 +4263,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
goto _exit0; goto _exit0;
} }
ll_config = kzalloc(sizeof(*ll_config), GFP_KERNEL); ll_config = kzalloc(sizeof(struct vxge_config), GFP_KERNEL);
if (!ll_config) { if (!ll_config) {
ret = -ENOMEM; ret = -ENOMEM;
vxge_debug_init(VXGE_ERR, vxge_debug_init(VXGE_ERR,
"ll_config : malloc failed %s %d", "device_config : malloc failed %s %d",
__FILE__, __LINE__); __FILE__, __LINE__);
goto _exit0; goto _exit0;
} }
...@@ -4746,6 +4723,10 @@ vxge_starter(void) ...@@ -4746,6 +4723,10 @@ vxge_starter(void)
return -ENOMEM; return -ENOMEM;
ret = pci_register_driver(&vxge_driver); ret = pci_register_driver(&vxge_driver);
if (ret) {
kfree(driver_config);
goto err;
}
if (driver_config->config_dev_cnt && if (driver_config->config_dev_cnt &&
(driver_config->config_dev_cnt != driver_config->total_dev_cnt)) (driver_config->config_dev_cnt != driver_config->total_dev_cnt))
...@@ -4753,10 +4734,7 @@ vxge_starter(void) ...@@ -4753,10 +4734,7 @@ vxge_starter(void)
"%s: Configured %d of %d devices", "%s: Configured %d of %d devices",
VXGE_DRIVER_NAME, driver_config->config_dev_cnt, VXGE_DRIVER_NAME, driver_config->config_dev_cnt,
driver_config->total_dev_cnt); driver_config->total_dev_cnt);
err:
if (ret)
kfree(driver_config);
return ret; return ret;
} }
......
...@@ -305,8 +305,8 @@ struct vxge_vpath { ...@@ -305,8 +305,8 @@ struct vxge_vpath {
int is_configured; int is_configured;
int is_open; int is_open;
struct vxgedev *vdev; struct vxgedev *vdev;
u8 (macaddr)[ETH_ALEN]; u8 macaddr[ETH_ALEN];
u8 (macmask)[ETH_ALEN]; u8 macmask[ETH_ALEN];
#define VXGE_MAX_LEARN_MAC_ADDR_CNT 2048 #define VXGE_MAX_LEARN_MAC_ADDR_CNT 2048
/* mac addresses currently programmed into NIC */ /* mac addresses currently programmed into NIC */
...@@ -420,10 +420,8 @@ struct vxge_tx_priv { ...@@ -420,10 +420,8 @@ struct vxge_tx_priv {
mod_timer(&timer, (jiffies + exp)); \ mod_timer(&timer, (jiffies + exp)); \
} while (0); } while (0);
extern void vxge_initialize_ethtool_ops(struct net_device *ndev); void vxge_initialize_ethtool_ops(struct net_device *ndev);
enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev); enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override); int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
/** /**
......
...@@ -17,13 +17,6 @@ ...@@ -17,13 +17,6 @@
#include "vxge-config.h" #include "vxge-config.h"
#include "vxge-main.h" #include "vxge-main.h"
static enum vxge_hw_status
__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev,
u32 vp_id, enum vxge_hw_event type);
static enum vxge_hw_status
__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
u32 skip_alarms);
/* /*
* vxge_hw_vpath_intr_enable - Enable vpath interrupts. * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
* @vp: Virtual Path handle. * @vp: Virtual Path handle.
...@@ -418,151 +411,6 @@ void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev) ...@@ -418,151 +411,6 @@ void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
val32 = readl(&hldev->common_reg->titan_general_int_status); val32 = readl(&hldev->common_reg->titan_general_int_status);
} }
/**
* vxge_hw_device_begin_irq - Begin IRQ processing.
* @hldev: HW device handle.
* @skip_alarms: Do not clear the alarms
* @reason: "Reason" for the interrupt, the value of Titan's
* general_int_status register.
*
* The function performs two actions, It first checks whether (shared IRQ) the
* interrupt was raised by the device. Next, it masks the device interrupts.
*
* Note:
* vxge_hw_device_begin_irq() does not flush MMIO writes through the
* bridge. Therefore, two back-to-back interrupts are potentially possible.
*
* Returns: 0, if the interrupt is not "ours" (note that in this case the
* device remain enabled).
* Otherwise, vxge_hw_device_begin_irq() returns 64bit general adapter
* status.
*/
enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
u32 skip_alarms, u64 *reason)
{
u32 i;
u64 val64;
u64 adapter_status;
u64 vpath_mask;
enum vxge_hw_status ret = VXGE_HW_OK;
val64 = readq(&hldev->common_reg->titan_general_int_status);
if (unlikely(!val64)) {
/* not Titan interrupt */
*reason = 0;
ret = VXGE_HW_ERR_WRONG_IRQ;
goto exit;
}
if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
adapter_status = readq(&hldev->common_reg->adapter_status);
if (adapter_status == VXGE_HW_ALL_FOXES) {
__vxge_hw_device_handle_error(hldev,
NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
*reason = 0;
ret = VXGE_HW_ERR_SLOT_FREEZE;
goto exit;
}
}
hldev->stats.sw_dev_info_stats.total_intr_cnt++;
*reason = val64;
vpath_mask = hldev->vpaths_deployed >>
(64 - VXGE_HW_MAX_VIRTUAL_PATHS);
if (val64 &
VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
return VXGE_HW_OK;
}
hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
if (unlikely(val64 &
VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
enum vxge_hw_status error_level = VXGE_HW_OK;
hldev->stats.sw_dev_err_stats.vpath_alarms++;
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
continue;
ret = __vxge_hw_vpath_alarm_process(
&hldev->virtual_paths[i], skip_alarms);
error_level = VXGE_HW_SET_LEVEL(ret, error_level);
if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
(ret == VXGE_HW_ERR_SLOT_FREEZE)))
break;
}
ret = error_level;
}
exit:
return ret;
}
/*
* __vxge_hw_device_handle_link_up_ind
* @hldev: HW device handle.
*
* Link up indication handler. The function is invoked by HW when
* Titan indicates that the link is up for programmable amount of time.
*/
static enum vxge_hw_status
__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
{
/*
* If the previous link state is not down, return.
*/
if (hldev->link_state == VXGE_HW_LINK_UP)
goto exit;
hldev->link_state = VXGE_HW_LINK_UP;
/* notify driver */
if (hldev->uld_callbacks.link_up)
hldev->uld_callbacks.link_up(hldev);
exit:
return VXGE_HW_OK;
}
/*
* __vxge_hw_device_handle_link_down_ind
* @hldev: HW device handle.
*
* Link down indication handler. The function is invoked by HW when
* Titan indicates that the link is down.
*/
static enum vxge_hw_status
__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
{
/*
* If the previous link state is not down, return.
*/
if (hldev->link_state == VXGE_HW_LINK_DOWN)
goto exit;
hldev->link_state = VXGE_HW_LINK_DOWN;
/* notify driver */
if (hldev->uld_callbacks.link_down)
hldev->uld_callbacks.link_down(hldev);
exit:
return VXGE_HW_OK;
}
/** /**
* __vxge_hw_device_handle_error - Handle error * __vxge_hw_device_handle_error - Handle error
* @hldev: HW device * @hldev: HW device
...@@ -572,9 +420,7 @@ __vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev) ...@@ -572,9 +420,7 @@ __vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
* Handle error. * Handle error.
*/ */
static enum vxge_hw_status static enum vxge_hw_status
__vxge_hw_device_handle_error( __vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
struct __vxge_hw_device *hldev,
u32 vp_id,
enum vxge_hw_event type) enum vxge_hw_event type)
{ {
switch (type) { switch (type) {
...@@ -615,97 +461,520 @@ __vxge_hw_device_handle_error( ...@@ -615,97 +461,520 @@ __vxge_hw_device_handle_error(
return VXGE_HW_OK; return VXGE_HW_OK;
} }
/** /*
* vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the * __vxge_hw_device_handle_link_down_ind
* condition that has caused the Tx and RX interrupt. * @hldev: HW device handle.
* @hldev: HW device.
* *
* Acknowledge (that is, clear) the condition that has caused * Link down indication handler. The function is invoked by HW when
* the Tx and Rx interrupt. * Titan indicates that the link is down.
* See also: vxge_hw_device_begin_irq(),
* vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
*/ */
void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev) static enum vxge_hw_status
__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
{ {
/*
* If the previous link state is not down, return.
*/
if (hldev->link_state == VXGE_HW_LINK_DOWN)
goto exit;
if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) || hldev->link_state = VXGE_HW_LINK_DOWN;
(hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
&hldev->common_reg->tim_int_status0);
}
if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) || /* notify driver */
(hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) { if (hldev->uld_callbacks.link_down)
__vxge_hw_pio_mem_write32_upper( hldev->uld_callbacks.link_down(hldev);
(hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] | exit:
hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]), return VXGE_HW_OK;
&hldev->common_reg->tim_int_status1);
}
} }
/* /*
* vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel * __vxge_hw_device_handle_link_up_ind
* @channel: Channel * @hldev: HW device handle.
* @dtrh: Buffer to return the DTR pointer
*
* Allocates a dtr from the reserve array. If the reserve array is empty,
* it swaps the reserve and free arrays.
* *
* Link up indication handler. The function is invoked by HW when
* Titan indicates that the link is up for programmable amount of time.
*/ */
static enum vxge_hw_status static enum vxge_hw_status
vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh) __vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
{ {
void **tmp_arr; /*
* If the previous link state is not down, return.
*/
if (hldev->link_state == VXGE_HW_LINK_UP)
goto exit;
if (channel->reserve_ptr - channel->reserve_top > 0) { hldev->link_state = VXGE_HW_LINK_UP;
_alloc_after_swap:
*dtrh = channel->reserve_arr[--channel->reserve_ptr];
/* notify driver */
if (hldev->uld_callbacks.link_up)
hldev->uld_callbacks.link_up(hldev);
exit:
return VXGE_HW_OK; return VXGE_HW_OK;
}
/* switch between empty and full arrays */
/* the idea behind such a design is that by having free and reserved
* arrays separated we basically separated irq and non-irq parts.
* i.e. no additional lock need to be done when we free a resource */
if (channel->length - channel->free_ptr > 0) {
tmp_arr = channel->reserve_arr;
channel->reserve_arr = channel->free_arr;
channel->free_arr = tmp_arr;
channel->reserve_ptr = channel->length;
channel->reserve_top = channel->free_ptr;
channel->free_ptr = channel->length;
channel->stats->reserve_free_swaps_cnt++;
goto _alloc_after_swap;
}
channel->stats->full_cnt++;
*dtrh = NULL;
return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
} }
/* /*
* vxge_hw_channel_dtr_post - Post a dtr to the channel * __vxge_hw_vpath_alarm_process - Process Alarms.
* @channelh: Channel * @vpath: Virtual Path.
* @dtrh: DTR pointer * @skip_alarms: Do not clear the alarms
* *
* Posts a dtr to work array. * Process vpath alarms.
* *
*/ */
static void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, static enum vxge_hw_status
void *dtrh) __vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
u32 skip_alarms)
{ {
vxge_assert(channel->work_arr[channel->post_index] == NULL); u64 val64;
u64 alarm_status;
channel->work_arr[channel->post_index++] = dtrh; u64 pic_status;
struct __vxge_hw_device *hldev = NULL;
enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
u64 mask64;
struct vxge_hw_vpath_stats_sw_info *sw_stats;
struct vxge_hw_vpath_reg __iomem *vp_reg;
if (vpath == NULL) {
alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
alarm_event);
goto out2;
}
hldev = vpath->hldev;
vp_reg = vpath->vp_reg;
alarm_status = readq(&vp_reg->vpath_general_int_status);
if (alarm_status == VXGE_HW_ALL_FOXES) {
alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
alarm_event);
goto out;
}
sw_stats = vpath->sw_stats;
if (alarm_status & ~(
VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
sw_stats->error_stats.unknown_alarms++;
alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
alarm_event);
goto out;
}
if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
val64 = readq(&vp_reg->xgmac_vp_int_status);
if (val64 &
VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
if (((val64 &
VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
(!(val64 &
VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
((val64 &
VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
(!(val64 &
VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
))) {
sw_stats->error_stats.network_sustained_fault++;
writeq(
VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
&vp_reg->asic_ntwk_vp_err_mask);
__vxge_hw_device_handle_link_down_ind(hldev);
alarm_event = VXGE_HW_SET_LEVEL(
VXGE_HW_EVENT_LINK_DOWN, alarm_event);
}
if (((val64 &
VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
(!(val64 &
VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
((val64 &
VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
(!(val64 &
VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
))) {
sw_stats->error_stats.network_sustained_ok++;
writeq(
VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
&vp_reg->asic_ntwk_vp_err_mask);
__vxge_hw_device_handle_link_up_ind(hldev);
alarm_event = VXGE_HW_SET_LEVEL(
VXGE_HW_EVENT_LINK_UP, alarm_event);
}
writeq(VXGE_HW_INTR_MASK_ALL,
&vp_reg->asic_ntwk_vp_err_reg);
alarm_event = VXGE_HW_SET_LEVEL(
VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
if (skip_alarms)
return VXGE_HW_OK;
}
}
if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
pic_status = readq(&vp_reg->vpath_ppif_int_status);
if (pic_status &
VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
val64 = readq(&vp_reg->general_errors_reg);
mask64 = readq(&vp_reg->general_errors_mask);
if ((val64 &
VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
~mask64) {
sw_stats->error_stats.ini_serr_det++;
alarm_event = VXGE_HW_SET_LEVEL(
VXGE_HW_EVENT_SERR, alarm_event);
}
if ((val64 &
VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
~mask64) {
sw_stats->error_stats.dblgen_fifo0_overflow++;
alarm_event = VXGE_HW_SET_LEVEL(
VXGE_HW_EVENT_FIFO_ERR, alarm_event);
}
if ((val64 &
VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
~mask64)
sw_stats->error_stats.statsb_pif_chain_error++;
if ((val64 &
VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
~mask64)
sw_stats->error_stats.statsb_drop_timeout++;
if ((val64 &
VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
~mask64)
sw_stats->error_stats.target_illegal_access++;
if (!skip_alarms) {
writeq(VXGE_HW_INTR_MASK_ALL,
&vp_reg->general_errors_reg);
alarm_event = VXGE_HW_SET_LEVEL(
VXGE_HW_EVENT_ALARM_CLEARED,
alarm_event);
}
}
if (pic_status &
VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
val64 = readq(&vp_reg->kdfcctl_errors_reg);
mask64 = readq(&vp_reg->kdfcctl_errors_mask);
if ((val64 &
VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
~mask64) {
sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
alarm_event = VXGE_HW_SET_LEVEL(
VXGE_HW_EVENT_FIFO_ERR,
alarm_event);
}
if ((val64 &
VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
~mask64) {
sw_stats->error_stats.kdfcctl_fifo0_poison++;
alarm_event = VXGE_HW_SET_LEVEL(
VXGE_HW_EVENT_FIFO_ERR,
alarm_event);
}
if ((val64 &
VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
~mask64) {
sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
alarm_event = VXGE_HW_SET_LEVEL(
VXGE_HW_EVENT_FIFO_ERR,
alarm_event);
}
if (!skip_alarms) {
writeq(VXGE_HW_INTR_MASK_ALL,
&vp_reg->kdfcctl_errors_reg);
alarm_event = VXGE_HW_SET_LEVEL(
VXGE_HW_EVENT_ALARM_CLEARED,
alarm_event);
}
}
}
if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
val64 = readq(&vp_reg->wrdma_alarm_status);
if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
val64 = readq(&vp_reg->prc_alarm_reg);
mask64 = readq(&vp_reg->prc_alarm_mask);
if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
~mask64)
sw_stats->error_stats.prc_ring_bumps++;
if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
~mask64) {
sw_stats->error_stats.prc_rxdcm_sc_err++;
alarm_event = VXGE_HW_SET_LEVEL(
VXGE_HW_EVENT_VPATH_ERR,
alarm_event);
}
if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
& ~mask64) {
sw_stats->error_stats.prc_rxdcm_sc_abort++;
alarm_event = VXGE_HW_SET_LEVEL(
VXGE_HW_EVENT_VPATH_ERR,
alarm_event);
}
if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
& ~mask64) {
sw_stats->error_stats.prc_quanta_size_err++;
alarm_event = VXGE_HW_SET_LEVEL(
VXGE_HW_EVENT_VPATH_ERR,
alarm_event);
}
if (!skip_alarms) {
writeq(VXGE_HW_INTR_MASK_ALL,
&vp_reg->prc_alarm_reg);
alarm_event = VXGE_HW_SET_LEVEL(
VXGE_HW_EVENT_ALARM_CLEARED,
alarm_event);
}
}
}
out:
hldev->stats.sw_dev_err_stats.vpath_alarms++;
out2:
if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
(alarm_event == VXGE_HW_EVENT_UNKNOWN))
return VXGE_HW_OK;
__vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
if (alarm_event == VXGE_HW_EVENT_SERR)
return VXGE_HW_ERR_CRITICAL;
return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
VXGE_HW_ERR_SLOT_FREEZE :
(alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
VXGE_HW_ERR_VPATH;
}
/**
* vxge_hw_device_begin_irq - Begin IRQ processing.
* @hldev: HW device handle.
* @skip_alarms: Do not clear the alarms
* @reason: "Reason" for the interrupt, the value of Titan's
* general_int_status register.
*
* The function performs two actions, It first checks whether (shared IRQ) the
* interrupt was raised by the device. Next, it masks the device interrupts.
*
* Note:
* vxge_hw_device_begin_irq() does not flush MMIO writes through the
* bridge. Therefore, two back-to-back interrupts are potentially possible.
*
* Returns: 0, if the interrupt is not "ours" (note that in this case the
* device remain enabled).
* Otherwise, vxge_hw_device_begin_irq() returns 64bit general adapter
* status.
*/
enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
u32 skip_alarms, u64 *reason)
{
u32 i;
u64 val64;
u64 adapter_status;
u64 vpath_mask;
enum vxge_hw_status ret = VXGE_HW_OK;
val64 = readq(&hldev->common_reg->titan_general_int_status);
if (unlikely(!val64)) {
/* not Titan interrupt */
*reason = 0;
ret = VXGE_HW_ERR_WRONG_IRQ;
goto exit;
}
if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
adapter_status = readq(&hldev->common_reg->adapter_status);
if (adapter_status == VXGE_HW_ALL_FOXES) {
__vxge_hw_device_handle_error(hldev,
NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
*reason = 0;
ret = VXGE_HW_ERR_SLOT_FREEZE;
goto exit;
}
}
hldev->stats.sw_dev_info_stats.total_intr_cnt++;
*reason = val64;
vpath_mask = hldev->vpaths_deployed >>
(64 - VXGE_HW_MAX_VIRTUAL_PATHS);
if (val64 &
VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
return VXGE_HW_OK;
}
hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
if (unlikely(val64 &
VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
enum vxge_hw_status error_level = VXGE_HW_OK;
hldev->stats.sw_dev_err_stats.vpath_alarms++;
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
continue;
ret = __vxge_hw_vpath_alarm_process(
&hldev->virtual_paths[i], skip_alarms);
error_level = VXGE_HW_SET_LEVEL(ret, error_level);
if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
(ret == VXGE_HW_ERR_SLOT_FREEZE)))
break;
}
ret = error_level;
}
exit:
return ret;
}
/**
* vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
* condition that has caused the Tx and RX interrupt.
* @hldev: HW device.
*
* Acknowledge (that is, clear) the condition that has caused
* the Tx and Rx interrupt.
* See also: vxge_hw_device_begin_irq(),
* vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
*/
void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
{
if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
(hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
&hldev->common_reg->tim_int_status0);
}
if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
(hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
__vxge_hw_pio_mem_write32_upper(
(hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
&hldev->common_reg->tim_int_status1);
}
}
/*
* vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
* @channel: Channel
* @dtrh: Buffer to return the DTR pointer
*
* Allocates a dtr from the reserve array. If the reserve array is empty,
* it swaps the reserve and free arrays.
*
*/
static enum vxge_hw_status
vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
{
void **tmp_arr;
if (channel->reserve_ptr - channel->reserve_top > 0) {
_alloc_after_swap:
*dtrh = channel->reserve_arr[--channel->reserve_ptr];
return VXGE_HW_OK;
}
/* switch between empty and full arrays */
/* the idea behind such a design is that by having free and reserved
* arrays separated we basically separated irq and non-irq parts.
* i.e. no additional lock need to be done when we free a resource */
if (channel->length - channel->free_ptr > 0) {
tmp_arr = channel->reserve_arr;
channel->reserve_arr = channel->free_arr;
channel->free_arr = tmp_arr;
channel->reserve_ptr = channel->length;
channel->reserve_top = channel->free_ptr;
channel->free_ptr = channel->length;
channel->stats->reserve_free_swaps_cnt++;
goto _alloc_after_swap;
}
channel->stats->full_cnt++;
*dtrh = NULL;
return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
}
/*
* vxge_hw_channel_dtr_post - Post a dtr to the channel
* @channelh: Channel
* @dtrh: DTR pointer
*
* Posts a dtr to work array.
*
*/
static void
vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
{
vxge_assert(channel->work_arr[channel->post_index] == NULL);
channel->work_arr[channel->post_index++] = dtrh;
/* wrap-around */ /* wrap-around */
if (channel->post_index == channel->length) if (channel->post_index == channel->length)
channel->post_index = 0; channel->post_index = 0;
...@@ -911,10 +1180,6 @@ void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh) ...@@ -911,10 +1180,6 @@ void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
*/ */
void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh) void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
{ {
struct __vxge_hw_channel *channel;
channel = &ring->channel;
wmb(); wmb();
vxge_hw_ring_rxd_post_post(ring, rxdh); vxge_hw_ring_rxd_post_post(ring, rxdh);
} }
...@@ -1544,605 +1809,327 @@ vxge_hw_vpath_mac_addr_get_next( ...@@ -1544,605 +1809,327 @@ vxge_hw_vpath_mac_addr_get_next(
data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1); data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2); data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
for (i = ETH_ALEN; i > 0; i--) {
macaddr[i-1] = (u8)(data1 & 0xFF);
data1 >>= 8;
macaddr_mask[i-1] = (u8)(data2 & 0xFF);
data2 >>= 8;
}
exit:
return status;
}
/**
* vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
* to MAC address table.
* @vp: Vpath handle.
* @macaddr: MAC address to be added for this vpath into the list
* @macaddr_mask: MAC address mask for macaddr
*
* Delete the given mac address and mac address mask into the list for this
* vpath.
* see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
* vxge_hw_vpath_mac_addr_get_next
*
*/
enum vxge_hw_status
vxge_hw_vpath_mac_addr_delete(
struct __vxge_hw_vpath_handle *vp,
u8 (macaddr)[ETH_ALEN],
u8 (macaddr_mask)[ETH_ALEN])
{
u32 i;
u64 data1 = 0ULL;
u64 data2 = 0ULL;
enum vxge_hw_status status = VXGE_HW_OK;
if (vp == NULL) {
status = VXGE_HW_ERR_INVALID_HANDLE;
goto exit;
}
for (i = 0; i < ETH_ALEN; i++) {
data1 <<= 8;
data1 |= (u8)macaddr[i];
data2 <<= 8;
data2 |= (u8)macaddr_mask[i];
}
status = __vxge_hw_vpath_rts_table_set(vp,
VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
0,
VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
exit:
return status;
}
/**
* vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
* to vlan id table.
* @vp: Vpath handle.
* @vid: vlan id to be added for this vpath into the list
*
* Adds the given vlan id into the list for this vpath.
* see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
* vxge_hw_vpath_vid_get_next
*
*/
enum vxge_hw_status
vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
{
enum vxge_hw_status status = VXGE_HW_OK;
if (vp == NULL) {
status = VXGE_HW_ERR_INVALID_HANDLE;
goto exit;
}
status = __vxge_hw_vpath_rts_table_set(vp,
VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
exit:
return status;
}
/**
* vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
* from vlan id table.
* @vp: Vpath handle.
* @vid: Buffer to return vlan id
*
* Returns the first vlan id in the list for this vpath.
* see also: vxge_hw_vpath_vid_get_next
*
*/
enum vxge_hw_status
vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
{
u64 data;
enum vxge_hw_status status = VXGE_HW_OK;
if (vp == NULL) {
status = VXGE_HW_ERR_INVALID_HANDLE;
goto exit;
}
status = __vxge_hw_vpath_rts_table_get(vp,
VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
0, vid, &data);
*vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
exit:
return status;
}
/**
* vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
* to vlan id table.
* @vp: Vpath handle.
* @vid: vlan id to be added for this vpath into the list
*
* Adds the given vlan id into the list for this vpath.
* see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
* vxge_hw_vpath_vid_get_next
*
*/
enum vxge_hw_status
vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
{
enum vxge_hw_status status = VXGE_HW_OK;
if (vp == NULL) {
status = VXGE_HW_ERR_INVALID_HANDLE;
goto exit;
}
status = __vxge_hw_vpath_rts_table_set(vp,
VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
exit:
return status;
}
/**
* vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
* @vp: Vpath handle.
*
* Enable promiscuous mode of Titan-e operation.
*
* See also: vxge_hw_vpath_promisc_disable().
*/
enum vxge_hw_status vxge_hw_vpath_promisc_enable(
struct __vxge_hw_vpath_handle *vp)
{
u64 val64;
struct __vxge_hw_virtualpath *vpath;
enum vxge_hw_status status = VXGE_HW_OK;
if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
status = VXGE_HW_ERR_INVALID_HANDLE;
goto exit;
}
vpath = vp->vpath;
/* Enable promiscous mode for function 0 only */
if (!(vpath->hldev->access_rights &
VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
return VXGE_HW_OK;
val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
		val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
			 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
			 VXGE_HW_RXMAC_VCFG0_BCAST_EN |
			 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;

		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}

/**
 * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
 * @vp: Vpath handle.
 *
 * Disable promiscuous mode of Titan-e operation.
 *
 * See also: vxge_hw_vpath_promisc_enable().
 */
enum vxge_hw_status vxge_hw_vpath_promisc_disable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
		val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
			   VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
			   VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);

		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
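
For illustration only (not part of this patch): a caller would typically drive the two helpers above from the netdev flags; note that vxge_hw_vpath_promisc_enable() is effectively a no-op unless the function has VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM. The helper below is hypothetical and ignores return codes.

static void vxge_sync_promisc(struct net_device *dev,
			      struct __vxge_hw_vpath_handle *vp)
{
	/* Illustration only; status values are ignored here. */
	if (dev->flags & IFF_PROMISC)
		vxge_hw_vpath_promisc_enable(vp);
	else
		vxge_hw_vpath_promisc_disable(vp);
}
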
/*
 * vxge_hw_vpath_bcast_enable - Enable broadcast
 * @vp: Vpath handle.
 *
 * Enable receiving broadcasts.
 */
enum vxge_hw_status vxge_hw_vpath_bcast_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
		val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
/**
 * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
 * @vp: Vpath handle.
 *
 * Enable Titan-e multicast addresses.
 * Returns: VXGE_HW_OK on success.
 *
 */
enum vxge_hw_status vxge_hw_vpath_mcast_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
		val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
/**
 * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
 * @vp: Vpath handle.
 *
 * Disable Titan-e multicast addresses.
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
		val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
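
Similarly, for illustration only: broadcast reception is normally switched on once when the vpath is brought up, while multicast follows the IFF_ALLMULTI flag. The helper below is hypothetical and ignores return codes.

static void vxge_sync_rx_filters(struct net_device *dev,
				 struct __vxge_hw_vpath_handle *vp)
{
	/* Illustration only; status values are ignored here. */
	vxge_hw_vpath_bcast_enable(vp);

	if (dev->flags & IFF_ALLMULTI)
		vxge_hw_vpath_mcast_enable(vp);
	else
		vxge_hw_vpath_mcast_disable(vp);
}
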
/*
 * __vxge_hw_vpath_alarm_process - Process Alarms.
 * @vpath: Virtual Path.
 * @skip_alarms: Do not clear the alarms
 *
 * Process vpath alarms.
 *
 */
static enum vxge_hw_status
__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
			      u32 skip_alarms)
{
	u64 val64;
	u64 alarm_status;
	u64 pic_status;
	struct __vxge_hw_device *hldev = NULL;
	enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
	u64 mask64;
	struct vxge_hw_vpath_stats_sw_info *sw_stats;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vpath == NULL) {
		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
			alarm_event);
		goto out2;
	}

	hldev = vpath->hldev;
	vp_reg = vpath->vp_reg;
	alarm_status = readq(&vp_reg->vpath_general_int_status);

	if (alarm_status == VXGE_HW_ALL_FOXES) {
		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
			alarm_event);
		goto out;
	}

	sw_stats = vpath->sw_stats;

	if (alarm_status & ~(
		VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
		sw_stats->error_stats.unknown_alarms++;

		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
			alarm_event);
		goto out;
	}

	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {

		val64 = readq(&vp_reg->xgmac_vp_int_status);

		if (val64 &
		VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {

			val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);

			if (((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
			    ((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
			     ))) {
				sw_stats->error_stats.network_sustained_fault++;

				writeq(
					VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
					&vp_reg->asic_ntwk_vp_err_mask);

				__vxge_hw_device_handle_link_down_ind(hldev);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_LINK_DOWN, alarm_event);
			}

			if (((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
			    ((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
			     ))) {
				sw_stats->error_stats.network_sustained_ok++;

				writeq(
					VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
					&vp_reg->asic_ntwk_vp_err_mask);

				__vxge_hw_device_handle_link_up_ind(hldev);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_LINK_UP, alarm_event);
			}

			writeq(VXGE_HW_INTR_MASK_ALL,
				&vp_reg->asic_ntwk_vp_err_reg);

			alarm_event = VXGE_HW_SET_LEVEL(
				VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);

			if (skip_alarms)
				return VXGE_HW_OK;
		}
	}

	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {

		pic_status = readq(&vp_reg->vpath_ppif_int_status);

		if (pic_status &
		    VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {

			val64 = readq(&vp_reg->general_errors_reg);
			mask64 = readq(&vp_reg->general_errors_mask);

			if ((val64 &
				VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
				~mask64) {
				sw_stats->error_stats.ini_serr_det++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_SERR, alarm_event);
			}

			if ((val64 &
			    VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
				~mask64) {
				sw_stats->error_stats.dblgen_fifo0_overflow++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR, alarm_event);
			}

			if ((val64 &
			    VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
				~mask64)
				sw_stats->error_stats.statsb_pif_chain_error++;

			if ((val64 &
			   VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
				~mask64)
				sw_stats->error_stats.statsb_drop_timeout++;

			if ((val64 &
				VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
				~mask64)
				sw_stats->error_stats.target_illegal_access++;

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->general_errors_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}

		if (pic_status &
		    VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {

			val64 = readq(&vp_reg->kdfcctl_errors_reg);
			mask64 = readq(&vp_reg->kdfcctl_errors_mask);

			if ((val64 &
			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
				~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_overwrite++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if ((val64 &
			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
				~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_poison++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if ((val64 &
			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
				~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_dma_error++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->kdfcctl_errors_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}
	}

	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {

		val64 = readq(&vp_reg->wrdma_alarm_status);

		if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {

			val64 = readq(&vp_reg->prc_alarm_reg);
			mask64 = readq(&vp_reg->prc_alarm_mask);

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP) &
				~mask64)
				sw_stats->error_stats.prc_ring_bumps++;

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
				~mask64) {
				sw_stats->error_stats.prc_rxdcm_sc_err++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
				& ~mask64) {
				sw_stats->error_stats.prc_rxdcm_sc_abort++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
				& ~mask64) {
				sw_stats->error_stats.prc_quanta_size_err++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->prc_alarm_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}
	}
out:
	hldev->stats.sw_dev_err_stats.vpath_alarms++;
out2:
	if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
		(alarm_event == VXGE_HW_EVENT_UNKNOWN))
		return VXGE_HW_OK;

	__vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);

	if (alarm_event == VXGE_HW_EVENT_SERR)
		return VXGE_HW_ERR_CRITICAL;

	return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
		VXGE_HW_ERR_SLOT_FREEZE :
		(alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
		VXGE_HW_ERR_VPATH;
}
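
For illustration only: the status codes produced above are meant to be mapped to recovery actions by the caller. The sketch below assumes vxge_hw_vpath_alarm_process() is the public wrapper around __vxge_hw_vpath_alarm_process(); the recovery actions are placeholders.

static void vxge_handle_vpath_alarm(struct __vxge_hw_vpath_handle *vp)
{
	enum vxge_hw_status status;

	status = vxge_hw_vpath_alarm_process(vp, 0);	/* clear alarms */

	switch (status) {
	case VXGE_HW_OK:
		break;				/* cleared or benign */
	case VXGE_HW_ERR_FIFO:
	case VXGE_HW_ERR_VPATH:
		/* a per-vpath reset would be scheduled here */
		break;
	default:
		/* VXGE_HW_ERR_CRITICAL / VXGE_HW_ERR_SLOT_FREEZE:
		 * a full device reset would be scheduled here */
		break;
	}
}
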
...
...@@ -2081,10 +2081,6 @@ struct __vxge_hw_ring_rxd_priv {
 #endif
 };

-/* ========================= FIFO PRIVATE API ============================= */
-
-struct vxge_hw_fifo_attr;
-
 struct vxge_hw_mempool_cbs {
 	void (*item_func_alloc)(
 		struct vxge_hw_mempool *mempoolh,
...@@ -2158,27 +2154,27 @@ enum vxge_hw_vpath_mac_addr_add_mode {
 enum vxge_hw_status
 vxge_hw_vpath_mac_addr_add(
 	struct __vxge_hw_vpath_handle *vpath_handle,
-	u8 (macaddr)[ETH_ALEN],
-	u8 (macaddr_mask)[ETH_ALEN],
+	u8 *macaddr,
+	u8 *macaddr_mask,
 	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode);

 enum vxge_hw_status
 vxge_hw_vpath_mac_addr_get(
 	struct __vxge_hw_vpath_handle *vpath_handle,
-	u8 (macaddr)[ETH_ALEN],
-	u8 (macaddr_mask)[ETH_ALEN]);
+	u8 *macaddr,
+	u8 *macaddr_mask);

 enum vxge_hw_status
 vxge_hw_vpath_mac_addr_get_next(
 	struct __vxge_hw_vpath_handle *vpath_handle,
-	u8 (macaddr)[ETH_ALEN],
-	u8 (macaddr_mask)[ETH_ALEN]);
+	u8 *macaddr,
+	u8 *macaddr_mask);

 enum vxge_hw_status
 vxge_hw_vpath_mac_addr_delete(
 	struct __vxge_hw_vpath_handle *vpath_handle,
-	u8 (macaddr)[ETH_ALEN],
-	u8 (macaddr_mask)[ETH_ALEN]);
+	u8 *macaddr,
+	u8 *macaddr_mask);

 enum vxge_hw_status
 vxge_hw_vpath_vid_add(
...@@ -2285,6 +2281,7 @@ vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh);
 int
 vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);

 void
 vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id);
...
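
For illustration only: with the prototypes above now taking plain u8 pointers, a caller can pass any ETH_ALEN-byte buffers. The helper below is hypothetical; the mask value and the VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE duplicate_mode are assumptions used for the example and should be checked against the driver's actual usage.

static enum vxge_hw_status vxge_program_mac(struct __vxge_hw_vpath_handle *vp,
					    const u8 *addr)
{
	u8 macaddr[ETH_ALEN], macaddr_mask[ETH_ALEN];

	memcpy(macaddr, addr, ETH_ALEN);
	memset(macaddr_mask, 0, ETH_ALEN);	/* mask chosen for illustration */

	return vxge_hw_vpath_mac_addr_add(vp, macaddr, macaddr_mask,
					VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
}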