Commit 42821a5b authored by Stephen Hemminger, committed by David S. Miller

vxge: make functions local and remove dead code

Use results of make namespacecheck to make functions local and
remove code that is not used.

Also rename initialize_ethtool_ops to vxge_initialize_ethtool_ops.

Compile tested only.
Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 89ff05ec
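For context, make namespacecheck is a kernel build target (implemented by scripts/namespace.pl) that lists symbols which are global or exported even though nothing outside their own object file references them. The fragment below is only an illustrative sketch with hypothetical file and function names, not part of this patch; it shows the shape of change such a report suggests: drop the header prototype and give the definition internal linkage.

/* foo.h, before: prototype exported for a function no other file calls */
struct foo_dev;
void foo_helper(struct foo_dev *dev);

/* foo.c, after: the prototype is removed from foo.h and the definition is
 * made static, so foo_helper no longer appears in the global namespace */
static void foo_helper(struct foo_dev *dev)
{
	/* body unchanged; only the linkage of the symbol changes */
	(void)dev;
}

static int foo_probe(struct foo_dev *dev)
{
	foo_helper(dev);	/* the only caller lives in the same file */
	return 0;
}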
@@ -183,11 +183,6 @@ struct vxge_hw_device_version {
char version[VXGE_HW_FW_STRLEN];
};
u64
__vxge_hw_vpath_pci_func_mode_get(
u32 vp_id,
struct vxge_hw_vpath_reg __iomem *vpath_reg);
/**
* struct vxge_hw_fifo_config - Configuration of fifo.
* @enable: Is this fifo to be commissioned
@@ -1426,9 +1421,6 @@ struct vxge_hw_rth_hash_types {
u8 hash_type_ipv6ex_en;
};
u32
vxge_hw_device_debug_mask_get(struct __vxge_hw_device *devh);
void vxge_hw_device_debug_set(
struct __vxge_hw_device *devh,
enum vxge_debug_level level,
@@ -1440,9 +1432,6 @@ vxge_hw_device_error_level_get(struct __vxge_hw_device *devh);
u32
vxge_hw_device_trace_level_get(struct __vxge_hw_device *devh);
u32
vxge_hw_device_debug_mask_get(struct __vxge_hw_device *devh);
/**
* vxge_hw_ring_rxd_size_get - Get the size of ring descriptor.
* @buf_mode: Buffer mode (1, 3 or 5)
@@ -1817,60 +1806,10 @@ struct vxge_hw_vpath_attr {
struct vxge_hw_fifo_attr fifo_attr;
};
enum vxge_hw_status
__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
struct __vxge_hw_blockpool *blockpool,
u32 pool_size,
u32 pool_max);
void
__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool);
struct __vxge_hw_blockpool_entry *
__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev,
u32 size);
void
__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev,
struct __vxge_hw_blockpool_entry *entry);
void *
__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev,
u32 size,
struct vxge_hw_mempool_dma *dma_object);
void
__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
void *memblock,
u32 size,
struct vxge_hw_mempool_dma *dma_object);
enum vxge_hw_status
__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config);
enum vxge_hw_status
__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);
enum vxge_hw_status
vxge_hw_mgmt_device_config(struct __vxge_hw_device *devh,
struct vxge_hw_device_config *dev_config, int size);
enum vxge_hw_status __devinit vxge_hw_device_hw_info_get(
void __iomem *bar0,
struct vxge_hw_device_hw_info *hw_info);
enum vxge_hw_status
__vxge_hw_vpath_fw_ver_get(
u32 vp_id,
struct vxge_hw_vpath_reg __iomem *vpath_reg,
struct vxge_hw_device_hw_info *hw_info);
enum vxge_hw_status
__vxge_hw_vpath_card_info_get(
u32 vp_id,
struct vxge_hw_vpath_reg __iomem *vpath_reg,
struct vxge_hw_device_hw_info *hw_info);
enum vxge_hw_status __devinit vxge_hw_device_config_default_get(
struct vxge_hw_device_config *device_config);
@@ -1954,38 +1893,6 @@ static inline void *vxge_os_dma_malloc(struct pci_dev *pdev,
return vaddr;
}
extern void vxge_hw_blockpool_block_add(
struct __vxge_hw_device *devh,
void *block_addr,
u32 length,
struct pci_dev *dma_h,
struct pci_dev *acc_handle);
static inline void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh,
unsigned long size)
{
gfp_t flags;
void *vaddr;
if (in_interrupt())
flags = GFP_ATOMIC | GFP_DMA;
else
flags = GFP_KERNEL | GFP_DMA;
vaddr = kmalloc((size), flags);
vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
}
static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
struct pci_dev **p_dma_acch)
{
unsigned long misaligned = *(unsigned long *)p_dma_acch;
u8 *tmp = (u8 *)vaddr;
tmp -= misaligned;
kfree((void *)tmp);
}
/*
* __vxge_hw_mempool_item_priv - will return pointer on per item private space
*/
@@ -2010,40 +1917,6 @@ __vxge_hw_mempool_item_priv(
(*memblock_item_idx) * mempool->items_priv_size;
}
enum vxge_hw_status
__vxge_hw_mempool_grow(
struct vxge_hw_mempool *mempool,
u32 num_allocate,
u32 *num_allocated);
struct vxge_hw_mempool*
__vxge_hw_mempool_create(
struct __vxge_hw_device *devh,
u32 memblock_size,
u32 item_size,
u32 private_size,
u32 items_initial,
u32 items_max,
struct vxge_hw_mempool_cbs *mp_callback,
void *userdata);
struct __vxge_hw_channel*
__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
enum __vxge_hw_channel_type type, u32 length,
u32 per_dtr_space, void *userdata);
void
__vxge_hw_channel_free(
struct __vxge_hw_channel *channel);
enum vxge_hw_status
__vxge_hw_channel_initialize(
struct __vxge_hw_channel *channel);
enum vxge_hw_status
__vxge_hw_channel_reset(
struct __vxge_hw_channel *channel);
/*
* __vxge_hw_fifo_txdl_priv - Return the max fragments allocated
* for the fifo.
@@ -2065,9 +1938,6 @@ enum vxge_hw_status vxge_hw_vpath_open(
struct vxge_hw_vpath_attr *attr,
struct __vxge_hw_vpath_handle **vpath_handle);
enum vxge_hw_status
__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog);
enum vxge_hw_status vxge_hw_vpath_close(
struct __vxge_hw_vpath_handle *vpath_handle);
@@ -2089,54 +1959,9 @@ enum vxge_hw_status vxge_hw_vpath_mtu_set(
struct __vxge_hw_vpath_handle *vpath_handle,
u32 new_mtu);
enum vxge_hw_status vxge_hw_vpath_stats_enable(
struct __vxge_hw_vpath_handle *vpath_handle);
enum vxge_hw_status
__vxge_hw_vpath_stats_access(
struct __vxge_hw_virtualpath *vpath,
u32 operation,
u32 offset,
u64 *stat);
enum vxge_hw_status
__vxge_hw_vpath_xmac_tx_stats_get(
struct __vxge_hw_virtualpath *vpath,
struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
enum vxge_hw_status
__vxge_hw_vpath_xmac_rx_stats_get(
struct __vxge_hw_virtualpath *vpath,
struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
enum vxge_hw_status
__vxge_hw_vpath_stats_get(
struct __vxge_hw_virtualpath *vpath,
struct vxge_hw_vpath_stats_hw_info *hw_stats);
void
vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp);
enum vxge_hw_status
__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config);
void
__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
enum vxge_hw_status
__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
enum vxge_hw_status
__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg);
enum vxge_hw_status
__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
struct vxge_hw_vpath_reg __iomem *vpath_reg);
enum vxge_hw_status
__vxge_hw_device_register_poll(
void __iomem *reg,
u64 mask, u32 max_millis);
#ifndef readq
static inline u64 readq(void __iomem *addr)
@@ -2168,62 +1993,12 @@ static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr)
writel(val, addr);
}
static inline enum vxge_hw_status
__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
u64 mask, u32 max_millis)
{
enum vxge_hw_status status = VXGE_HW_OK;
__vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
wmb();
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
wmb();
status = __vxge_hw_device_register_poll(addr, mask, max_millis);
return status;
}
struct vxge_hw_toc_reg __iomem *
__vxge_hw_device_toc_get(void __iomem *bar0);
enum vxge_hw_status
__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);
void
__vxge_hw_device_id_get(struct __vxge_hw_device *hldev);
void
__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
enum vxge_hw_status
vxge_hw_device_flick_link_led(struct __vxge_hw_device *devh, u64 on_off);
enum vxge_hw_status
__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
enum vxge_hw_status
__vxge_hw_vpath_pci_read(
struct __vxge_hw_virtualpath *vpath,
u32 phy_func_0,
u32 offset,
u32 *val);
enum vxge_hw_status
__vxge_hw_vpath_addr_get(
u32 vp_id,
struct vxge_hw_vpath_reg __iomem *vpath_reg,
u8 (macaddr)[ETH_ALEN],
u8 (macaddr_mask)[ETH_ALEN]);
u32
__vxge_hw_vpath_func_id_get(
u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);
enum vxge_hw_status
__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
enum vxge_hw_status
vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
/**
* vxge_debug
* @level: level of debug verbosity.
@@ -1142,7 +1142,7 @@ static const struct ethtool_ops vxge_ethtool_ops = {
.get_ethtool_stats = vxge_get_ethtool_stats,
};
void initialize_ethtool_ops(struct net_device *ndev)
void vxge_initialize_ethtool_ops(struct net_device *ndev)
{
SET_ETHTOOL_OPS(ndev, &vxge_ethtool_ops);
}
@@ -82,6 +82,16 @@ module_param_array(bw_percentage, uint, NULL, 0);
static struct vxge_drv_config *driver_config;
static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
struct macInfo *mac);
static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
struct macInfo *mac);
static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac);
static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
static inline int is_vxge_card_up(struct vxgedev *vdev)
{
return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
@@ -138,7 +148,7 @@ static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
* This function is called during interrupt context to notify link up state
* change.
*/
void
static void
vxge_callback_link_up(struct __vxge_hw_device *hldev)
{
struct net_device *dev = hldev->ndev;
@@ -162,7 +172,7 @@ vxge_callback_link_up(struct __vxge_hw_device *hldev)
* This function is called during interrupt context to notify link down state
* change.
*/
void
static void
vxge_callback_link_down(struct __vxge_hw_device *hldev)
{
struct net_device *dev = hldev->ndev;
@@ -354,7 +364,7 @@ static inline void vxge_post(int *dtr_cnt, void **first_dtr,
* If the interrupt is because of a received frame or if the receive ring
* contains fresh as yet un-processed frames, this function is called.
*/
enum vxge_hw_status
static enum vxge_hw_status
vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
u8 t_code, void *userdata)
{
@@ -531,7 +541,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
* freed and frees all skbs whose data have already DMA'ed into the NICs
* internal memory.
*/
enum vxge_hw_status
static enum vxge_hw_status
vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
enum vxge_hw_fifo_tcode t_code, void *userdata,
struct sk_buff ***skb_ptr, int nr_skb, int *more)
@@ -1246,7 +1256,7 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
*
* Enables the interrupts for the vpath
*/
void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
{
struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
int msix_id = 0;
@@ -1279,7 +1289,7 @@ void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
*
* Disables the interrupts for the vpath
*/
void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
{
struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
int msix_id;
@@ -1553,7 +1563,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
*
* driver may reset the chip on events of serr, eccerr, etc
*/
int vxge_reset(struct vxgedev *vdev)
static int vxge_reset(struct vxgedev *vdev)
{
return do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
}
@@ -1724,7 +1734,7 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
return status;
}
int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
{
struct vxge_mac_addrs *new_mac_entry;
u8 *mac_address = NULL;
@@ -1757,7 +1767,7 @@ int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
}
/* Add a mac address to DA table */
enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
struct macInfo *mac)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath;
@@ -1782,7 +1793,7 @@ enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
return status;
}
int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
{
struct list_head *entry, *next;
u64 del_mac = 0;
@@ -1807,7 +1818,7 @@ int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
return FALSE;
}
/* delete a mac address from DA table */
enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
struct macInfo *mac)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath;
@@ -1854,7 +1866,7 @@ static vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath,
}
/* Store all vlan ids from the list to the vid table */
enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxgedev *vdev = vpath->vdev;
@@ -1874,7 +1886,7 @@ enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
}
/* Store all mac addresses from the list to the DA table */
enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct macInfo mac_info;
@@ -1916,7 +1928,7 @@ enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
}
/* reset vpaths */
enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath;
@@ -1948,7 +1960,7 @@ enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
}
/* close vpaths */
void vxge_close_vpaths(struct vxgedev *vdev, int index)
static void vxge_close_vpaths(struct vxgedev *vdev, int index)
{
struct vxge_vpath *vpath;
int i;
@@ -1966,7 +1978,7 @@ void vxge_close_vpaths(struct vxgedev *vdev, int index)
}
/* open vpaths */
int vxge_open_vpaths(struct vxgedev *vdev)
static int vxge_open_vpaths(struct vxgedev *vdev)
{
struct vxge_hw_vpath_attr attr;
enum vxge_hw_status status;
@@ -2517,7 +2529,7 @@ static void vxge_poll_vp_lockup(unsigned long data)
* Return value: '0' on success and an appropriate (-)ve integer as
* defined in errno.h file on failure.
*/
int
static int
vxge_open(struct net_device *dev)
{
enum vxge_hw_status status;
@@ -2721,7 +2733,7 @@ vxge_open(struct net_device *dev)
}
/* Loop throught the mac address list and delete all the entries */
void vxge_free_mac_add_list(struct vxge_vpath *vpath)
static void vxge_free_mac_add_list(struct vxge_vpath *vpath)
{
struct list_head *entry, *next;
@@ -2745,7 +2757,7 @@ static void vxge_napi_del_all(struct vxgedev *vdev)
}
}
int do_vxge_close(struct net_device *dev, int do_io)
static int do_vxge_close(struct net_device *dev, int do_io)
{
enum vxge_hw_status status;
struct vxgedev *vdev;
@@ -2856,7 +2868,7 @@ int do_vxge_close(struct net_device *dev, int do_io)
* Return value: '0' on success and an appropriate (-)ve integer as
* defined in errno.h file on failure.
*/
int
static int
vxge_close(struct net_device *dev)
{
do_vxge_close(dev, 1);
@@ -3113,10 +3125,10 @@ static const struct net_device_ops vxge_netdev_ops = {
#endif
};
int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
struct vxge_config *config,
int high_dma, int no_of_vpath,
struct vxgedev **vdev_out)
static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
struct vxge_config *config,
int high_dma, int no_of_vpath,
struct vxgedev **vdev_out)
{
struct net_device *ndev;
enum vxge_hw_status status = VXGE_HW_OK;
@@ -3164,7 +3176,7 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
initialize_ethtool_ops(ndev);
vxge_initialize_ethtool_ops(ndev);
/* Allocate memory for vpath */
vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
@@ -3249,7 +3261,7 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
*
* This function will unregister and free network device
*/
void
static void
vxge_device_unregister(struct __vxge_hw_device *hldev)
{
struct vxgedev *vdev;
@@ -396,64 +396,7 @@ struct vxge_tx_priv {
mod_timer(&timer, (jiffies + exp)); \
} while (0);
int __devinit vxge_device_register(struct __vxge_hw_device *devh,
struct vxge_config *config,
int high_dma, int no_of_vpath,
struct vxgedev **vdev);
void vxge_device_unregister(struct __vxge_hw_device *devh);
void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id);
void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id);
void vxge_callback_link_up(struct __vxge_hw_device *devh);
void vxge_callback_link_down(struct __vxge_hw_device *devh);
enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
struct macInfo *mac);
int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
int vxge_reset(struct vxgedev *vdev);
enum vxge_hw_status
vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
u8 t_code, void *userdata);
enum vxge_hw_status
vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
enum vxge_hw_fifo_tcode t_code, void *userdata,
struct sk_buff ***skb_ptr, int nr_skbs, int *more);
int vxge_close(struct net_device *dev);
int vxge_open(struct net_device *dev);
void vxge_close_vpaths(struct vxgedev *vdev, int index);
int vxge_open_vpaths(struct vxgedev *vdev);
enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
struct macInfo *mac);
enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
struct macInfo *mac);
int vxge_mac_list_add(struct vxge_vpath *vpath,
struct macInfo *mac);
void vxge_free_mac_add_list(struct vxge_vpath *vpath);
enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
int do_vxge_close(struct net_device *dev, int do_io);
extern void initialize_ethtool_ops(struct net_device *ndev);
extern void vxge_initialize_ethtool_ops(struct net_device *ndev);
/**
* #define VXGE_DEBUG_INIT: debug for initialization functions
* #define VXGE_DEBUG_TX : debug transmit related functions
@@ -17,6 +17,13 @@
#include "vxge-config.h"
#include "vxge-main.h"
static enum vxge_hw_status
__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev,
u32 vp_id, enum vxge_hw_event type);
static enum vxge_hw_status
__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
u32 skip_alarms);
/*
* vxge_hw_vpath_intr_enable - Enable vpath interrupts.
* @vp: Virtual Path handle.
@@ -513,7 +520,7 @@ enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
* Link up indication handler. The function is invoked by HW when
* Titan indicates that the link is up for programmable amount of time.
*/
enum vxge_hw_status
static enum vxge_hw_status
__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
{
/*
@@ -538,7 +545,7 @@ __vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
* Link down indication handler. The function is invoked by HW when
* Titan indicates that the link is down.
*/
enum vxge_hw_status
static enum vxge_hw_status
__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
{
/*
@@ -564,7 +571,7 @@ __vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
*
* Handle error.
*/
enum vxge_hw_status
static enum vxge_hw_status
__vxge_hw_device_handle_error(
struct __vxge_hw_device *hldev,
u32 vp_id,
@@ -646,7 +653,7 @@ void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
* it swaps the reserve and free arrays.
*
*/
enum vxge_hw_status
static enum vxge_hw_status
vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
{
void **tmp_arr;
@@ -692,7 +699,8 @@ vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
* Posts a dtr to work array.
*
*/
void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
static void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel,
void *dtrh)
{
vxge_assert(channel->work_arr[channel->post_index] == NULL);
@@ -1657,37 +1665,6 @@ vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
return status;
}
/**
* vxge_hw_vpath_vid_get_next - Get the next vid entry for this vpath
* from vlan id table.
* @vp: Vpath handle.
* @vid: Buffer to return vlan id
*
* Returns the next vlan id in the list for this vpath.
* see also: vxge_hw_vpath_vid_get
*
*/
enum vxge_hw_status
vxge_hw_vpath_vid_get_next(struct __vxge_hw_vpath_handle *vp, u64 *vid)
{
u64 data;
enum vxge_hw_status status = VXGE_HW_OK;
if (vp == NULL) {
status = VXGE_HW_ERR_INVALID_HANDLE;
goto exit;
}
status = __vxge_hw_vpath_rts_table_get(vp,
VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
0, vid, &data);
*vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
exit:
return status;
}
/**
* vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
* to vlan id table.
@@ -1898,9 +1875,9 @@ vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
* Process vpath alarms.
*
*/
enum vxge_hw_status __vxge_hw_vpath_alarm_process(
struct __vxge_hw_virtualpath *vpath,
u32 skip_alarms)
static enum vxge_hw_status
__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
u32 skip_alarms)
{
u64 val64;
u64 alarm_status;
@@ -2264,36 +2241,6 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
&hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
}
/**
* vxge_hw_vpath_msix_clear - Clear MSIX Vector.
* @vp: Virtual Path handle.
* @msix_id: MSI ID
*
* The function clears the msix interrupt for the given msix_id
*
* Returns: 0,
* Otherwise, VXGE_HW_ERR_WRONG_IRQ if the msix index is out of range
* status.
* See also:
*/
void
vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
struct __vxge_hw_device *hldev = vp->vpath->hldev;
if (hldev->config.intr_mode ==
VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
__vxge_hw_pio_mem_write32_upper(
(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
&hldev->common_reg->
clr_msix_one_shot_vec[msix_id%4]);
} else {
__vxge_hw_pio_mem_write32_upper(
(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
&hldev->common_reg->
clear_msix_mask_vect[msix_id%4]);
}
}
/**
* vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
* @vp: Virtual Path handle.
@@ -2315,22 +2262,6 @@ vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
&hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
}
/**
* vxge_hw_vpath_msix_mask_all - Mask all MSIX vectors for the vpath.
* @vp: Virtual Path handle.
*
* The function masks all msix interrupt for the given vpath
*
*/
void
vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vp)
{
__vxge_hw_pio_mem_write32_upper(
(u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32),
&vp->vpath->hldev->common_reg->set_msix_mask_all_vect);
}
/**
* vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
* @vp: Virtual Path handle.
@@ -1748,14 +1748,6 @@ vxge_hw_mrpcim_stats_access(
u32 offset,
u64 *stat);
enum vxge_hw_status
vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *devh, u32 port,
struct vxge_hw_xmac_aggr_stats *aggr_stats);
enum vxge_hw_status
vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *devh, u32 port,
struct vxge_hw_xmac_port_stats *port_stats);
enum vxge_hw_status
vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *devh,
struct vxge_hw_xmac_stats *xmac_stats);
@@ -2117,49 +2109,6 @@ struct __vxge_hw_ring_rxd_priv {
#endif
};
/* ========================= RING PRIVATE API ============================= */
u64
__vxge_hw_ring_first_block_address_get(
struct __vxge_hw_ring *ringh);
enum vxge_hw_status
__vxge_hw_ring_create(
struct __vxge_hw_vpath_handle *vpath_handle,
struct vxge_hw_ring_attr *attr);
enum vxge_hw_status
__vxge_hw_ring_abort(
struct __vxge_hw_ring *ringh);
enum vxge_hw_status
__vxge_hw_ring_reset(
struct __vxge_hw_ring *ringh);
enum vxge_hw_status
__vxge_hw_ring_delete(
struct __vxge_hw_vpath_handle *vpath_handle);
/* ========================= FIFO PRIVATE API ============================= */
struct vxge_hw_fifo_attr;
enum vxge_hw_status
__vxge_hw_fifo_create(
struct __vxge_hw_vpath_handle *vpath_handle,
struct vxge_hw_fifo_attr *attr);
enum vxge_hw_status
__vxge_hw_fifo_abort(
struct __vxge_hw_fifo *fifoh);
enum vxge_hw_status
__vxge_hw_fifo_reset(
struct __vxge_hw_fifo *ringh);
enum vxge_hw_status
__vxge_hw_fifo_delete(
struct __vxge_hw_vpath_handle *vpath_handle);
struct vxge_hw_mempool_cbs {
void (*item_func_alloc)(
struct vxge_hw_mempool *mempoolh,
@@ -2169,10 +2122,6 @@ struct vxge_hw_mempool_cbs {
u32 is_last);
};
void
__vxge_hw_mempool_destroy(
struct vxge_hw_mempool *mempool);
#define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
@@ -2194,62 +2143,11 @@ __vxge_hw_vpath_rts_table_set(
u64 data1,
u64 data2);
enum vxge_hw_status
__vxge_hw_vpath_reset(
struct __vxge_hw_device *devh,
u32 vp_id);
enum vxge_hw_status
__vxge_hw_vpath_sw_reset(
struct __vxge_hw_device *devh,
u32 vp_id);
enum vxge_hw_status
__vxge_hw_vpath_enable(
struct __vxge_hw_device *devh,
u32 vp_id);
void
__vxge_hw_vpath_prc_configure(
struct __vxge_hw_device *devh,
u32 vp_id);
enum vxge_hw_status
__vxge_hw_vpath_kdfc_configure(
struct __vxge_hw_device *devh,
u32 vp_id);
enum vxge_hw_status
__vxge_hw_vpath_mac_configure(
struct __vxge_hw_device *devh,
u32 vp_id);
enum vxge_hw_status
__vxge_hw_vpath_tim_configure(
struct __vxge_hw_device *devh,
u32 vp_id);
enum vxge_hw_status
__vxge_hw_vpath_initialize(
struct __vxge_hw_device *devh,
u32 vp_id);
enum vxge_hw_status
__vxge_hw_vp_initialize(
struct __vxge_hw_device *devh,
u32 vp_id,
struct vxge_hw_vp_config *config);
void
__vxge_hw_vp_terminate(
struct __vxge_hw_device *devh,
u32 vp_id);
enum vxge_hw_status
__vxge_hw_vpath_alarm_process(
struct __vxge_hw_virtualpath *vpath,
u32 skip_alarms);
void vxge_hw_device_intr_enable(
struct __vxge_hw_device *devh);
@@ -2320,11 +2218,6 @@ vxge_hw_vpath_vid_get(
struct __vxge_hw_vpath_handle *vpath_handle,
u64 *vid);
enum vxge_hw_status
vxge_hw_vpath_vid_get_next(
struct __vxge_hw_vpath_handle *vpath_handle,
u64 *vid);
enum vxge_hw_status
vxge_hw_vpath_vid_delete(
struct __vxge_hw_vpath_handle *vpath_handle,
@@ -2386,17 +2279,10 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle,
void vxge_hw_device_flush_io(struct __vxge_hw_device *devh);
void
vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vpath_handle,
int msix_id);
void
vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vpath_handle,
int msix_id);
void
vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vpath_handle);
enum vxge_hw_status vxge_hw_vpath_intr_enable(
struct __vxge_hw_vpath_handle *vpath_handle);
@@ -2415,12 +2301,6 @@ vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channelh, int msix_id);
void
vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id);
enum vxge_hw_status
vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh);
void
vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh);
void
vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel,
void **dtrh);
@@ -2436,18 +2316,4 @@ vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
void
vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id);
/* ========================== PRIVATE API ================================= */
enum vxge_hw_status
__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev);
enum vxge_hw_status
__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev);
enum vxge_hw_status
__vxge_hw_device_handle_error(
struct __vxge_hw_device *hldev,
u32 vp_id,
enum vxge_hw_event type);
#endif