Unverified commit ea365acb, authored by openeuler-ci-bot, committed by Gitee

!570 Net: m1600: Support nebula-matrix m1600-series network card

Merge Pull Request from: @nebula_matrix 
 
Add the m1600 driver for the nebula-matrix m1600 series smart NIC.

M1600-NIC is a series of network interface cards for data center use.
The driver supports a link speed of 10GbE. M1600 devices support SR-IOV; this
driver serves both the Physical Function (PF) and the Virtual Function (VF).
M1600 devices support an MSI-X interrupt vector for each Tx/Rx queue as well
as interrupt moderation. M1600 devices also support various offload features
such as checksum offload and Receive-Side Scaling (RSS).

 
 
Link: https://gitee.com/openeuler/kernel/pulls/570

Reviewed-by: Liu Chao <liuchao173@huawei.com> 
Reviewed-by: Jialin Zhang <zhangjialin11@huawei.com> 
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com> 
.. SPDX-License-Identifier: GPL-2.0
============================================================
Linux Base Driver for Nebula-matrix M1600-NIC family
============================================================
Overview:
=========
M1600-NIC is a series of network interface cards for data center use.
The driver supports a link speed of 10GbE.
M1600 devices support SR-IOV. This driver serves both the Physical
Function (PF) and the Virtual Function (VF).
M1600 devices support an MSI-X interrupt vector for each Tx/Rx queue and
interrupt moderation.
M1600 devices also support various offload features such as checksum offload
and Receive-Side Scaling (RSS).
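SR-IOV Virtual Functions are created through the standard PCI sysfs interface.
For example, to create two VFs on a PF whose network interface is named
enp130s0f0 (the interface name and VF count here are only illustrative):
echo 2 > /sys/class/net/enp130s0f0/device/sriov_numvfs
Writing 0 to the same file removes the VFs again.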
Supported PCI vendor ID/device IDs:
===================================
1f0f:1600 - M1600-Nic PF
1f0f:1601 - M1600-Nic VF
ethtool support
===============
Obtain basic information about the network card:
ethtool -i enp130s0f0
Get the network card ring parameters:
ethtool -g enp130s0f0
Set the ring parameters:
ethtool -G enp130s0f0 rx 1024 tx 1024
View statistics:
ethtool -S enp130s0f0
View optical module information:
ethtool -m enp130s0f0
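Query the interrupt coalescing configuration used for interrupt moderation
(the driver implements the standard get/set coalesce operations; the exact
parameters accepted by ethtool -C depend on the driver build):
ethtool -c enp130s0f0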
Support
=======
For more information about M1600-NIC, please visit the following URL:
https://www.nebula-matrix.com/
If an issue is identified with the released source code on the supported kernel
with a supported adapter, email the specific information related to the issue to
open@nebula-matrix.com.
@@ -2893,6 +2893,8 @@ CONFIG_SMSC9420=m
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
# CONFIG_NET_VENDOR_XILINX is not set
CONFIG_NET_VENDOR_NEBULA_MATRIX=y
CONFIG_M1600=m
# CONFIG_FDDI is not set
# CONFIG_HIPPI is not set
# CONFIG_NET_SB1000 is not set
......
@@ -2864,6 +2864,8 @@ CONFIG_SFC_MCDI_LOGGING=y
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
# CONFIG_NET_VENDOR_XILINX is not set
CONFIG_NET_VENDOR_NEBULA_MATRIX=y
CONFIG_M1600=m
# CONFIG_FDDI is not set
# CONFIG_HIPPI is not set
# CONFIG_NET_SB1000 is not set
......
@@ -183,5 +183,6 @@ source "drivers/net/ethernet/via/Kconfig"
source "drivers/net/ethernet/wiznet/Kconfig"
source "drivers/net/ethernet/xilinx/Kconfig"
source "drivers/net/ethernet/xircom/Kconfig"
source "drivers/net/ethernet/nebula-matrix/Kconfig"
endif # ETHERNET
@@ -96,3 +96,4 @@ obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/
obj-$(CONFIG_NET_VENDOR_PENSANDO) += pensando/
obj-$(CONFIG_NET_VENDOR_NETSWIFT) += netswift/
obj-$(CONFIG_NET_VENDOR_NEBULA_MATRIX) += nebula-matrix/
# SPDX-License-Identifier: GPL-2.0
#
# Nebula-matrix network device configuration
#
config NET_VENDOR_NEBULA_MATRIX
bool "Nebula-matrix devices"
default y
help
If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
the questions about Nebula-matrix cards. If you say Y, you will be
asked for your specific card in the following questions.
if NET_VENDOR_NEBULA_MATRIX
config M1600
tristate "Nebula-matrix Ethernet Controller m1600 Family support"
depends on PCI
depends on ARM64 || X86_64
default m
help
This driver supports Nebula-matrix Ethernet Controller m1600 Family of
devices. For more information about this product, see the smart NIC
product description at:
<http://www.nebula-matrix.com>
More specific information on configuring the driver is in
<file:Documentation/networking/device_drivers/ethernet/nebula-matrix/m1600.rst>.
To compile this driver as a module, choose M here. The module
will be called m1600.
endif # NET_VENDOR_NEBULA_MATRIX
# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Nebula-matrix network device drivers.
#
obj-$(CONFIG_M1600) += m1600/
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2022 Nebula Matrix Limited.
# Author: Monte Song <monte.song@nebula-matrix.com>
ccflags-y += -DCONFIG_NBL_DEBUGFS
obj-$(CONFIG_M1600) += m1600.o
m1600-y += main.o \
ethtool.o \
common.o \
interrupt.o \
txrx.o \
mailbox.o \
debug.o \
hwmon.o \
macvlan.o \
sriov.o
This diff is collapsed.
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2022 nebula-matrix Limited.
* Author: Monte Song <monte.song@nebula-matrix.com>
*/
#ifndef _NBL_COMMON_H_
#define _NBL_COMMON_H_
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/version.h>
#include <linux/io.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/debugfs.h>
#include "hw.h"
#define NBL_X4_DRIVER_NAME "m1600"
#define NBL_X4_DRIVER_VERSION "2.1.2"
#define NBL_MAILBOX_QUEUE_LEN 256
#define NBL_MAILBOX_BUF_LEN 4096
#define NBL_REG_WRITE_MAX_TRY_TIMES 5
#define NBL_LED_FLICKER_FREQUENCY (2)
#define NBL_MAX_JUMBO_FRAME_SIZE (15872)
#define NBL_MAX_FRAME_SIZE (10000)
#define NBL_MIN_FRAME_SIZE (64)
#define NBL_MAX_MTU 9600
#define NBL_VLAN_HLEN 4
#define NBL_URMUX_MAX_PKT_LEN 10000
#define NBL_MODULE_SPEED_NOT_SUPPORT 0
#define NBL_MODULE_SPEED_1G BIT(0)
#define NBL_MODULE_SPEED_10G BIT(1)
struct nbl_mailbox_buf {
void *va;
dma_addr_t pa;
size_t size;
};
struct nbl_mailbox_tx_desc {
u16 flags;
u16 srcid;
u16 dstid;
u16 data_len;
u16 buf_len;
u64 buf_addr;
u16 msg_type;
u8 data[16];
u8 rsv[28];
} __packed;
struct nbl_mailbox_rx_desc {
u16 flags;
u32 buf_len;
u16 buf_id;
u64 buf_addr;
} __packed;
struct nbl_mailbox_ring {
void *desc;
struct nbl_mailbox_buf *buf;
u16 next_to_use;
u16 tail_ptr;
u16 next_to_clean;
dma_addr_t dma;
};
#define NBL_STRING_NAME_LEN 32
struct nbl_mailbox_info {
struct nbl_mailbox_ring txq;
struct nbl_mailbox_ring rxq;
/* For mailbox txq */
spinlock_t txq_lock;
/* For send msg */
struct mutex send_normal_msg_lock;
int acked;
int ack_err;
unsigned int ack_req_msg_type;
char *ack_data;
u16 ack_data_len;
u16 num_txq_entries;
u16 num_rxq_entries;
u16 txq_buf_size;
u16 rxq_buf_size;
char name[NBL_STRING_NAME_LEN];
};
struct nbl_msix_map_table {
struct nbl_msix_map *base_addr;
dma_addr_t dma;
size_t size;
};
struct nbl_func_res {
u8 num_txrx_queues;
u8 *txrx_queues;
u16 num_interrupts;
u16 *interrupts;
struct nbl_msix_map_table msix_map_table;
u16 macvlan_start_index;
u16 num_macvlan_entries;
u8 eth_port_id;
u8 mac_addr[ETH_ALEN];
s16 vlan_ids[NBL_PF_MAX_MACVLAN_ENTRIES];
};
enum nbl_func_type {
NBL_X4_AF,
NBL_X4_PF,
NBL_X4_VF,
};
struct nbl_fc_info {
u32 rx_pause;
u32 tx_pause;
};
struct nbl_hw_stats {
u64 tx_total_packets;
u64 tx_total_good_packets;
u64 rx_total_packets;
u64 rx_total_good_packets;
u64 tx_bad_fcs;
u64 rx_bad_fcs;
u64 tx_total_bytes;
u64 tx_total_good_bytes;
u64 rx_total_bytes;
u64 rx_total_good_bytes;
u64 tx_frame_error;
u64 tx_unicast;
u64 tx_multicast;
u64 tx_broadcast;
u64 tx_vlan;
u64 tx_fc_pause;
u64 rx_oversize;
u64 rx_undersize;
u64 rx_frame_err;
u64 rx_bad_code;
u64 rx_unicast;
u64 rx_multicast;
u64 rx_broadcast;
u64 rx_vlan;
u64 rx_fc_pause;
};
struct nbl_stats {
/* for nbl status consistent */
struct mutex lock;
u64 tx_total_packets;
u64 tx_total_good_packets;
u64 tx_total_bytes;
u64 tx_total_good_bytes;
u64 tx_error_packets;
u64 tx_bad_fcs;
u64 tx_frame_error;
u64 tx_unicast;
u64 tx_multicast;
u64 tx_broadcast;
u64 tx_vlan;
u64 tx_fc_pause;
u64 rx_total_packets;
u64 rx_total_good_packets;
u64 rx_total_bytes;
u64 rx_total_good_bytes;
u64 rx_error_packets;
u64 rx_bad_fcs;
u64 rx_oversize;
u64 rx_undersize;
u64 rx_frame_err;
u64 rx_bad_code;
u64 rx_unicast;
u64 rx_multicast;
u64 rx_broadcast;
u64 rx_vlan;
u64 rx_fc_pause;
u64 tx_busy;
u64 tx_linearize;
u64 tx_timeout;
u64 tx_csum_pkts;
u64 rx_csum_pkts;
u64 tx_dma_err;
u64 alloc_page_failed;
u64 alloc_skb_failed;
u64 rx_dma_err;
u64 err_status_reset;
u64 bad_code_reset;
};
struct nbl_vf_bar_info {
u64 vf_bar_start;
u64 vf_bar_len;
};
struct nbl_af_res_info {
/* For function resource */
spinlock_t func_res_lock;
DECLARE_BITMAP(interrupt_bitmap, NBL_MAX_INTERRUPT);
DECLARE_BITMAP(txrx_queue_bitmap, NBL_MAX_TXRX_QUEUE);
struct nbl_qid_map qid_map_table[NBL_QID_MAP_TABLE_ENTRIES];
int qid_map_ready;
int qid_map_select;
struct nbl_func_res *res_record[NBL_MAX_FUNC];
struct nbl_vf_bar_info vf_bar_info[NBL_MAX_PF_FUNC];
u8 forward_ring_index;
atomic_t eth_port_tx_refcount[NBL_ETH_PORT_NUM];
atomic_t eth_port_rx_refcount[NBL_ETH_PORT_NUM];
};
struct nbl_hw {
u8 __iomem *hw_addr;
void *back;
u8 function;
u8 devid;
u8 bus;
enum nbl_func_type func_type;
u8 vsi_id;
u8 eth_port_id;
u8 __iomem *msix_bar_hw_addr;
bool module_inplace;
u8 module_support_speed;
u8 __iomem *mailbox_bar_hw_addr;
struct nbl_mailbox_info mailbox;
struct nbl_af_res_info *af_res;
struct nbl_fc_info fc;
struct nbl_hw_stats hw_stats;
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
/* debugfs */
struct dentry *nbl_debug_root;
int debugfs_reg_bar;
long debugfs_reg_offset;
long debugfs_reg_length;
};
enum nbl_adapter_state {
NBL_DOWN,
NBL_MAILBOX_READY,
NBL_MAILBOX_EVENT_PENDING,
NBL_RESETTING,
NBL_RESET_REQUESTED,
NBL_PROMISC,
NBL_STATE_NBITS,
};
struct nbl_healing_var {
u64 former_bad_code;
int bad_code_increase;
int status_chk_timer;
};
struct nbl_adapter {
struct nbl_hw hw;
struct pci_dev *pdev;
struct net_device *netdev;
u8 num_txq;
u8 num_rxq;
u16 tx_desc_num;
u16 rx_desc_num;
struct msix_entry *msix_entries;
u16 num_lan_msix;
u16 num_mailbox_msix;
struct nbl_ring **tx_rings;
struct nbl_ring **rx_rings;
u16 num_q_vectors;
struct nbl_q_vector **q_vectors;
DECLARE_BITMAP(state, NBL_STATE_NBITS);
unsigned long serv_timer_period;
struct timer_list serv_timer;
struct work_struct serv_task1;
struct work_struct serv_task2;
struct nbl_stats stats;
struct nbl_healing_var healing_var;
struct device *hwmon_dev;
u32 msg_enable;
u32 flags;
};
static inline bool is_af(struct nbl_hw *hw)
{
return hw->func_type == NBL_X4_AF;
}
static inline bool is_vf(struct nbl_hw *hw)
{
return hw->func_type == NBL_X4_VF;
}
#define nbl_adapter_to_dev(adapter) (&((adapter)->pdev->dev))
#define nbl_hw_to_dev(hw) nbl_adapter_to_dev((struct nbl_adapter *)((hw)->back))
#define wr32(hw, reg, value) writel((value), ((hw)->hw_addr + (reg)))
#define rd32(hw, reg) readl((hw)->hw_addr + (reg))
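/* The *_for_each helpers below copy "size" bytes between a u32 array and a
 * contiguous register range, one 32-bit access at a time; "size" is assumed
 * to be a multiple of 4 bytes.
 */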
#define wr32_for_each(hw, reg, value, size) \
do { \
int __n; \
for (__n = 0; __n < (size); __n += 4) \
wr32((hw), (reg) + __n, (value)[__n / 4]); \
} while (0)
#define rd32_for_each(hw, reg, value, size) \
do { \
int __n; \
for (__n = 0; __n < (size); __n += 4) \
(value)[__n / 4] = rd32((hw), (reg) + __n); \
} while (0)
#define wr32_zero_for_each(hw, reg, size) \
do { \
int __n; \
for (__n = 0; __n < (size); __n += 4) \
wr32((hw), (reg) + __n, 0); \
} while (0)
#define NBL_WRITE_VERIFY_MAX_TIMES (5)
static inline void wr32_and_verify(struct nbl_hw *hw, u64 reg, u32 value)
{
u32 read_value;
int i = 0;
while (likely(i < NBL_WRITE_VERIFY_MAX_TIMES)) {
wr32(hw, reg, value);
read_value = rd32(hw, reg);
if (read_value == value)
return;
i++;
}
pr_err("Write to register addr %llx failed\n", reg);
}
#define mb_wr32(hw, reg, value) writel((value), ((hw)->mailbox_bar_hw_addr + (reg)))
#define mb_rd32(hw, reg) readl((hw)->mailbox_bar_hw_addr + (reg))
#define mb_wr32_for_each(hw, reg, value, size) \
do { \
int __n; \
for (__n = 0; __n < (size); __n += 4) \
mb_wr32((hw), (reg) + __n, (value)[__n / 4]); \
} while (0)
#define mb_rd32_for_each(hw, reg, value, size) \
do { \
int __n; \
for (__n = 0; __n < (size); __n += 4) \
(value)[__n / 4] = mb_rd32((hw), (reg) + __n); \
} while (0)
#define msix_wr32(hw, reg, value) writel((value), ((hw)->msix_bar_hw_addr + (reg)))
void nbl_service_task1_schedule(struct nbl_adapter *adapter);
void nbl_service_task_schedule(struct nbl_adapter *adapter);
void nbl_firmware_init(struct nbl_hw *hw);
void nbl_af_configure_captured_packets(struct nbl_hw *hw);
void nbl_af_clear_captured_packets_conf(struct nbl_hw *hw);
u32 nbl_af_get_firmware_version(struct nbl_hw *hw);
int nbl_af_res_mng_init(struct nbl_hw *hw);
void nbl_af_free_res(struct nbl_hw *hw);
void nbl_af_compute_bdf(struct nbl_hw *hw, u16 func_id,
u8 *bus, u8 *devid, u8 *function);
bool nbl_check_golden_version(struct nbl_hw *hw);
int nbl_af_configure_func_msix_map(struct nbl_hw *hw, u16 func_id, u16 requested);
void nbl_af_destroy_func_msix_map(struct nbl_hw *hw, u16 func_id);
int nbl_configure_msix_map(struct nbl_hw *hw);
void nbl_destroy_msix_map(struct nbl_hw *hw);
int nbl_af_configure_qid_map(struct nbl_hw *hw, u16 func_id, u8 num_queues, u64 notify_addr);
void nbl_af_clear_qid_map(struct nbl_hw *hw, u16 func_id, u64 notify_addr);
int nbl_get_vsi_id(struct nbl_hw *hw);
void nbl_af_register_vf_bar_info(struct nbl_hw *hw, u16 func_id,
u64 vf_bar_start, u64 vf_bar_len);
int nbl_register_vf_bar_info(struct nbl_hw *hw);
u64 nbl_af_compute_vf_bar_base_addr(struct nbl_hw *hw, u16 func_id);
int nbl_configure_notify_addr(struct nbl_hw *hw);
void nbl_clear_notify_addr(struct nbl_hw *hw);
void nbl_af_disable_promisc(struct nbl_hw *hw, u8 eth_port_id);
void nbl_disable_promisc(struct nbl_hw *hw);
void nbl_af_enable_promisc(struct nbl_hw *hw, u8 eth_port_id);
void nbl_enable_promisc(struct nbl_hw *hw);
void nbl_af_configure_ingress_eth_port_table(struct nbl_hw *hw, u8 eth_port_id, u8 vsi_id);
void nbl_af_configure_src_vsi_table(struct nbl_hw *hw, u8 eth_port_id, u8 vsi_id);
void nbl_af_configure_dest_vsi_table(struct nbl_hw *hw, u8 eth_port_id, u8 vsi_id);
void nbl_datapath_init(struct nbl_hw *hw);
int nbl_af_get_board_info(struct nbl_hw *hw, u8 eth_port_id, union nbl_board_info *board_info);
bool nbl_af_query_link_status(struct nbl_hw *hw, u8 eth_port_id);
bool nbl_query_link_status(struct nbl_hw *hw);
void nbl_query_link_status_subtask(struct nbl_adapter *adapter);
void nbl_af_set_pauseparam(struct nbl_hw *hw, u8 eth_port_id, struct nbl_fc_info fc);
void nbl_af_write_mac_to_logic(struct nbl_hw *hw, u8 eth_port_id, u8 *mac_addr);
void nbl_write_mac_to_logic(struct nbl_hw *hw, u8 *mac_addr);
void nbl_af_init_pkt_len_limit(struct nbl_hw *hw, u8 eth_port_id,
struct nbl_pkt_len_limit pkt_len_limit);
void nbl_init_pkt_len_limit(struct nbl_hw *hw);
int nbl_af_get_eth_stats(struct nbl_hw *hw, u8 eth_port_id, struct nbl_hw_stats *hw_stats);
void nbl_update_stats_subtask(struct nbl_adapter *adapter);
void nbl_init_hw_stats(struct nbl_hw *hw);
void nbl_reset_subtask(struct nbl_adapter *adapter);
int nbl_stop(struct net_device *netdev);
int nbl_open(struct net_device *netdev);
void nbl_do_reset(struct nbl_adapter *adapter);
enum NBL_MODULE_INPLACE_STATUS nbl_af_check_module_inplace(struct nbl_hw *hw, u8 eth_port_id);
int nbl_af_config_module_speed(struct nbl_hw *hw, u8 target_speed, u8 eth_port_id);
void nbl_set_module_speed(struct nbl_hw *hw, u8 target_speed);
void nbl_af_configure_fc_cplh_up_th(struct nbl_hw *hw);
u32 nbl_af_get_rxlos(struct nbl_hw *hw, u8 eth_port_id);
void nbl_af_reset_eth(struct nbl_hw *hw, u8 eth_port_id);
#ifdef CONFIG_NBL_DEBUGFS
void nbl_debugfs_init(void);
void nbl_debugfs_exit(void);
void nbl_debugfs_hw_init(struct nbl_hw *hw);
void nbl_debugfs_hw_exit(struct nbl_hw *hw);
#else
static inline void nbl_debugfs_init(void) {}
static inline void nbl_debugfs_exit(void) {}
static inline void nbl_debugfs_hw_init(struct nbl_hw *hw) {}
static inline void nbl_debugfs_hw_exit(struct nbl_hw *hw) {}
#endif
#endif
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2022 nebula-matrix Limited.
* Author: David Miao <david.miao@nebula-matrix.com>
*/
#ifdef CONFIG_NBL_DEBUGFS
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include "hw.h"
#include "common.h"
#include "ethtool.h"
#include "interrupt.h"
#include "txrx.h"
#include "mailbox.h"
#include "hwmon.h"
static struct dentry *nblx4_debug_root;
#define SINGLE_FOPS_RW(_fops_, _open_, _write_) \
static const struct file_operations _fops_ = { \
.open = _open_, \
.write = _write_, \
.read = seq_read, \
.llseek = seq_lseek, \
.release = seq_release, \
}
#define SINGLE_FOPS_RO(_fops_, _open_) \
static const struct file_operations _fops_ = { \
.open = _open_, \
.read = seq_read, \
.llseek = seq_lseek, \
.release = seq_release, \
}
/* dvn */
static int dvn_seq_show(struct seq_file *m, void *v)
{
int i, j;
struct nbl_hw *hw = m->private;
struct tx_queue_info q;
struct nbl_tx_queue_stat qs;
for (i = 0; i < NBL_MAX_TXRX_QUEUE; i++) {
rd32_for_each(hw, NBL_DVN_QUEUE_INFO_ARR(i),
(u32 *)&q, sizeof(struct tx_queue_info));
seq_printf(m, "QueueID: %03d - ", i);
for (j = 0; j < sizeof(struct tx_queue_info) / sizeof(u32); j++)
seq_printf(m, "%08X ", ((u32 *)&q)[j]);
seq_printf(m, "size:%d ", q.log2_size);
seq_printf(m, "vsi_idx:%d ", q.src_vsi_idx);
seq_printf(m, "pri:%d ", q.priority);
seq_printf(m, "en:%d ", q.enable);
seq_printf(m, "tail_ptr:%d ", q.tail_ptr);
seq_printf(m, "head_ptr:%d\n", q.head_ptr);
}
seq_puts(m, "\n");
seq_puts(m, "=== statistics ===\n");
for (i = 0; i < NBL_MAX_TXRX_QUEUE; i++) {
rd32_for_each(hw, NBL_DVN_QUEUE_STAT_REG_ARR(i),
(u32 *)&qs, sizeof(struct nbl_tx_queue_stat));
seq_printf(m, "QueueID: %03d - ", i);
seq_printf(m, "pkt_get: %d ", qs.pkt_get);
seq_printf(m, "pkt_out: %d ", qs.pkt_out);
seq_printf(m, "pkt_drop: %d ", qs.pkt_drop);
seq_printf(m, "sw_notify: %d ", qs.sw_notify);
seq_printf(m, "pkt_dsch: %d ", qs.pkt_dsch);
seq_printf(m, "hd_notify: %d ", qs.hd_notify);
seq_printf(m, "hd_notify_empty: %d\n", qs.hd_notify_empty);
}
return 0;
}
static int debugfs_dvn_open(struct inode *inode, struct file *file)
{
return single_open(file, dvn_seq_show, inode->i_private);
}
SINGLE_FOPS_RO(dvn_fops, debugfs_dvn_open);
/* uvn */
#define TABLE_UVN_ATTR(n, b, l) \
{ .name = n, .base = NBL_UVN_MODULE + (b), .len = l, }
static struct uvn_table {
char *name;
long base;
int len;
} tables[] = {
TABLE_UVN_ATTR("rd_diff_err_state", 0x2000, NBL_MAX_TXRX_QUEUE),
TABLE_UVN_ATTR("queue_pkt_drop", 0x3000, NBL_MAX_TXRX_QUEUE),
TABLE_UVN_ATTR("queue_desc_no_available", 0x3200, NBL_MAX_TXRX_QUEUE),
TABLE_UVN_ATTR("queue_pkt_in_cnt", 0x3400, NBL_MAX_TXRX_QUEUE),
TABLE_UVN_ATTR("queue_pkt_out_cnt", 0x3600, NBL_MAX_TXRX_QUEUE),
TABLE_UVN_ATTR("queue_desc_rd_cnt", 0x3800, NBL_MAX_TXRX_QUEUE),
TABLE_UVN_ATTR("queue_desc_wb_cnt", 0x3A00, NBL_MAX_TXRX_QUEUE),
TABLE_UVN_ATTR("queue_notify_cnt", 0x3C00, NBL_MAX_TXRX_QUEUE),
TABLE_UVN_ATTR("queue_desc_merge_cnt", 0x3E00, NBL_MAX_TXRX_QUEUE),
};
static int uvn_seq_show(struct seq_file *m, void *v)
{
int i, j;
struct nbl_hw *hw = m->private;
struct rx_queue_info q;
for (i = 0; i < NBL_MAX_TXRX_QUEUE; i++) {
rd32_for_each(hw, NBL_UVN_QUEUE_INFO_ARR(i),
(u32 *)&q, sizeof(struct rx_queue_info));
seq_printf(m, "QueueID: %03d - ", i);
for (j = 0; j < sizeof(struct rx_queue_info) / sizeof(u32); j++)
seq_printf(m, "%08X ", ((u32 *)&q)[j]);
seq_printf(m, "size:%d ", q.log2_size);
seq_printf(m, "buf_len:%d ", q.buf_length_pow);
seq_printf(m, "en:%d ", q.enable);
seq_printf(m, "tail_ptr:%d ", q.tail_ptr);
seq_printf(m, "head_ptr:%d\n", q.head_ptr);
}
seq_puts(m, "\n");
#define LINE_RECORD_NUM 8
for (i = 0; i < ARRAY_SIZE(tables); i++) {
seq_printf(m, "=== %s ===\n", tables[i].name);
for (j = 0; j < tables[i].len; j++) {
if (j % LINE_RECORD_NUM == 0)
seq_printf(m, "QueueID %03d:", j);
seq_printf(m, " %d", rd32(hw, tables[i].base + j * 4));
if (((j + 1) % LINE_RECORD_NUM == 0) || ((j + 1) == tables[i].len))
seq_puts(m, "\n");
}
if ((i + 1) != ARRAY_SIZE(tables))
seq_puts(m, "\n");
}
return 0;
}
static int debugfs_uvn_open(struct inode *inode, struct file *file)
{
return single_open(file, uvn_seq_show, inode->i_private);
}
SINGLE_FOPS_RO(uvn_fops, debugfs_uvn_open);
/* nic statistics */
static int nic_statistics_seq_show(struct seq_file *m, void *v)
{
int epid;
struct nbl_hw *hw = m->private;
WARN_ON(!is_af(hw));
for (epid = 0; epid < 4; epid++) {
seq_printf(m, "======== port %d ========\n", epid);
/* tx */
seq_printf(m, "tx_total_packets=%lld\n",
((u64)rd32(hw, NBL_ETH_TX_TOTAL_PKT_CNT_L_REG(epid)) |
(((u64)rd32(hw, NBL_ETH_TX_TOTAL_PKT_CNT_H_REG(epid)) &
0xFFFF) << 32)));
seq_printf(m, "tx_total_bytes=%lld\n",
((u64)rd32(hw, NBL_ETH_TX_TOTAL_BYTES_CNT_L_REG(epid)) |
(((u64)rd32(hw, NBL_ETH_TX_TOTAL_BYTES_CNT_H_REG(epid)) &
0xFFFF) << 32)));
seq_printf(m, "tx_total_good_packets=%lld\n",
((u64)rd32(hw, NBL_ETH_TX_TOTAL_GOOD_PKT_CNT_L_REG(epid)) |
(((u64)rd32(hw, NBL_ETH_TX_TOTAL_GOOD_PKT_CNT_H_REG(epid)) &
0xFFFF) << 32)));
seq_printf(m, "tx_frame_error=%lld\n",
((u64)rd32(hw, NBL_ETH_TX_FRAME_ERROR_CNT_L_REG(epid)) |
(((u64)rd32(hw, NBL_ETH_TX_FRAME_ERROR_CNT_H_REG(epid)) &
0xFFFF) << 32)));
seq_printf(m, "tx_bad_fcs=%lld\n",
((u64)rd32(hw, NBL_ETH_TX_BAD_FCS_CNT_L_REG(epid)) |
(((u64)rd32(hw, NBL_ETH_TX_BAD_FCS_CNT_H_REG(epid)) &
0xFFFF) << 32)));
seq_printf(m, "rx_bad_code=%lld\n",
((u64)rd32(hw, NBL_ETH_RX_BADCODE_CNT_L_REG(epid)) |
(((u64)rd32(hw, NBL_ETH_RX_BADCODE_CNT_H_REG(epid)) &
0xFFFF) << 32)));
seq_puts(m, "-----\n");
/* rx */
seq_printf(m, "rx_total_packets=%lld\n",
((u64)rd32(hw, NBL_ETH_RX_TOTAL_PKT_CNT_L_REG(epid)) |
(((u64)rd32(hw, NBL_ETH_RX_TOTAL_PKT_CNT_H_REG(epid)) &
0xFFFF) << 32)));
seq_printf(m, "rx_total_bytes=%lld\n",
((u64)rd32(hw, NBL_ETH_RX_TOTAL_BYTES_CNT_L_REG(epid)) |
(((u64)rd32(hw, NBL_ETH_RX_TOTAL_BYTES_CNT_H_REG(epid)) &
0xFFFF) << 32)));
seq_printf(m, "rx_total_good_packets=%lld\n",
((u64)rd32(hw, NBL_ETH_RX_TOTAL_GOOD_PKT_CNT_L_REG(epid)) |
(((u64)rd32(hw, NBL_ETH_RX_TOTAL_GOOD_PKT_CNT_H_REG(epid)) &
0xFFFF) << 32)));
seq_printf(m, "rx_total_good_bytes=%lld\n",
((u64)rd32(hw, NBL_ETH_RX_TOTAL_GOOD_BYTES_CNT_L_REG(epid)) |
(((u64)rd32(hw, NBL_ETH_RX_TOTAL_GOOD_BYTES_CNT_H_REG(epid)) &
0xFFFF) << 32)));
seq_printf(m, "rx_frame_err=%lld\n",
((u64)rd32(hw, NBL_ETH_RX_FRAMING_ERR_CNT_L_REG(epid)) |
(((u64)rd32(hw, NBL_ETH_RX_FRAMING_ERR_CNT_H_REG(epid)) &
0xFFFF) << 32)));
seq_printf(m, "rx_bad_fcs=%lld\n",
((u64)rd32(hw, NBL_ETH_RX_BAD_FCS_CNT_L_REG(epid)) |
(((u64)rd32(hw, NBL_ETH_RX_BAD_FCS_CNT_H_REG(epid)) &
0xFFFF) << 32)));
seq_printf(m, "rx_oversize=%lld\n",
((u64)rd32(hw, NBL_ETH_RX_OVERSIZE_CNT_L_REG(epid)) |
(((u64)rd32(hw, NBL_ETH_RX_OVERSIZE_CNT_H_REG(epid)) &
0xFFFF) << 32)));
seq_printf(m, "rx_undersize=%lld\n",
((u64)rd32(hw, NBL_ETH_RX_UNDERSIZE_CNT_L_REG(epid)) |
(((u64)rd32(hw, NBL_ETH_RX_UNDERSIZE_CNT_H_REG(epid)) &
0xFFFF) << 32)));
if (epid != 3)
seq_puts(m, "\n");
}
return 0;
}
static int debugfs_nic_statistics_open(struct inode *inode, struct file *file)
{
return single_open(file, nic_statistics_seq_show, inode->i_private);
}
SINGLE_FOPS_RO(nic_statistics_fops, debugfs_nic_statistics_open);
/* ring */
static int ring_seq_show(struct seq_file *m, void *v)
{
int i, j, n;
struct nbl_rx_desc *rx_desc;
struct nbl_tx_desc *tx_desc;
struct nbl_ring *ring = m->private;
seq_printf(m, "size=%d\n", ring->size);
seq_printf(m, "dma=0x%llX\n", (unsigned long long)ring->dma);
seq_printf(m, "desc=0x%llX\n", (unsigned long long)ring->desc);
seq_printf(m, "desc_num=%d\n", ring->desc_num);
seq_printf(m, "local_qid=%d\n", ring->local_qid);
seq_printf(m, "queue_index=%d\n", ring->queue_index);
seq_printf(m, "notify_addr=0x%llX\n",
(unsigned long long)ring->notify_addr);
seq_printf(m, "buf_len=%d\n", ring->buf_len);
seq_printf(m, "next_to_use=%d\n", ring->next_to_use);
seq_printf(m, "next_to_clean=%d\n", ring->next_to_clean);
seq_printf(m, "next_to_alloc=%d\n", ring->next_to_alloc);
seq_printf(m, "tail_ptr=%d\n", ring->tail_ptr);
if (!ring->desc) {
seq_puts(m, "[Unallocated]\n");
return 0;
}
if (ring->local_qid & 1) {
tx_desc = (struct nbl_tx_desc *)ring->desc;
n = sizeof(struct nbl_tx_desc) / sizeof(u32);
for (i = 0; i < ring->desc_num; i++) {
seq_printf(m, "[desc-%03d]: ", i);
for (j = 0; j < n; j++)
seq_printf(m, "%08X ", ((u32 *)tx_desc)[j]);
seq_printf(m, "dlen:%d ", tx_desc->data_len);
seq_printf(m, "plen:%d ", tx_desc->pkt_len);
seq_printf(m, "dd:%d ", tx_desc->dd);
seq_printf(m, "eop:%d ", tx_desc->eop);
seq_printf(m, "sop:%d ", tx_desc->sop);
seq_printf(m, "fwd:%d ", tx_desc->fwd);
seq_printf(m, "dp:%d ", tx_desc->dport);
seq_printf(m, "dpi:%d ", tx_desc->dport_id);
seq_printf(m, "l3c:%d ", tx_desc->l3_checksum);
seq_printf(m, "l4c:%d ", tx_desc->l4_checksum);
seq_printf(m, "rsslag:%d ", tx_desc->rss_lag);
seq_printf(m, "l3_off:%d\n", tx_desc->l3_start_offset);
tx_desc++;
}
} else {
rx_desc = (struct nbl_rx_desc *)ring->desc;
n = sizeof(struct nbl_rx_desc) / sizeof(u32);
for (i = 0; i < ring->desc_num; i++) {
seq_printf(m, "[desc-%03d]: ", i);
for (j = 0; j < n; j++)
seq_printf(m, "%08X ", ((u32 *)rx_desc)[j]);
seq_printf(m, "dlen:%d ", rx_desc->data_len);
seq_printf(m, "dd:%d ", rx_desc->dd);
seq_printf(m, "eop:%d ", rx_desc->eop);
seq_printf(m, "sop:%d ", rx_desc->sop);
seq_printf(m, "fwd:%d ", rx_desc->fwd);
seq_printf(m, "sp:%d ", rx_desc->sport);
seq_printf(m, "spi:%d ", rx_desc->sport_id);
seq_printf(m, "cks:%d ", rx_desc->checksum_status);
seq_printf(m, "ptype:%d ", rx_desc->ptype);
seq_printf(m, "lag:%d ", rx_desc->lag);
seq_printf(m, "lagid:%d\n", rx_desc->lag_id);
rx_desc++;
}
}
return 0;
}
static int debugfs_ring_open(struct inode *inode, struct file *file)
{
return single_open(file, ring_seq_show, inode->i_private);
}
SINGLE_FOPS_RO(ring_fops, debugfs_ring_open);
/* function_msix_map_table */
static int tables_seq_show(struct seq_file *m, void *v)
{
int i, j, k;
struct nbl_hw *hw;
struct nbl_adapter *adapter;
struct nbl_function_msix_map function_msix_map;
struct nbl_qid_map qid_map;
struct nbl_msix_entry msix_entry;
struct nbl_msix_info msix_info;
struct nbl_queue_map queue_map;
hw = m->private;
adapter = (struct nbl_adapter *)hw->back;
seq_puts(m, "===== function_msix_map_table at 0x0013_4000 =====\n");
for (i = 0; i < NBL_MAX_FUNC; i++) {
struct nbl_func_res *funs_res = hw->af_res->res_record[i];
rd32_for_each(hw, NBL_PCOMPLETER_FUNCTION_MSIX_MAP_REG_ARR(i),
(u32 *)&function_msix_map,
sizeof(struct nbl_function_msix_map));
seq_printf(m, "[%03d] base:0x%llX bus:%d dev:%d func:%d valid:%d\n",
i,
function_msix_map.msix_map_base_addr,
function_msix_map.bus,
function_msix_map.devid,
function_msix_map.function,
function_msix_map.valid);
if (funs_res) {
seq_printf(m, " queues:%d irqs:%d\n",
funs_res->num_txrx_queues, funs_res->num_interrupts);
for (j = 0; j < adapter->num_q_vectors + 1; j++) {
seq_printf(m, " [%03d] global_msix_index:%d valid:%d\n", j,
funs_res->msix_map_table.base_addr[j].global_msix_index,
funs_res->msix_map_table.base_addr[j].valid);
}
}
}
seq_puts(m, "\n");
for (k = 0; k < 2; k++) {
seq_printf(m, "===== qid_map_table %d at 0x0013_8000 now %d =====\n",
k, rd32(hw, NBL_PCOMPLETER_QUEUE_TABLE_SELECT_REG) & 1);
for (i = 0; i < NBL_MAX_TXRX_QUEUE; i++) {
rd32_for_each(hw, NBL_PCOMPLETER_QID_MAP_REG_ARR(k, i),
(u32 *)&qid_map, sizeof(struct nbl_qid_map));
seq_printf(m, "[%03d] local_qid:%d notify_addr_l:0x%X notify_addr_h:0x%X global_qid:%d notify_addr:0x%llX\n",
i,
qid_map.local_qid,
qid_map.notify_addr_l,
qid_map.notify_addr_h,
qid_map.global_qid,
(((u64)qid_map.notify_addr_h << 27) |
qid_map.notify_addr_l) << 5);
}
seq_puts(m, "\n");
}
seq_puts(m, "===== msix_table at 0x0015_4000 =====\n");
for (i = 0; i < NBL_MAX_INTERRUPT; i++) {
rd32_for_each(hw, NBL_PADPT_MSIX_TABLE_REG_ADDR(i),
(u32 *)&msix_entry, sizeof(struct nbl_msix_entry));
seq_printf(m, "[%03d] addr:0x%016llX msg_data:%d mask:%d\n", i,
((u64)msix_entry.upper_address << 32) | msix_entry.lower_address,
msix_entry.message_data, msix_entry.vector_mask);
}
seq_puts(m, "\n");
seq_puts(m, "===== msix_info_table at 0x0015_8000 =====\n");
for (i = 0; i < NBL_MAX_INTERRUPT; i++) {
rd32_for_each(hw, NBL_PADPT_MSIX_INFO_REG_ARR(i),
(u32 *)&msix_info, sizeof(struct nbl_msix_info));
seq_printf(m, "[%03d] intrl_pnum:%d intrl_rate:%d bus:%d dev:%d func:%d valid:%d\n",
i,
msix_info.intrl_pnum, msix_info.intrl_rate,
msix_info.bus, msix_info.devid, msix_info.function, msix_info.valid);
}
seq_puts(m, "\n");
seq_puts(m, "===== queue_map_table at 0x0015_C000 =====\n");
for (i = 0; i < NBL_MAX_TXRX_QUEUE * 2; i++) {
rd32_for_each(hw, NBL_PADPT_QUEUE_MAP_REG_ARR(i),
(u32 *)&queue_map, sizeof(struct nbl_queue_map));
seq_printf(m, "[%03d] bus:%d dev:%d func:%d msix_idx:%d valid:%d\n", i,
queue_map.bus, queue_map.devid, queue_map.function,
queue_map.msix_idx, queue_map.msix_idx_valid);
}
return 0;
}
static int debugfs_tables_open(struct inode *inode, struct file *file)
{
return single_open(file, tables_seq_show, inode->i_private);
}
SINGLE_FOPS_RO(tables_fops, debugfs_tables_open);
/* bar */
static int bar_seq_show(struct seq_file *m, void *v)
{
struct nbl_hw *hw = m->private;
struct nbl_adapter *adapter = hw->back;
seq_printf(m, "BAR0 - phy: 0x%llX virt: 0x%llX len: 0x%llX\n",
pci_resource_start(adapter->pdev, NBL_X4_MEMORY_BAR),
(u64)hw->hw_addr,
pci_resource_len(adapter->pdev, NBL_X4_MEMORY_BAR));
seq_printf(m, "BAR2 - phy: 0x%llX virt: 0x%llX len: 0x%llX\n",
pci_resource_start(adapter->pdev, NBL_X4_MAILBOX_BAR),
(u64)hw->mailbox_bar_hw_addr,
pci_resource_len(adapter->pdev, NBL_X4_MAILBOX_BAR));
return 0;
}
static int debugfs_bar_open(struct inode *inode, struct file *file)
{
return single_open(file, bar_seq_show, inode->i_private);
}
SINGLE_FOPS_RO(bar_fops, debugfs_bar_open);
/* register
* echo offset > register - BAR 0 and 4B
* echo offset,length > register - BAR 0 and length
* echo bB,offset > register - BAR B (0 or 2) and 4B
* echo bB,offset,length > register - BAR B (0 or 2) and length
*/
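/* A concrete example, assuming debugfs is mounted at /sys/kernel/debug
 * (the BDF directory name below is illustrative; per-device directories are
 * created by nbl_debugfs_hw_init() under the "nblx4" root, and the file
 * itself is named "reg"):
 *   echo b0,0x1000,0x20 > /sys/kernel/debug/nblx4/0000:82:00.0/reg
 *   cat /sys/kernel/debug/nblx4/0000:82:00.0/reg
 * selects BAR 0, aligns offset and length to 4 bytes, and dumps 0x20 bytes
 * of registers starting at offset 0x1000, one 32-bit value per line.
 */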
static int register_seq_show(struct seq_file *m, void *v)
{
int i;
struct nbl_hw *hw = m->private;
seq_printf(m, "BAR %d off 0x%lX len 0x%lX:\n",
hw->debugfs_reg_bar, hw->debugfs_reg_offset, hw->debugfs_reg_length);
for (i = 0; i < hw->debugfs_reg_length; i += 4) {
seq_printf(m, "[%08X]: ", (unsigned int)hw->debugfs_reg_offset + i);
if (hw->debugfs_reg_bar == 0)
seq_printf(m, "%08X\n", rd32(hw, hw->debugfs_reg_offset + i));
else if (hw->debugfs_reg_bar == 2)
seq_printf(m, "%08X\n", mb_rd32(hw, hw->debugfs_reg_offset + i));
}
return 0;
}
static ssize_t debugfs_register_write(struct file *file,
const char __user *buf, size_t count, loff_t *ppos)
{
int err;
char *p, *p1, line[16] = { 0, };
struct nbl_hw *hw = ((struct seq_file *)(file->private_data))->private;
if (count >= sizeof(line))
return -EINVAL;
if (copy_from_user(line, buf, count))
return -EFAULT;
p = line;
/* BAR */
if (line[0] == 'b') {
if (line[2] != ',')
return -EINVAL;
if (line[1] == '0')
hw->debugfs_reg_bar = 0;
else if (line[1] == '2')
hw->debugfs_reg_bar = 2;
else
return -EINVAL;
p = line + 3;
}
/* offset */
p1 = strchr(p, ',');
if (p1) {
*p1 = 0;
p1++;
}
err = kstrtol(p, 0, &hw->debugfs_reg_offset);
if (err)
return err;
/* length */
if (p1) {
err = kstrtol(p1, 0, &hw->debugfs_reg_length);
if (err)
return err;
}
hw->debugfs_reg_offset = ALIGN_DOWN(hw->debugfs_reg_offset, 4);
hw->debugfs_reg_length = ALIGN(hw->debugfs_reg_length, 4);
if (!hw->debugfs_reg_length)
hw->debugfs_reg_length = 4;
return count;
}
static int debugfs_register_open(struct inode *inode, struct file *file)
{
return single_open(file, register_seq_show, inode->i_private);
}
SINGLE_FOPS_RW(reg_fops, debugfs_register_open, debugfs_register_write);
/* function init and cleanup */
void nbl_debugfs_hw_init(struct nbl_hw *hw)
{
int i;
char buf[16];
struct nbl_adapter *adapter;
adapter = (struct nbl_adapter *)hw->back;
if (!nblx4_debug_root)
return;
snprintf(buf, sizeof(buf), "%04x:%02x:%02x.%x",
pci_domain_nr(adapter->pdev->bus), hw->bus, hw->devid, hw->function);
hw->nbl_debug_root = debugfs_create_dir(buf, nblx4_debug_root);
if (is_af(hw)) {
debugfs_create_file("dvn", 0444,
hw->nbl_debug_root, hw, &dvn_fops);
debugfs_create_file("uvn", 0644,
hw->nbl_debug_root, hw, &uvn_fops);
debugfs_create_file("nic-statistics", 0444,
hw->nbl_debug_root, hw, &nic_statistics_fops);
debugfs_create_file("tables", 0644,
hw->nbl_debug_root, hw, &tables_fops);
}
if (adapter->num_txq) {
for (i = 0; i < adapter->num_txq; i++) {
snprintf(buf, sizeof(buf), "txring-%d", i);
debugfs_create_file(buf, 0444,
hw->nbl_debug_root,
adapter->tx_rings[i], &ring_fops);
}
}
if (adapter->num_rxq) {
for (i = 0; i < adapter->num_rxq; i++) {
snprintf(buf, sizeof(buf), "rxring-%d", i);
debugfs_create_file(buf, 0444,
hw->nbl_debug_root,
adapter->rx_rings[i], &ring_fops);
}
}
debugfs_create_file("bar", 0444, hw->nbl_debug_root, hw, &bar_fops);
hw->debugfs_reg_bar = 0;
hw->debugfs_reg_offset = 0;
hw->debugfs_reg_length = 8;
debugfs_create_file("reg", 0444, hw->nbl_debug_root, hw, &reg_fops);
}
void nbl_debugfs_hw_exit(struct nbl_hw *hw)
{
debugfs_remove_recursive(hw->nbl_debug_root);
hw->nbl_debug_root = NULL;
}
/* module init and cleanup */
void nbl_debugfs_init(void)
{
nblx4_debug_root = debugfs_create_dir("nblx4", NULL);
if (!nblx4_debug_root)
pr_info("init of nbl X4 debugfs failed\n");
}
void nbl_debugfs_exit(void)
{
debugfs_remove_recursive(nblx4_debug_root);
nblx4_debug_root = NULL;
}
#endif /* CONFIG_NBL_DEBUGFS */
This diff is collapsed.
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2022 nebula-matrix Limited.
* Author: Monte Song <monte.song@nebula-matrix.com>
*/
#ifndef _NBL_ETHTOOL_H_
#define _NBL_ETHTOOL_H_
#include <linux/netdevice.h>
#include <linux/ethtool.h>
static const u32 nbl_regs_dump_list[] = {
NBL_GREG_DYNAMIC_PRJ_ID_REG,
NBL_GREG_DYNAMIC_VERSION_REG,
};
enum NBL_STATS_TYPE {
NBL_NETDEV_STATS,
NBL_ETH_STATS,
NBL_PRIV_STATS,
NBL_STATS_TYPE_MAX
};
struct nbl_ethtool_stats {
char stat_string[ETH_GSTRING_LEN];
int type;
int sizeof_stat;
int stat_offset;
};
#ifndef sizeof_field
#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
#endif
#define NBL_NETDEV_STAT(_name, stat_m) { \
.stat_string = _name, \
.type = NBL_NETDEV_STATS, \
.sizeof_stat = sizeof_field(struct rtnl_link_stats64, stat_m), \
.stat_offset = offsetof(struct rtnl_link_stats64, stat_m) \
}
#define NBL_ETH_STAT(_name, stat_m) { \
.stat_string = _name, \
.type = NBL_ETH_STATS, \
.sizeof_stat = sizeof_field(struct nbl_adapter, stat_m), \
.stat_offset = offsetof(struct nbl_adapter, stat_m) \
}
#define NBL_PRIV_STAT(_name, stat_m) { \
.stat_string = _name, \
.type = NBL_PRIV_STATS, \
.sizeof_stat = sizeof_field(struct nbl_adapter, stat_m), \
.stat_offset = offsetof(struct nbl_adapter, stat_m) \
}
static const struct nbl_ethtool_stats nbl_gstrings_stats[] = {
NBL_NETDEV_STAT("rx_packets", rx_packets),
NBL_NETDEV_STAT("tx_packets", tx_packets),
NBL_NETDEV_STAT("rx_bytes", rx_bytes),
NBL_NETDEV_STAT("tx_bytes", tx_bytes),
NBL_NETDEV_STAT("rx_errors", rx_errors),
NBL_NETDEV_STAT("tx_errors", tx_errors),
NBL_NETDEV_STAT("rx_dropped", rx_dropped),
NBL_NETDEV_STAT("tx_dropped", tx_dropped),
NBL_NETDEV_STAT("multicast", multicast),
NBL_NETDEV_STAT("rx_crc_errors", rx_crc_errors),
NBL_NETDEV_STAT("rx_frame_errors", rx_frame_errors),
NBL_NETDEV_STAT("rx_length_errors", rx_length_errors),
NBL_ETH_STAT("tx_total_packets", stats.tx_total_packets),
NBL_ETH_STAT("tx_total_bytes", stats.tx_total_bytes),
NBL_ETH_STAT("tx_total_good_packets", stats.tx_total_good_packets),
NBL_ETH_STAT("tx_total_good_bytes", stats.tx_total_good_bytes),
NBL_ETH_STAT("tx_frame_error", stats.tx_frame_error),
NBL_ETH_STAT("tx_bad_fcs", stats.tx_bad_fcs),
NBL_ETH_STAT("tx_unicast", stats.tx_unicast),
NBL_ETH_STAT("tx_multicast", stats.tx_multicast),
NBL_ETH_STAT("tx_broadcast", stats.tx_broadcast),
NBL_ETH_STAT("tx_vlan", stats.tx_vlan),
NBL_ETH_STAT("tx_fc_pause", stats.tx_fc_pause),
NBL_ETH_STAT("rx_total_packets", stats.rx_total_packets),
NBL_ETH_STAT("rx_total_bytes", stats.rx_total_bytes),
NBL_ETH_STAT("rx_total_good_packets", stats.rx_total_good_packets),
NBL_ETH_STAT("rx_total_good_bytes", stats.rx_total_good_bytes),
NBL_ETH_STAT("rx_oversize", stats.rx_oversize),
NBL_ETH_STAT("rx_undersize", stats.rx_undersize),
NBL_ETH_STAT("rx_frame_err", stats.rx_frame_err),
NBL_ETH_STAT("rx_bad_code", stats.rx_bad_code),
NBL_ETH_STAT("rx_bad_fcs", stats.rx_bad_fcs),
NBL_ETH_STAT("rx_unicast", stats.rx_unicast),
NBL_ETH_STAT("rx_multicast", stats.rx_multicast),
NBL_ETH_STAT("rx_broadcast", stats.rx_broadcast),
NBL_ETH_STAT("rx_vlan", stats.rx_vlan),
NBL_ETH_STAT("rx_fc_pause", stats.rx_fc_pause),
NBL_PRIV_STAT("tx_csum_pkts", stats.tx_csum_pkts),
NBL_PRIV_STAT("rx_csum_pkts", stats.rx_csum_pkts),
NBL_PRIV_STAT("tx_busy", stats.tx_busy),
NBL_PRIV_STAT("tx_linearize", stats.tx_linearize),
NBL_PRIV_STAT("tx_dma_err", stats.tx_dma_err),
NBL_PRIV_STAT("alloc_page_failed", stats.alloc_page_failed),
NBL_PRIV_STAT("alloc_skb_failed", stats.alloc_skb_failed),
NBL_PRIV_STAT("rx_dma_err", stats.rx_dma_err),
NBL_PRIV_STAT("tx_timeout", stats.tx_timeout),
NBL_PRIV_STAT("err_status_reset", stats.err_status_reset),
NBL_PRIV_STAT("bad_code_reset", stats.bad_code_reset),
};
enum nbl_ethtool_test_id {
NBL_ETH_TEST_REG = 0,
NBL_ETH_TEST_LINK,
};
static const char nbl_gstrings_test[][ETH_GSTRING_LEN] = {
"Register test (offline)",
"Link test (on/offline)",
};
#define NBL_TEST_LEN (sizeof(nbl_gstrings_test) / ETH_GSTRING_LEN)
#define NBL_REG_TEST_PATTERN_0 0x5A5A5A5A
#define NBL_REG_TEST_PATTERN_1 0xA5A5A5A5
#define NBL_REG_TEST_PATTERN_2 0x00000000
#define NBL_REG_TEST_PATTERN_3 0xFFFFFFFF
#define NBL_TEST_PATTERN_NUM 4
#define NBL_GLOBAL_STATS_LEN ARRAY_SIZE(nbl_gstrings_stats)
static const char nbl_priv_flags[][ETH_GSTRING_LEN] = {
"sriov-ena",
};
enum nbl_adapter_flags {
NBL_ADAPTER_SRIOV_ENA,
NBL_ADAPTER_FLAGS_MAX
};
#define NBL_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(nbl_priv_flags)
void nbl_set_ethtool_ops(struct net_device *netdev);
int nbl_af_get_module_eeprom(struct nbl_hw *hw, u8 eth_port_id,
struct ethtool_eeprom *eeprom, u8 *data);
int nbl_af_get_module_info(struct nbl_hw *hw, u8 eth_port_id, struct ethtool_modinfo *info);
int nbl_read_eeprom_byte(struct nbl_hw *hw, u32 addr, u8 *data);
int nbl_af_get_eeprom(struct nbl_hw *hw, u32 offset, u32 length, u8 *bytes);
u64 nbl_af_link_test(struct nbl_hw *hw, u8 eth_port_id);
u64 nbl_af_reg_test(struct nbl_hw *hw, u8 eth_port_id);
void nbl_af_get_ethtool_dump_regs(struct nbl_hw *hw, u32 *regs_buff, u32 len);
int nbl_af_set_phys_id(struct nbl_hw *hw, u8 eth_port_id, enum ethtool_phys_id_state state);
void nbl_af_get_pause_stats(struct nbl_hw *hw, u8 eth_port_id, struct ethtool_pause_stats *stats);
int nbl_af_get_coalesce(struct nbl_hw *hw, struct ethtool_coalesce *ec,
u16 func_id, u16 local_vector_id);
int nbl_af_set_coalesce(struct nbl_hw *hw, u16 func_id, u16 local_vector_id,
u16 num_q_vectors, u32 regval);
int nbl_af_query_link_speed(struct nbl_hw *hw, u8 eth_port_id, u32 *speed_stat);
#endif
This diff is collapsed.
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2022 nebula-matrix Limited.
* Author: Monte Song <monte.song@nebula-matrix.com>
*/
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include "hw.h"
#include "common.h"
#include "hwmon.h"
enum NBL_HWMON_TEMP {
NBL_TEMP,
NBL_TEMP_MAX,
};
enum NBL_HWMON_VOLTAGE {
NBL_VOLT_VCCINT,
NBL_VOLT_VCCAUX,
NBL_VOLT_VCCBRAM,
NBL_VOLT_VUSER0,
NBL_VOLT_VUSER1,
NBL_VOLT_VUSER2,
NBL_VOLT_VUSER3,
NBL_VOLT_MAX,
};
#define NBL_HWMON_TEMP_MUL (5093140064ULL)
#define NBL_HWMON_TEMP_SHIFT (16)
#define NBL_HWMON_TEMP_SUB (2802308787LL)
#define NBL_HWMON_TEMP_FAC (10000)
#define NBL_HWMON_VOLT_MUL (3000)
#define NBL_HWMON_VOLT_SHIFT (16)
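/* Conversion note: the raw temperature code is scaled by a multiply-and-shift
 * plus an offset whose coefficients are pre-scaled by 1e7, then divided by
 * 10000, which yields millidegrees Celsius as expected for temp*_input.
 * The voltage conversion below maps the raw code into millivolts, the unit
 * hwmon expects for in*_input.
 */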
static ssize_t nbl_hwmon_temp_input_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct nbl_adapter *adapter = dev_get_drvdata(dev);
struct nbl_hw *hw = &adapter->hw;
int channel = to_sensor_dev_attr(attr)->index;
u32 val;
int temperature;
int len;
switch (channel) {
case NBL_TEMP:
val = rd32(hw, NBL_PRCFG_TEMPERATURE_REG);
temperature = (int)((((s64)(((u64)val * NBL_HWMON_TEMP_MUL) >>
NBL_HWMON_TEMP_SHIFT)) - NBL_HWMON_TEMP_SUB) /
NBL_HWMON_TEMP_FAC);
break;
default:
return -EINVAL;
}
len = snprintf(buf, PAGE_SIZE, "%d\n", temperature);
return len;
}
static ssize_t nbl_hwmon_in_input_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct nbl_adapter *adapter = dev_get_drvdata(dev);
struct nbl_hw *hw = &adapter->hw;
int channel = to_sensor_dev_attr(attr)->index;
u32 val;
u32 voltage;
int len;
switch (channel) {
case NBL_VOLT_VCCINT:
val = rd32(hw, NBL_PRCFG_VCCINT_REG);
break;
case NBL_VOLT_VCCAUX:
val = rd32(hw, NBL_PRCFG_VCCAUX_REG);
break;
case NBL_VOLT_VCCBRAM:
val = rd32(hw, NBL_PRCFG_VCCBRAM_REG);
break;
case NBL_VOLT_VUSER0:
val = rd32(hw, NBL_PRCFG_VUSER0_REG);
break;
case NBL_VOLT_VUSER1:
val = rd32(hw, NBL_PRCFG_VUSER1_REG);
break;
case NBL_VOLT_VUSER2:
val = rd32(hw, NBL_PRCFG_VUSER2_REG);
break;
case NBL_VOLT_VUSER3:
val = rd32(hw, NBL_PRCFG_VUSER3_REG);
break;
default:
return -EINVAL;
}
voltage = (val * NBL_HWMON_VOLT_MUL) >> NBL_HWMON_VOLT_SHIFT;
len = snprintf(buf, PAGE_SIZE, "%u\n", voltage);
return len;
}
static SENSOR_DEVICE_ATTR(temp1_input, 0444, nbl_hwmon_temp_input_show, NULL, NBL_TEMP);
static SENSOR_DEVICE_ATTR(in0_input, 0444, nbl_hwmon_in_input_show, NULL, NBL_VOLT_VCCINT);
static SENSOR_DEVICE_ATTR(in1_input, 0444, nbl_hwmon_in_input_show, NULL, NBL_VOLT_VCCAUX);
static SENSOR_DEVICE_ATTR(in2_input, 0444, nbl_hwmon_in_input_show, NULL, NBL_VOLT_VCCBRAM);
static SENSOR_DEVICE_ATTR(in3_input, 0444, nbl_hwmon_in_input_show, NULL, NBL_VOLT_VUSER0);
static SENSOR_DEVICE_ATTR(in4_input, 0444, nbl_hwmon_in_input_show, NULL, NBL_VOLT_VUSER1);
static SENSOR_DEVICE_ATTR(in5_input, 0444, nbl_hwmon_in_input_show, NULL, NBL_VOLT_VUSER2);
static SENSOR_DEVICE_ATTR(in6_input, 0444, nbl_hwmon_in_input_show, NULL, NBL_VOLT_VUSER3);
static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_in0_input.dev_attr.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in2_input.dev_attr.attr,
&sensor_dev_attr_in3_input.dev_attr.attr,
&sensor_dev_attr_in4_input.dev_attr.attr,
&sensor_dev_attr_in5_input.dev_attr.attr,
&sensor_dev_attr_in6_input.dev_attr.attr,
NULL,
};
static umode_t hwmon_attributes_visible(struct kobject __always_unused *kobj,
struct attribute *attr, int __always_unused index)
{
return attr->mode;
}
static const struct attribute_group hwmon_attrgroup = {
.attrs = hwmon_attributes,
.is_visible = hwmon_attributes_visible,
};
static const struct attribute_group *hwmon_groups[] = {
&hwmon_attrgroup,
NULL,
};
int nbl_hwmon_init(struct nbl_adapter *adapter)
{
struct nbl_hw *hw = &adapter->hw;
struct device *dev = nbl_adapter_to_dev(adapter);
if (!is_af(hw))
return 0;
adapter->hwmon_dev = hwmon_device_register_with_groups(dev, "nbl_x4", adapter,
hwmon_groups);
return PTR_ERR_OR_ZERO(adapter->hwmon_dev);
}
void nbl_hwmon_fini(struct nbl_adapter *adapter)
{
struct nbl_hw *hw = &adapter->hw;
if (!is_af(hw))
return;
if (adapter->hwmon_dev)
hwmon_device_unregister(adapter->hwmon_dev);
}
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2022 nebula-matrix Limited.
* Author: Monte Song <monte.song@nebula-matrix.com>
*/
#ifndef _NBL_HWMON_H_
#define _NBL_HWMON_H_
int nbl_hwmon_init(struct nbl_adapter *adapter);
void nbl_hwmon_fini(struct nbl_adapter *adapter);
#endif
This diff is collapsed.
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2022 nebula-matrix Limited.
* Author: Monte Song <monte.song@nebula-matrix.com>
*/
#ifndef _NBL_INTERRUPT_H_
#define _NBL_INTERRUPT_H_
int nbl_init_interrupt_scheme(struct nbl_adapter *adapter);
void nbl_fini_interrupt_scheme(struct nbl_adapter *adapter);
int nbl_napi_poll(struct napi_struct *napi, int budget);
int nbl_request_irq(struct nbl_adapter *adapter);
void nbl_free_irq(struct nbl_adapter *adapter);
void nbl_enable_all_napis(struct nbl_adapter *adapter);
void nbl_disable_all_napis(struct nbl_adapter *adapter);
void nbl_configure_msix_irqs(struct nbl_adapter *adapter);
void nbl_af_configure_msix_irq(struct nbl_hw *hw, u16 func_id, u16 local_vector_id);
void nbl_af_clear_msix_irq_conf(struct nbl_hw *hw, u16 func_id, u16 local_vector_id);
void nbl_clear_msix_irqs_conf(struct nbl_adapter *adapter);
void nbl_enable_msix_irq(struct nbl_hw *hw, struct nbl_q_vector *q_vector);
int nbl_af_forward_ring_napi_poll(struct napi_struct *napi, int budget);
int nbl_af_forward_ring_request_irq(struct nbl_adapter *adapter);
void nbl_af_forward_ring_free_irq(struct nbl_adapter *adapter);
void nbl_af_enable_forward_ring_napi(struct nbl_adapter *adapter);
void nbl_af_disable_forward_ring_napi(struct nbl_adapter *adapter);
void nbl_af_configure_forward_ring_irq(struct nbl_adapter *adapter);
void nbl_af_clear_forward_ring_irq_conf(struct nbl_adapter *adapter);
#endif
This diff is collapsed.
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2022 nebula-matrix Limited.
* Author: Monte Song <monte.song@nebula-matrix.com>
*/
#ifndef _NBL_MACVLAN_H_
#define _NBL_MACVLAN_H_
#define NBL_MACVLAN_TRY_GET_STATUS_TIMES 10
int nbl_macvlan_add(struct nbl_hw *hw, u8 eth_port_id, u8 *mac_addr, u16 vlan_id,
u8 vsi_id, int index);
int nbl_macvlan_delete(struct nbl_hw *hw, u8 eth_port_id, u8 *mac_addr, u16 vlan_id);
int nbl_af_configure_mac_addr(struct nbl_hw *hw, u16 func_id, u8 eth_port_id,
u8 *mac_addr, u8 vsi_id);
int nbl_configure_mac_addr(struct nbl_hw *hw, u8 *mac_addr);
int nbl_af_clear_mac_addr(struct nbl_hw *hw, u16 func_id);
int nbl_clear_mac_addr(struct nbl_hw *hw);
int nbl_af_change_mac_addr(struct nbl_hw *hw, u16 func_id, u8 *mac_addr, u8 vsi_id);
int nbl_change_mac_addr(struct nbl_hw *hw, u8 *mac_addr);
int nbl_af_operate_vlan_id(struct nbl_hw *hw, u16 func_id, u16 vlan_id,
u8 vsi_id, bool add);
int nbl_add_vlan_id(struct nbl_hw *hw, u16 vlan_id);
int nbl_delete_vlan_id(struct nbl_hw *hw, u16 vlan_id);
#endif
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2022 nebula-matrix Limited.
* Author: Monte Song <monte.song@nebula-matrix.com>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include "common.h"
#include "mailbox.h"
#include "sriov.h"
#ifdef CONFIG_PCI_IOV
void nbl_af_enter_forward_ring_mode(struct nbl_hw *hw, u8 eth_port_id, u8 vsi_id)
{
struct nbl_af_res_info *af_res = hw->af_res;
u8 forward_ring_index = af_res->forward_ring_index;
struct nbl_ingress_eth_port_fwd port_fwd_config;
struct nbl_src_vsi_port src_vsi_port_config;
rd32_for_each(hw, NBL_PRO_INGRESS_ETH_PORT_FWD_REG_ARR(eth_port_id),
(u32 *)&port_fwd_config, sizeof(port_fwd_config));
port_fwd_config.forward_queue_id_en = 1;
port_fwd_config.forward_queue_id = forward_ring_index;
wr32_for_each(hw, NBL_PRO_INGRESS_ETH_PORT_FWD_REG_ARR(eth_port_id),
(u32 *)&port_fwd_config, sizeof(port_fwd_config));
rd32_for_each(hw, NBL_PRO_SRC_VSI_PORT_REG_ARR(vsi_id),
(u32 *)&src_vsi_port_config, sizeof(src_vsi_port_config));
src_vsi_port_config.mac_lut_en = 1;
src_vsi_port_config.forward_queue_id_en = 1;
src_vsi_port_config.forward_queue_id = forward_ring_index;
wr32_for_each(hw, NBL_PRO_SRC_VSI_PORT_REG_ARR(vsi_id),
(u32 *)&src_vsi_port_config, sizeof(src_vsi_port_config));
}
void nbl_af_leave_forward_ring_mode(struct nbl_hw *hw, u8 eth_port_id, u8 vsi_id)
{
struct nbl_ingress_eth_port_fwd port_fwd_config;
struct nbl_src_vsi_port src_vsi_port_config;
rd32_for_each(hw, NBL_PRO_INGRESS_ETH_PORT_FWD_REG_ARR(eth_port_id),
(u32 *)&port_fwd_config, sizeof(port_fwd_config));
port_fwd_config.forward_queue_id_en = 0;
port_fwd_config.forward_queue_id = 0;
wr32_for_each(hw, NBL_PRO_INGRESS_ETH_PORT_FWD_REG_ARR(eth_port_id),
(u32 *)&port_fwd_config, sizeof(port_fwd_config));
rd32_for_each(hw, NBL_PRO_SRC_VSI_PORT_REG_ARR(vsi_id),
(u32 *)&src_vsi_port_config, sizeof(src_vsi_port_config));
src_vsi_port_config.mac_lut_en = 0;
src_vsi_port_config.forward_queue_id_en = 0;
src_vsi_port_config.forward_queue_id = 0;
wr32_for_each(hw, NBL_PRO_SRC_VSI_PORT_REG_ARR(vsi_id),
(u32 *)&src_vsi_port_config, sizeof(src_vsi_port_config));
}
static void nbl_enter_forward_ring_mode(struct nbl_hw *hw)
{
u8 eth_port_id;
u8 vsi_id;
eth_port_id = hw->eth_port_id;
vsi_id = hw->vsi_id;
if (is_af(hw))
nbl_af_enter_forward_ring_mode(hw, eth_port_id, vsi_id);
else
nbl_mailbox_req_enter_forward_ring_mode(hw, eth_port_id, vsi_id);
}
static void nbl_leave_forward_ring_mode(struct nbl_hw *hw)
{
u8 eth_port_id;
u8 vsi_id;
eth_port_id = hw->eth_port_id;
vsi_id = hw->vsi_id;
if (is_af(hw))
nbl_af_leave_forward_ring_mode(hw, eth_port_id, vsi_id);
else
nbl_mailbox_req_leave_forward_ring_mode(hw, eth_port_id, vsi_id);
}
#endif
static int nbl_sriov_disable(struct pci_dev *pdev)
{
#ifdef CONFIG_PCI_IOV
struct net_device *netdev = pci_get_drvdata(pdev);
struct nbl_adapter *adapter = netdev_priv(netdev);
struct nbl_hw *hw = &adapter->hw;
if (pci_vfs_assigned(pdev)) {
pr_warn("Unloading driver while VFs are assigned\n");
return -EPERM;
}
nbl_leave_forward_ring_mode(hw);
pci_disable_sriov(pdev);
#endif
return 0;
}
static int nbl_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
struct net_device *netdev = pci_get_drvdata(pdev);
struct nbl_adapter *adapter = netdev_priv(netdev);
struct nbl_hw *hw = &adapter->hw;
int existing_vfs = pci_num_vf(pdev);
int err;
if (existing_vfs) {
pr_err("VFs is created already\n");
return -EINVAL;
}
nbl_enter_forward_ring_mode(hw);
err = pci_enable_sriov(pdev, num_vfs);
if (err) {
pr_warn("Failed to enable SR-IOV with error %d\n", err);
return err;
}
return num_vfs;
#else
return 0;
#endif
}
int nbl_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
if (num_vfs == 0)
return nbl_sriov_disable(pdev);
else
return nbl_sriov_enable(pdev, num_vfs);
}
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2022 nebula-matrix Limited.
* Author: Monte Song <monte.song@nebula-matrix.com>
*/
#ifndef _NBL_SRIOV_H_
#define _NBL_SRIOV_H_
#ifdef CONFIG_PCI_IOV
void nbl_af_enter_forward_ring_mode(struct nbl_hw *hw, u8 eth_port_id, u8 vsi_id);
void nbl_af_leave_forward_ring_mode(struct nbl_hw *hw, u8 eth_port_id, u8 vsi_id);
#endif
int nbl_sriov_configure(struct pci_dev *pdev, int num_vfs);
#endif
This diff is collapsed.
This diff is collapsed.
@@ -97,6 +97,12 @@ F: Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt
F: include/uapi/rdma/hns-abi.h
F: drivers/infiniband/hw/hns/
NEBULA-MATRIX Ethernet Controller DRIVERS
M: Yi Chen <open@nebula-matrix.com>
S: Maintained
F: Documentation/networking/device_drivers/ethernet/nebula-matrix/
F: drivers/net/ethernet/nebula-matrix/
HISILICON ROH DRIVER
M: Ke Chen <chenke54@huawei.com>
S: Maintained
......