Commit b2f43d83 authored by Chiqijun, committed by Zheng Zengkai

net/hinic: Add NIC Layer

driver inclusion
category: feature
bugzilla: 47993

-----------------------------------------------------------------------

Add NIC Layer support, including:
1. register the net device with the kernel
2. implement the hooks of 'struct net_device_ops' and
   'struct ethtool_ops'
3. etc.
Signed-off-by: Chiqijun <chiqijun@huawei.com>
Reviewed-by: Wangxiaoyun <cloud.wangxiaoyun@huawei.com>
Acked-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent f1168d29
@@ -6,4 +6,7 @@ hinic-y := hinic_nic_cfg.o hinic_nic_io.o hinic_nic_dbg.o \
	   hinic_mbox.o hinic_api_cmd.o hinic_mgmt.o \
	   hinic_wq.o hinic_cmdq.o hinic_hwdev.o hinic_cfg.o \
	   hinic_sml_counter.o hinic_sml_lt.o \
	   hinic_multi_host_mgmt.o hinic_main.o hinic_lld.o \
	   hinic_qp.o hinic_rx.o hinic_tx.o hinic_dbgtool_knl.o \
	   hinic_nictool.o hinic_sriov.o hinic_dcb.o \
	   hinic_ethtool.o
// SPDX-License-Identifier: GPL-2.0
/* Huawei HiNIC PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/rtc.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/if.h>
#include <linux/ioctl.h>
#include <linux/pci.h>
#include "ossl_knl.h"
#include "hinic_hw.h"
#include "hinic_hwdev.h"
#include "hinic_hw_mgmt.h"
#include "hinic_nic_dev.h"
#include "hinic_lld.h"
#include "hinic_dbgtool_knl.h"
struct ffm_intr_info {
u8 node_id;
/* error level of the interrupt source */
u8 err_level;
/* Classification by interrupt source properties */
u16 err_type;
u32 err_csr_addr;
u32 err_csr_value;
};
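/* maximum cmd/ack/message buffer size: 2 KB */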
#define DBGTOOL_MSG_MAX_SIZE 2048ULL
#define HINIC_SELF_CMD_UP2PF_FFM 0x26
void *g_card_node_array[MAX_CARD_NUM] = {0};
void *g_card_vir_addr[MAX_CARD_NUM] = {0};
u64 g_card_phy_addr[MAX_CARD_NUM] = {0};
/* lock for g_card_vir_addr */
struct mutex g_addr_lock;
int card_id;
/* dbgtool character device name, class name, dev path */
#define CHR_DEV_DBGTOOL "dbgtool_chr_dev"
#define CLASS_DBGTOOL "dbgtool_class"
#define DBGTOOL_DEV_PATH "/dev/dbgtool_chr_dev"
struct dbgtool_k_glb_info {
struct semaphore dbgtool_sem;
struct ffm_record_info *ffm;
};
dev_t dbgtool_dev_id; /* device id */
struct cdev dbgtool_chr_dev; /* struct of char device */
/*lint -save -e104 -e808*/
struct class *dbgtool_d_class; /* struct of char class */
/*lint -restore*/
int g_dbgtool_init_flag;
int g_dbgtool_ref_cnt;
static int dbgtool_knl_open(struct inode *pnode,
struct file *pfile)
{
return 0;
}
static int dbgtool_knl_release(struct inode *pnode,
struct file *pfile)
{
return 0;
}
static ssize_t dbgtool_knl_read(struct file *pfile,
char __user *ubuf,
size_t size,
loff_t *ppos)
{
return 0;
}
static ssize_t dbgtool_knl_write(struct file *pfile,
const char __user *ubuf,
size_t size,
loff_t *ppos)
{
return 0;
}
static bool is_valid_phy_addr(u64 offset)
{
int i;
for (i = 0; i < MAX_CARD_NUM; i++) {
if (offset == g_card_phy_addr[i])
return true;
}
return false;
}
int hinic_mem_mmap(struct file *filp, struct vm_area_struct *vma)
{
unsigned long vmsize = vma->vm_end - vma->vm_start;
phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
phys_addr_t phy_addr;
if (vmsize > (PAGE_SIZE * (1 << DBGTOOL_PAGE_ORDER))) {
pr_err("Map size = %lu is bigger than alloc\n", vmsize);
return -EAGAIN;
}
if (offset && !is_valid_phy_addr((u64)offset) &&
!hinic_is_valid_bar_addr((u64)offset)) {
pr_err("offset is invalid");
return -EAGAIN;
}
/* old versions of the tool set vma->vm_pgoff to 0 */
phy_addr = offset ? offset : g_card_phy_addr[card_id];
if (!phy_addr) {
pr_err("Card_id = %d physical address is 0\n", card_id);
return -EAGAIN;
}
if (remap_pfn_range(vma, vma->vm_start,
(phy_addr >> PAGE_SHIFT),
vmsize, vma->vm_page_prot))
return -EAGAIN;
return 0;
}
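/* A hypothetical userspace sketch of mapping the dbgtool pages (not
 * part of this patch; names and sizes are assumptions):
 *
 *	int fd = open("/dev/dbgtool_chr_dev", O_RDWR);
 *	size_t len = getpagesize() << 10;	// DBGTOOL_PAGE_ORDER == 10
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 * With a zero offset the driver maps the pages reserved for the card
 * selected by the last ioctl; a non-zero page offset must match a
 * card's reserved physical address or a valid BAR address.
 */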
/**
* dbgtool_knl_api_cmd_read - used for read operations
* @para: the dbgtool parameter
* @g_func_handle_array: global function handle
* Return: 0 - success, negative - failure
*/
long dbgtool_knl_api_cmd_read(struct dbgtool_param *para,
void **g_func_handle_array)
{
long ret = 0;
u8 *cmd;
u16 size;
void *ack;
u16 ack_size;
u32 pf_id;
void *hwdev;
pf_id = para->param.api_rd.pf_id;
if (pf_id >= 16) {
pr_err("PF id(0x%x) too big\n", pf_id);
return -EFAULT;
}
/* obtain the chipif pointer for pf_id */
hwdev = g_func_handle_array[pf_id];
if (!hwdev) {
pr_err("PF id(0x%x) handle null in api cmd read\n", pf_id);
return -EFAULT;
}
/* alloc cmd and ack memory */
size = para->param.api_rd.size;
if (size == 0 || size > DBGTOOL_MSG_MAX_SIZE) {
pr_err("Read cmd size is invalid or more than 2KB\n");
return -EINVAL;
}
cmd = kzalloc((unsigned long long)size, GFP_KERNEL);
if (!cmd) {
pr_err("Alloc read cmd mem fail\n");
return -ENOMEM;
}
ack_size = para->param.api_rd.ack_size;
if (para->param.api_rd.ack_size == 0) {
pr_err("Read cmd ack size is 0\n");
ret = -ENOMEM;
goto alloc_ack_mem_fail;
}
ack = kzalloc((unsigned long long)ack_size, GFP_KERNEL);
if (!ack) {
pr_err("Alloc read ack mem fail\n");
ret = -ENOMEM;
goto alloc_ack_mem_fail;
}
/* cmd content copied from user-mode */
if (copy_from_user(cmd, para->param.api_rd.cmd, (unsigned long)size)) {
pr_err("Copy cmd from user fail\n");
ret = -EFAULT;
goto copy_user_cmd_fail;
}
/* invoke the api cmd interface to read the content */
ret = hinic_api_cmd_read_ack(hwdev, para->param.api_rd.dest,
cmd, size, ack, ack_size);
if (ret) {
pr_err("Api send single cmd ack fail!\n");
goto api_rd_fail;
}
/* copy the ack contents back to user space */
if (copy_to_user(para->param.api_rd.ack, ack, ack_size)) {
pr_err("Copy ack to user fail\n");
ret = -EFAULT;
}
api_rd_fail:
copy_user_cmd_fail:
kfree(ack);
alloc_ack_mem_fail:
kfree(cmd);
return ret;
}
/**
* dbgtool_knl_api_cmd_write - used for write operations
* @para: the dbgtool parameter
* @g_func_handle_array: global function handle
* Return: 0 - success, negative - failure
*/
long dbgtool_knl_api_cmd_write(struct dbgtool_param *para,
void **g_func_handle_array)
{
long ret = 0;
u8 *cmd;
u16 size;
u32 pf_id;
void *hwdev;
pf_id = para->param.api_wr.pf_id;
if (pf_id >= 16) {
pr_err("PF id(0x%x) too big\n", pf_id);
return -EFAULT;
}
/* obtaining chipif pointer according to pf_id */
hwdev = g_func_handle_array[pf_id];
if (!hwdev) {
pr_err("PF id(0x%x) handle null\n", pf_id);
return -EFAULT;
}
/* alloc cmd memory */
size = para->param.api_wr.size;
if (size == 0 || size > DBGTOOL_MSG_MAX_SIZE) {
pr_err("Write cmd size is invalid or more than 2KB\n");
return -EINVAL;
}
cmd = kzalloc((unsigned long long)size, GFP_KERNEL);
if (!cmd) {
pr_err("Alloc write cmd mem fail\n");
return -ENOMEM;
}
/* cmd content copied from user-mode */
if (copy_from_user(cmd, para->param.api_wr.cmd, (unsigned long)size)) {
pr_err("Copy cmd from user fail\n");
ret = -EFAULT;
goto copy_user_cmd_fail;
}
/* api cmd interface is invoked to write the content */
ret = hinic_api_cmd_write_nack(hwdev, para->param.api_wr.dest,
cmd, size);
if (ret)
pr_err("Api send single cmd nack fail\n");
copy_user_cmd_fail:
kfree(cmd);
return ret;
}
void chipif_get_all_pf_dev_info(struct pf_dev_info *dev_info, int card_idx,
void **g_func_handle_array)
{
u32 func_idx;
struct hinic_hwdev *hwdev;
if (!dev_info) {
pr_err("Params error!\n");
return;
}
/* at most 16 PFs */
for (func_idx = 0; func_idx < 16; func_idx++) {
hwdev = (struct hinic_hwdev *)g_func_handle_array[func_idx];
dev_info[func_idx].phy_addr = g_card_phy_addr[card_idx];
if (!hwdev) {
dev_info[func_idx].bar0_size = 0;
dev_info[func_idx].bus = 0;
dev_info[func_idx].slot = 0;
dev_info[func_idx].func = 0;
} else {
dev_info[func_idx].bar0_size =
pci_resource_len
(((struct pci_dev *)hwdev->pcidev_hdl), 0);
dev_info[func_idx].bus =
((struct pci_dev *)
hwdev->pcidev_hdl)->bus->number;
dev_info[func_idx].slot =
PCI_SLOT(((struct pci_dev *)hwdev->pcidev_hdl)
->devfn);
dev_info[func_idx].func =
PCI_FUNC(((struct pci_dev *)hwdev->pcidev_hdl)
->devfn);
}
}
}
/**
* dbgtool_knl_pf_dev_info_get - Obtain the pf sdk_info
* @para: the dbgtool parameter
* @g_func_handle_array: global function handle
* Return: 0 - success, negative - failure
*/
long dbgtool_knl_pf_dev_info_get(struct dbgtool_param *para,
void **g_func_handle_array)
{
struct pf_dev_info dev_info[16] = { {0} };
unsigned char *tmp;
int i;
mutex_lock(&g_addr_lock);
if (!g_card_vir_addr[card_id]) {
g_card_vir_addr[card_id] =
(void *)__get_free_pages(GFP_KERNEL,
DBGTOOL_PAGE_ORDER);
if (!g_card_vir_addr[card_id]) {
pr_err("Alloc dbgtool api chain fail!\n");
mutex_unlock(&g_addr_lock);
return -EFAULT;
}
memset(g_card_vir_addr[card_id], 0,
PAGE_SIZE * (1 << DBGTOOL_PAGE_ORDER));
g_card_phy_addr[card_id] =
virt_to_phys(g_card_vir_addr[card_id]);
if (!g_card_phy_addr[card_id]) {
pr_err("phy addr for card %d is 0\n", card_id);
free_pages((unsigned long)g_card_vir_addr[card_id],
DBGTOOL_PAGE_ORDER);
g_card_vir_addr[card_id] = NULL;
mutex_unlock(&g_addr_lock);
return -EFAULT;
}
tmp = g_card_vir_addr[card_id];
for (i = 0; i < (1 << DBGTOOL_PAGE_ORDER); i++) {
SetPageReserved(virt_to_page(tmp));
tmp += PAGE_SIZE;
}
}
mutex_unlock(&g_addr_lock);
chipif_get_all_pf_dev_info(dev_info, card_id, g_func_handle_array);
/* Copy the dev_info to user mode */
if (copy_to_user(para->param.dev_info, dev_info,
(unsigned int)sizeof(dev_info))) {
pr_err("Copy dev_info to user fail\n");
return -EFAULT;
}
return 0;
}
/**
* dbgtool_knl_ffm_info_rd - Read ffm information
* @para: the dbgtool parameter
* @dbgtool_info: the dbgtool info
* Return: 0 - success, negative - failure
*/
long dbgtool_knl_ffm_info_rd(struct dbgtool_param *para,
struct dbgtool_k_glb_info *dbgtool_info)
{
/* Copy the ffm_info to user mode */
if (copy_to_user(para->param.ffm_rd, dbgtool_info->ffm,
(unsigned int)sizeof(struct ffm_record_info))) {
pr_err("Copy ffm_info to user fail\n");
return -EFAULT;
}
return 0;
}
/**
* dbgtool_knl_ffm_info_clr - Clear FFM information
* @para: unused
* @dbgtool_info: the dbgtool info
*/
void dbgtool_knl_ffm_info_clr(struct dbgtool_param *para,
struct dbgtool_k_glb_info *dbgtool_info)
{
dbgtool_info->ffm->ffm_num = 0;
}
/**
* dbgtool_knl_msg_to_up - send a message to the uP after receiving a dbgtool command
* @para: the dbgtool parameter
* @g_func_handle_array: global function handle
* Return: 0 - success, negative - failure
*/
long dbgtool_knl_msg_to_up(struct dbgtool_param *para,
void **g_func_handle_array)
{
long ret = 0;
void *buf_in;
void *buf_out;
u16 out_size;
u8 pf_id;
if (para->param.msg2up.in_size > DBGTOOL_MSG_MAX_SIZE) {
pr_err("User data(%d) more than 2KB\n",
para->param.msg2up.in_size);
return -EFAULT;
}
pf_id = para->param.msg2up.pf_id;
/* at most 16 PFs */
if (pf_id >= 16) {
pr_err("PF id(0x%x) too big in message to mgmt\n", pf_id);
return -EFAULT;
}
if (!g_func_handle_array[pf_id]) {
pr_err("PF id(0x%x) handle null in message to mgmt\n", pf_id);
return -EFAULT;
}
/* alloc buf_in and buf_out memory, 2KB each */
buf_in = kzalloc(DBGTOOL_MSG_MAX_SIZE, GFP_KERNEL);
if (!buf_in) {
pr_err("Alloc buf_in mem fail\n");
return -ENOMEM;
}
buf_out = kzalloc(DBGTOOL_MSG_MAX_SIZE, GFP_KERNEL);
if (!buf_out) {
pr_err("Alloc buf_out mem fail\n");
ret = -ENOMEM;
goto alloc_buf_out_mem_fail;
}
/* copy buf_in from user space */
if (copy_from_user(buf_in, para->param.msg2up.buf_in,
(unsigned long)para->param.msg2up.in_size)) {
pr_err("Copy buf_in from user fail\n");
ret = -EFAULT;
goto copy_user_buf_in_fail;
}
out_size = DBGTOOL_MSG_MAX_SIZE;
/* Invoke the pf2up communication interface */
ret = hinic_msg_to_mgmt_sync(g_func_handle_array[pf_id],
para->param.msg2up.mod,
para->param.msg2up.cmd,
buf_in,
para->param.msg2up.in_size,
buf_out,
&out_size,
0);
if (ret)
goto msg_2_up_fail;
/* Copy the out_size and buf_out content to user mode */
if (copy_to_user(para->param.msg2up.out_size, &out_size,
(unsigned int)sizeof(out_size))) {
pr_err("Copy out_size to user fail\n");
ret = -EFAULT;
goto copy_out_size_fail;
}
if (copy_to_user(para->param.msg2up.buf_out, buf_out, out_size)) {
pr_err("Copy buf_out to user fail\n");
ret = -EFAULT;
}
copy_out_size_fail:
msg_2_up_fail:
copy_user_buf_in_fail:
kfree(buf_out);
alloc_buf_out_mem_fail:
kfree(buf_in);
return ret;
}
long dbgtool_knl_free_mem(int id)
{
unsigned char *tmp;
int i;
mutex_lock(&g_addr_lock);
if (!g_card_vir_addr[id]) {
mutex_unlock(&g_addr_lock);
return 0;
}
tmp = g_card_vir_addr[id];
for (i = 0; i < (1 << DBGTOOL_PAGE_ORDER); i++) {
ClearPageReserved(virt_to_page(tmp));
tmp += PAGE_SIZE;
}
free_pages((unsigned long)g_card_vir_addr[id], DBGTOOL_PAGE_ORDER);
g_card_vir_addr[id] = NULL;
g_card_phy_addr[id] = 0;
mutex_unlock(&g_addr_lock);
return 0;
}
/*lint -save -e771 -e794*/
/**
* dbgtool_knl_unlocked_ioctl - dbgtool ioctl entry
* @pfile: the pointer to file
* @cmd: the command type
* @arg: the pointer to the user-space dbgtool_param
* Return: 0 - success, negative - failure
*/
long dbgtool_knl_unlocked_ioctl(struct file *pfile,
unsigned int cmd,
unsigned long arg)
{
long ret = 0;
unsigned int real_cmd;
struct dbgtool_param param;
struct dbgtool_k_glb_info *dbgtool_info;
struct card_node *card_info = NULL;
int i;
(void)memset(&param, 0, sizeof(param));
if (copy_from_user(&param, (void __user *)arg, sizeof(param))) {
pr_err("Copy param from user fail\n");
return -EFAULT;
}
param.chip_name[IFNAMSIZ - 1] = '\0';
for (i = 0; i < MAX_CARD_NUM; i++) {
card_info = (struct card_node *)g_card_node_array[i];
if (!card_info)
continue;
if (!strncmp(param.chip_name, card_info->chip_name, IFNAMSIZ))
break;
}
if (i == MAX_CARD_NUM || !card_info) {
pr_err("Can't find this card %s\n", param.chip_name);
return -EFAULT;
}
card_id = i;
dbgtool_info = (struct dbgtool_k_glb_info *)card_info->dbgtool_info;
down(&dbgtool_info->dbgtool_sem);
real_cmd = _IOC_NR(cmd);
switch (real_cmd) {
case DBGTOOL_CMD_API_RD:
ret = dbgtool_knl_api_cmd_read(&param,
card_info->func_handle_array);
break;
case DBGTOOL_CMD_API_WR:
ret = dbgtool_knl_api_cmd_write(&param,
card_info->func_handle_array);
break;
case DBGTOOL_CMD_FFM_RD:
ret = dbgtool_knl_ffm_info_rd(&param, dbgtool_info);
break;
case DBGTOOL_CMD_FFM_CLR:
dbgtool_knl_ffm_info_clr(&param, dbgtool_info);
break;
case DBGTOOL_CMD_PF_DEV_INFO_GET:
ret = dbgtool_knl_pf_dev_info_get(&param,
card_info->func_handle_array);
break;
case DBGTOOL_CMD_MSG_2_UP:
ret = dbgtool_knl_msg_to_up(&param,
card_info->func_handle_array);
break;
case DBGTOOL_CMD_FREE_MEM:
ret = dbgtool_knl_free_mem(i);
break;
default:
pr_err("Dbgtool cmd(x%x) not support now\n", real_cmd);
ret = -EFAULT;
}
up(&dbgtool_info->dbgtool_sem);
return ret;
}
/**
* ffm_intr_msg_record - record an FFM interrupt message reported by the uP
* @handle: the function handle
* @buf_in: the pointer to input buffer
* @in_size: the size of input buffer
* @buf_out: the pointer to output buffer
* @out_size: the size of output buffer
*/
void ffm_intr_msg_record(void *handle, void *buf_in, u16 in_size,
void *buf_out, u16 *out_size)
{
struct dbgtool_k_glb_info *dbgtool_info;
struct ffm_intr_info *intr;
u32 ffm_idx;
struct tm tm;
struct card_node *card_info = NULL;
struct hinic_hwdev *hwdev = handle;
bool flag = false;
int i, j;
for (i = 0; i < MAX_CARD_NUM; i++) {
card_info = (struct card_node *)g_card_node_array[i];
if (!card_info)
continue;
for (j = 0; j < MAX_FUNCTION_NUM; j++) {
if (handle == card_info->func_handle_array[j]) {
flag = true;
break;
}
}
if (flag)
break;
}
if (i == MAX_CARD_NUM || !card_info) {
pr_err("Id(%d) cant find this card\n", i);
return;
}
dbgtool_info = (struct dbgtool_k_glb_info *)card_info->dbgtool_info;
if (!dbgtool_info) {
pr_err("Dbgtool info is null\n");
return;
}
intr = (struct ffm_intr_info *)buf_in;
if (!dbgtool_info->ffm)
return;
ffm_idx = dbgtool_info->ffm->ffm_num;
if (ffm_idx < FFM_RECORD_NUM_MAX) {
nic_info(hwdev->dev_hdl, "%s: recv intr, ffm_idx: %d\n",
__func__, ffm_idx);
dbgtool_info->ffm->ffm[ffm_idx].node_id = intr->node_id;
dbgtool_info->ffm->ffm[ffm_idx].err_level = intr->err_level;
dbgtool_info->ffm->ffm[ffm_idx].err_type = intr->err_type;
dbgtool_info->ffm->ffm[ffm_idx].err_csr_addr =
intr->err_csr_addr;
dbgtool_info->ffm->ffm[ffm_idx].err_csr_value =
intr->err_csr_value;
/* Calculate the time in date value to tm */
time64_to_tm(ktime_to_ms(ktime_get_real()) / MSEC_PER_SEC, 0, &tm);
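/* equivalent to time64_to_tm(ktime_get_real_seconds(), 0, &tm) */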
/* tm_year starts from 1900; 0->1900, 1->1901, and so on */
dbgtool_info->ffm->ffm[ffm_idx].year =
(u16)(tm.tm_year + 1900);
/* tm_mon starts from 0, 0 indicates January, and so on */
dbgtool_info->ffm->ffm[ffm_idx].mon = (u8)tm.tm_mon + 1;
dbgtool_info->ffm->ffm[ffm_idx].mday = (u8)tm.tm_mday;
dbgtool_info->ffm->ffm[ffm_idx].hour = (u8)tm.tm_hour;
dbgtool_info->ffm->ffm[ffm_idx].min = (u8)tm.tm_min;
dbgtool_info->ffm->ffm[ffm_idx].sec = (u8)tm.tm_sec;
dbgtool_info->ffm->ffm_num++;
}
}
/*lint -restore*/
/*lint -save -e785 -e438*/
static const struct file_operations dbgtool_file_operations = {
.owner = THIS_MODULE,
.open = dbgtool_knl_open,
.release = dbgtool_knl_release,
.read = dbgtool_knl_read,
.write = dbgtool_knl_write,
.unlocked_ioctl = dbgtool_knl_unlocked_ioctl,
.mmap = hinic_mem_mmap,
};
/**
* dbgtool_knl_init - dbgtool character device init
* @vhwdev: the pointer to hardware device
* @chip_node: the pointer to card node
* Return: 0 - success, negative - failure
*/
int dbgtool_knl_init(void *vhwdev, void *chip_node)
{
int ret = 0;
int id;
struct dbgtool_k_glb_info *dbgtool_info;
struct device *pdevice;
struct card_node *chip_info = (struct card_node *)chip_node;
struct hinic_hwdev *hwdev = vhwdev;
if (hinic_func_type(hwdev) == TYPE_VF)
return 0;
ret = sysfs_create_file(&((struct device *)(hwdev->dev_hdl))->kobj,
&chip_info->dbgtool_attr_file);
if (ret) {
pr_err("Failed to sysfs create file\n");
return ret;
}
chip_info->func_handle_array[hinic_global_func_id(hwdev)] = hwdev;
hinic_comm_recv_mgmt_self_cmd_reg(hwdev, HINIC_SELF_CMD_UP2PF_FFM,
ffm_intr_msg_record);
if (chip_info->dbgtool_info) {
chip_info->func_num++;
return 0;
}
dbgtool_info = (struct dbgtool_k_glb_info *)
kzalloc(sizeof(struct dbgtool_k_glb_info), GFP_KERNEL);
if (!dbgtool_info) {
pr_err("Failed to allocate dbgtool_info\n");
ret = -EFAULT;
goto dbgtool_info_fail;
}
chip_info->dbgtool_info = dbgtool_info;
/* FFM init */
dbgtool_info->ffm = (struct ffm_record_info *)
kzalloc(sizeof(struct ffm_record_info),
GFP_KERNEL);
if (!dbgtool_info->ffm) {
pr_err("Failed to allocate cell contexts for a chain\n");
ret = -EFAULT;
goto dbgtool_info_ffm_fail;
}
sema_init(&dbgtool_info->dbgtool_sem, 1);
ret = sscanf(chip_info->chip_name, HINIC_CHIP_NAME "%d", &id);
if (ret <= 0) {
pr_err("Failed to get hinic id\n");
goto sscanf_chdev_fail;
}
g_card_node_array[id] = chip_info;
chip_info->func_num++;
if (g_dbgtool_init_flag) {
g_dbgtool_ref_cnt++;
/* already initialized */
return 0;
}
/* alloc device id */
ret = alloc_chrdev_region(&dbgtool_dev_id, 0, 1, CHR_DEV_DBGTOOL);
if (ret) {
pr_err("Alloc dbgtool chrdev region fail, ret=0x%x\n", ret);
goto alloc_chdev_fail;
}
/* init device */
cdev_init(&dbgtool_chr_dev, &dbgtool_file_operations);
/* add device */
ret = cdev_add(&(dbgtool_chr_dev), dbgtool_dev_id, 1);
if (ret) {
pr_err("Add dgbtool dev fail, ret=0x%x\n", ret);
goto cdev_add_fail;
}
/*lint -save -e160*/
dbgtool_d_class = class_create(THIS_MODULE, CLASS_DBGTOOL);
/*lint -restore*/
if (IS_ERR(dbgtool_d_class)) {
pr_err("Create dgbtool class fail\n");
ret = -EFAULT;
goto cls_create_fail;
}
/* Export device information to user space
* (/sys/class/class name/device name)
*/
pdevice = device_create(dbgtool_d_class, NULL,
dbgtool_dev_id, NULL, CHR_DEV_DBGTOOL);
if (IS_ERR(pdevice)) {
pr_err("Create dgbtool device fail\n");
ret = -EFAULT;
goto dev_create_fail;
}
g_dbgtool_init_flag = 1;
g_dbgtool_ref_cnt = 1;
mutex_init(&g_addr_lock);
return 0;
dev_create_fail:
class_destroy(dbgtool_d_class);
cls_create_fail:
cdev_del(&(dbgtool_chr_dev));
cdev_add_fail:
unregister_chrdev_region(dbgtool_dev_id, 1);
alloc_chdev_fail:
g_card_node_array[id] = NULL;
sscanf_chdev_fail:
kfree(dbgtool_info->ffm);
dbgtool_info_ffm_fail:
kfree(dbgtool_info);
dbgtool_info = NULL;
chip_info->dbgtool_info = NULL;
dbgtool_info_fail:
hinic_comm_recv_up_self_cmd_unreg(hwdev, HINIC_SELF_CMD_UP2PF_FFM);
chip_info->func_handle_array[hinic_global_func_id(hwdev)] = NULL;
sysfs_remove_file(&((struct device *)(hwdev->dev_hdl))->kobj,
&chip_info->dbgtool_attr_file);
return ret;
}
/**
* dbgtool_knl_deinit - dbgtool character device deinit
* @vhwdev: the pointer to hardware device
* @chip_node: the pointer to card node
*/
void dbgtool_knl_deinit(void *vhwdev, void *chip_node)
{
struct dbgtool_k_glb_info *dbgtool_info;
struct card_node *chip_info = (struct card_node *)chip_node;
int id;
int err;
struct hinic_hwdev *hwdev = vhwdev;
if (hinic_func_type(hwdev) == TYPE_VF)
return;
hinic_comm_recv_up_self_cmd_unreg(hwdev, HINIC_SELF_CMD_UP2PF_FFM);
chip_info->func_handle_array[hinic_global_func_id(hwdev)] = NULL;
sysfs_remove_file(&((struct device *)(hwdev->dev_hdl))->kobj,
&chip_info->dbgtool_attr_file);
chip_info->func_num--;
if (chip_info->func_num)
return;
err = sscanf(chip_info->chip_name, HINIC_CHIP_NAME "%d", &id);
if (err <= 0)
pr_err("Failed to get hinic id\n");
g_card_node_array[id] = NULL;
dbgtool_info = chip_info->dbgtool_info;
/* FFM deinit */
kfree(dbgtool_info->ffm);
dbgtool_info->ffm = NULL;
kfree(dbgtool_info);
chip_info->dbgtool_info = NULL;
(void)dbgtool_knl_free_mem(id);
if (g_dbgtool_init_flag) {
if ((--g_dbgtool_ref_cnt))
return;
}
if (!dbgtool_d_class)
return;
device_destroy(dbgtool_d_class, dbgtool_dev_id);
class_destroy(dbgtool_d_class);
dbgtool_d_class = NULL;
cdev_del(&(dbgtool_chr_dev));
unregister_chrdev_region(dbgtool_dev_id, 1);
g_dbgtool_init_flag = 0;
}
/*lint -restore*/
/* SPDX-License-Identifier: GPL-2.0*/
/* Huawei HiNIC PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
*/
#ifndef __DBGTOOL_KNL_H__
#define __DBGTOOL_KNL_H__
#define DBG_TOOL_MAGIC 'w'
/* dbgtool command types: new dbgtool commands can be added here,
 * through which the tool can invoke the kernel-mode driver interfaces
 */
enum dbgtool_cmd {
DBGTOOL_CMD_API_RD = 0,
DBGTOOL_CMD_API_WR,
DBGTOOL_CMD_FFM_RD,
DBGTOOL_CMD_FFM_CLR,
DBGTOOL_CMD_PF_DEV_INFO_GET,
DBGTOOL_CMD_MSG_2_UP,
DBGTOOL_CMD_FREE_MEM,
DBGTOOL_CMD_NUM
};
struct api_cmd_rd {
u32 pf_id;
u8 dest;
u8 *cmd;
u16 size;
void *ack;
u16 ack_size;
};
struct api_cmd_wr {
u32 pf_id;
u8 dest;
u8 *cmd;
u16 size;
};
struct pf_dev_info {
u64 bar0_size;
u8 bus;
u8 slot;
u8 func;
u64 phy_addr;
};
/* maximum number of interrupt records kept in the FFM */
#define FFM_RECORD_NUM_MAX 64
struct ffm_intr_tm_info {
u8 node_id;
/* error level of the interrupt source */
u8 err_level;
/* Classification by interrupt source properties */
u16 err_type;
u32 err_csr_addr;
u32 err_csr_value;
u8 sec; /* second */
u8 min; /* minute */
u8 hour; /* hour */
u8 mday; /* day */
u8 mon; /* month */
u16 year; /* year */
};
struct ffm_record_info {
u32 ffm_num;
struct ffm_intr_tm_info ffm[FFM_RECORD_NUM_MAX];
};
struct msg_2_up {
u8 pf_id; /* which pf sends messages to the up */
u8 mod;
u8 cmd;
void *buf_in;
u16 in_size;
void *buf_out;
u16 *out_size;
};
struct dbgtool_param {
union {
struct api_cmd_rd api_rd;
struct api_cmd_wr api_wr;
struct pf_dev_info *dev_info;
struct ffm_record_info *ffm_rd;
struct msg_2_up msg2up;
} param;
char chip_name[16];
};
#define MAX_CARD_NUM 64
#define DBGTOOL_PAGE_ORDER 10
int dbgtool_knl_init(void *vhwdev, void *chip_node);
void dbgtool_knl_deinit(void *vhwdev, void *chip_node);
int hinic_mem_mmap(struct file *filp, struct vm_area_struct *vma);
void chipif_get_all_pf_dev_info(struct pf_dev_info *dev_info, int card_id,
void **g_func_handle_array);
long dbgtool_knl_free_mem(int id);
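/* A hypothetical userspace sketch of the ioctl interface (the exact
 * command encoding used by the real tool is not shown in this patch;
 * the driver dispatches on _IOC_NR() only, and the chip name format
 * is assumed to be "hinic<N>"):
 *
 *	struct dbgtool_param param = { 0 };
 *	struct pf_dev_info info[16];
 *
 *	strncpy(param.chip_name, "hinic0", sizeof(param.chip_name) - 1);
 *	param.param.dev_info = info;
 *	ioctl(fd, _IOWR(DBG_TOOL_MAGIC, DBGTOOL_CMD_PF_DEV_INFO_GET,
 *			struct dbgtool_param), &param);
 */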
#endif
// SPDX-License-Identifier: GPL-2.0
/* Huawei HiNIC PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include "ossl_knl.h"
#include "hinic_hw.h"
#include "hinic_hw_mgmt.h"
#include "hinic_lld.h"
#include "hinic_nic_cfg.h"
#include "hinic_nic_dev.h"
#include "hinic_dcb.h"
#define DCB_HW_CFG_CHG 0
#define DCB_HW_CFG_NO_CHG 1
#define DCB_HW_CFG_ERR 2
#define DCB_CFG_CHG_PG_TX 0x1
#define DCB_CFG_CHG_PG_RX 0x2
#define DCB_CFG_CHG_PFC 0x4
#define DCB_CFG_CHG_UP_COS 0x8
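/**
 * hinic_dcb_get_tc - get the traffic class a user priority maps to
 * @dcb_cfg: the DCB configuration
 * @dir: HINIC_DCB_CFG_TX or HINIC_DCB_CFG_RX
 * @up: the user priority
 * Return: the highest tc whose up_map contains @up, or 0 if none matches
 */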
u8 hinic_dcb_get_tc(struct hinic_dcb_config *dcb_cfg, int dir, u8 up)
{
struct hinic_tc_cfg *tc_cfg = &dcb_cfg->tc_cfg[0];
u8 tc = dcb_cfg->pg_tcs;
if (!tc)
return 0;
for (tc--; tc; tc--) {
if (BIT(up) & tc_cfg[tc].path[dir].up_map)
break;
}
return tc;
}
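/* UP_MAPPING(prio) sets the bit for user priority @prio counted from
 * the MSB; assuming HINIC_DCB_UP_MAX == 8, UP_MAPPING(0) == 0x80 and
 * UP_MAPPING(7) == 0x01
 */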
#define UP_MAPPING(prio) ((u8)(1U << ((HINIC_DCB_UP_MAX - 1) - (prio))))
void hinic_dcb_config_init(struct hinic_nic_dev *nic_dev,
struct hinic_dcb_config *dcb_cfg)
{
struct hinic_tc_cfg *tc;
int i;
memset(dcb_cfg->tc_cfg, 0, sizeof(dcb_cfg->tc_cfg));
tc = &dcb_cfg->tc_cfg[0];
/* Map all TCs to PG0 */
for (i = 0; i < dcb_cfg->pg_tcs; i++) {
tc = &dcb_cfg->tc_cfg[i];
tc->path[HINIC_DCB_CFG_TX].pg_id = 0;
tc->path[HINIC_DCB_CFG_TX].bw_pct = 100;
tc->path[HINIC_DCB_CFG_TX].up_map = UP_MAPPING(i);
tc->path[HINIC_DCB_CFG_RX].pg_id = 0;
tc->path[HINIC_DCB_CFG_RX].bw_pct = 100;
tc->path[HINIC_DCB_CFG_RX].up_map = UP_MAPPING(i);
tc->pfc_en = false;
}
for (; i < HINIC_DCB_UP_MAX; i++) {
tc->path[HINIC_DCB_CFG_TX].up_map |= UP_MAPPING(i);
tc->path[HINIC_DCB_CFG_RX].up_map |= UP_MAPPING(i);
}
memset(dcb_cfg->bw_pct, 0, sizeof(dcb_cfg->bw_pct));
/* Use PG0 by default; PG0's bw is 100% */
dcb_cfg->bw_pct[HINIC_DCB_CFG_TX][0] = 100;
dcb_cfg->bw_pct[HINIC_DCB_CFG_RX][0] = 100;
dcb_cfg->pfc_state = false;
}
void hinic_init_ieee_settings(struct hinic_nic_dev *nic_dev)
{
struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg;
struct ieee_ets *ets = &nic_dev->hinic_ieee_ets_default;
struct ieee_pfc *pfc = &nic_dev->hinic_ieee_pfc;
struct hinic_tc_attr *tc_attr;
u8 i;
memset(ets, 0x0, sizeof(struct ieee_ets));
memset(&nic_dev->hinic_ieee_ets, 0x0, sizeof(struct ieee_ets));
ets->ets_cap = dcb_cfg->pg_tcs;
for (i = 0; i < HINIC_DCB_TC_MAX; i++) {
tc_attr = &dcb_cfg->tc_cfg[i].path[HINIC_DCB_CFG_TX];
ets->tc_tsa[i] = tc_attr->prio_type ?
IEEE8021Q_TSA_STRICT : IEEE8021Q_TSA_ETS;
ets->tc_tx_bw[i] = nic_dev->dcb_cfg.bw_pct[HINIC_DCB_CFG_TX][i];
ets->tc_rx_bw[i] = nic_dev->dcb_cfg.bw_pct[HINIC_DCB_CFG_RX][i];
ets->prio_tc[i] = hinic_dcb_get_tc(dcb_cfg,
HINIC_DCB_CFG_TX, i);
}
memcpy(&nic_dev->hinic_ieee_ets, ets, sizeof(struct ieee_ets));
memset(pfc, 0x0, sizeof(struct ieee_pfc));
pfc->pfc_cap = dcb_cfg->pfc_tcs;
for (i = 0; i < dcb_cfg->pfc_tcs; i++) {
if (dcb_cfg->tc_cfg[i].pfc_en)
pfc->pfc_en |= (u8)BIT(i);
}
}
static int hinic_set_up_cos_map(struct hinic_nic_dev *nic_dev,
u8 num_cos, u8 *cos_up)
{
u8 up_valid_bitmap, up_cos[HINIC_DCB_UP_MAX] = {0};
u8 i;
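/* cos_up[] must be strictly descending; a hypothetical example:
 * num_cos = 4 with cos_up = {7, 6, 5, 4} maps up7->cos0, up6->cos1,
 * up5->cos2, and up4 (the last entry) to the default cos id; all
 * unlisted ups also fall back to the default cos id
 */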
up_valid_bitmap = 0;
for (i = 0; i < num_cos; i++) {
if (cos_up[i] >= HINIC_DCB_UP_MAX) {
hinic_info(nic_dev, drv, "Invalid up %d mapping to cos %d\n",
cos_up[i], i);
return -EFAULT;
}
if (i > 0 && cos_up[i] >= cos_up[i - 1]) {
hinic_info(nic_dev, drv,
"Invalid priority order, should be descending cos[%d]=%d, cos[%d]=%d\n",
i, cos_up[i], i - 1, cos_up[i - 1]);
return -EINVAL;
}
up_valid_bitmap |= (u8)BIT(cos_up[i]);
if (i == (num_cos - 1))
up_cos[cos_up[i]] = nic_dev->default_cos_id;
else
up_cos[cos_up[i]] = i; /* reverse up and cos */
}
for (i = 0; i < HINIC_DCB_UP_MAX; i++) {
if (up_valid_bitmap & (u8)BIT(i))
continue;
up_cos[i] = nic_dev->default_cos_id;
}
nic_dev->up_valid_bitmap = up_valid_bitmap;
memcpy(nic_dev->up_cos, up_cos, sizeof(up_cos));
return hinic_sq_cos_mapping(nic_dev->netdev);
}
static int hinic_init_up_cos_map(struct hinic_nic_dev *nic_dev, u8 num_cos)
{
u8 default_map[HINIC_DCB_COS_MAX] = {0};
bool setted = false;
u8 max_cos, cos_id, up;
int err;
max_cos = hinic_max_num_cos(nic_dev->hwdev);
if (!max_cos || ((max_cos - 1) < nic_dev->default_cos_id)) {
hinic_err(nic_dev, drv, "Max_cos is %d, default cos id %d\n",
max_cos, nic_dev->default_cos_id);
return -EFAULT;
}
err = hinic_get_chip_cos_up_map(nic_dev->pdev, &setted, default_map);
if (err) {
hinic_err(nic_dev, drv, "Get chip cos_up map failed\n");
return -EFAULT;
}
if (!setted) {
/* By default, map user priorities (max_cos-1)..0 to
 * cos 0..(max_cos-1)
 */
up = nic_dev->max_cos - 1;
for (cos_id = 0; cos_id < nic_dev->max_cos; cos_id++, up--)
default_map[cos_id] = up;
}
return hinic_set_up_cos_map(nic_dev, num_cos, default_map);
}
int hinic_dcb_init(struct hinic_nic_dev *nic_dev)
{
struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg;
u8 num_cos, support_cos = 0, default_cos = 0;
u8 i, cos_valid_bitmap;
int err;
if (HINIC_FUNC_IS_VF(nic_dev->hwdev))
return 0;
cos_valid_bitmap = hinic_cos_valid_bitmap(nic_dev->hwdev);
if (!cos_valid_bitmap) {
hinic_err(nic_dev, drv, "None cos supported\n");
return -EFAULT;
}
for (i = 0; i < HINIC_DCB_COS_MAX; i++) {
if (cos_valid_bitmap & BIT(i)) {
support_cos++;
default_cos = i; /* Find max cos id as default cos */
}
}
hinic_info(nic_dev, drv, "Support num cos %d, default cos %d\n",
support_cos, default_cos);
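/* round the number of valid cos down to a power of two, e.g. 6 -> 4 */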
num_cos = (u8)(1U << ilog2(support_cos));
if (num_cos != support_cos)
hinic_info(nic_dev, drv, "Adjust num_cos from %d to %d\n",
support_cos, num_cos);
nic_dev->dcbx_cap = 0;
nic_dev->max_cos = num_cos;
nic_dev->default_cos_id = default_cos;
dcb_cfg->pfc_tcs = nic_dev->max_cos;
dcb_cfg->pg_tcs = nic_dev->max_cos;
err = hinic_init_up_cos_map(nic_dev, num_cos);
if (err) {
hinic_info(nic_dev, drv, "Initialize up_cos mapping failed\n");
return -EFAULT;
}
hinic_dcb_config_init(nic_dev, dcb_cfg);
nic_dev->dcb_changes = DCB_CFG_CHG_PFC | DCB_CFG_CHG_PG_TX |
DCB_CFG_CHG_PG_RX | DCB_CFG_CHG_UP_COS;
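/* default DCBX mode: host-managed CEE */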
nic_dev->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
memcpy(&nic_dev->tmp_dcb_cfg, &nic_dev->dcb_cfg,
sizeof(nic_dev->tmp_dcb_cfg));
memcpy(&nic_dev->save_dcb_cfg, &nic_dev->dcb_cfg,
sizeof(nic_dev->save_dcb_cfg));
hinic_init_ieee_settings(nic_dev);
sema_init(&nic_dev->dcb_sem, 1);
return 0;
}
void hinic_set_prio_tc_map(struct hinic_nic_dev *nic_dev)
{
struct net_device *netdev = nic_dev->netdev;
u8 prio, tc;
for (prio = 0; prio < HINIC_DCB_UP_MAX; prio++) {
tc = nic_dev->up_cos[prio];
if (tc == nic_dev->default_cos_id)
tc = nic_dev->max_cos - 1;
netdev_set_prio_tc_map(netdev, prio, tc);
}
}
int hinic_setup_tc(struct net_device *netdev, u8 tc)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
int err;
if (!FUNC_SUPPORT_DCB(nic_dev->hwdev)) {
nicif_err(nic_dev, drv, netdev,
"Current function don't support DCB\n");
return -EOPNOTSUPP;
}
if (tc > nic_dev->dcb_cfg.pg_tcs) {
nicif_err(nic_dev, drv, netdev, "Invalid num_tc: %d, max tc: %d\n",
tc, nic_dev->dcb_cfg.pg_tcs);
return -EINVAL;
}
/* validate num_tc before closing the device so that an invalid
 * value can't leave the device down
 */
if (tc & (tc - 1)) {
nicif_err(nic_dev, drv, netdev,
"Invalid num_tc: %d, must be power of 2\n",
tc);
return -EINVAL;
}
if (netif_running(netdev)) {
err = hinic_close(netdev);
if (err) {
nicif_err(nic_dev, drv, netdev, "Failed to close device\n");
return -EFAULT;
}
}
if (tc) {
netdev_set_num_tc(netdev, tc);
hinic_set_prio_tc_map(nic_dev);
set_bit(HINIC_DCB_ENABLE, &nic_dev->flags);
} else {
netdev_reset_tc(netdev);
clear_bit(HINIC_DCB_ENABLE, &nic_dev->flags);
}
hinic_sq_cos_mapping(netdev);
if (netif_running(netdev)) {
err = hinic_open(netdev);
if (err) {
nicif_err(nic_dev, drv, netdev, "Failed to open device\n");
return -EFAULT;
}
} else {
hinic_update_num_qps(netdev);
}
hinic_configure_dcb(netdev);
return 0;
}
u8 hinic_setup_dcb_tool(struct net_device *netdev, u8 *dcb_en, bool wr_flag)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
int err = 0;
if (wr_flag) {
if (nic_dev->max_qps < nic_dev->dcb_cfg.pg_tcs && *dcb_en) {
nicif_err(nic_dev, drv, netdev,
"max_qps: %d is less than %d\n",
nic_dev->max_qps, nic_dev->dcb_cfg.pg_tcs);
return 1;
}
if (*dcb_en)
set_bit(HINIC_DCB_ENABLE, &nic_dev->flags);
else
clear_bit(HINIC_DCB_ENABLE, &nic_dev->flags);
/* hinic_setup_tc needs to acquire nic_mutex again */
mutex_unlock(&nic_dev->nic_mutex);
/* kill the rtnl assert warning */
rtnl_lock();
err = hinic_setup_tc(netdev,
*dcb_en ? nic_dev->dcb_cfg.pg_tcs : 0);
rtnl_unlock();
mutex_lock(&nic_dev->nic_mutex);
if (!err)
nicif_info(nic_dev, drv, netdev, "%s DCB\n",
*dcb_en ? "Enable" : "Disable");
} else {
*dcb_en = (u8)test_bit(HINIC_DCB_ENABLE, &nic_dev->flags);
}
return !!err;
}
static u8 hinic_dcbnl_get_state(struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
return !!test_bit(HINIC_DCB_ENABLE, &nic_dev->flags);
}
static u8 hinic_dcbnl_set_state(struct net_device *netdev, u8 state)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
u8 curr_state = !!test_bit(HINIC_DCB_ENABLE, &nic_dev->flags);
int err = 0;
if (!(nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return 1;
if (state == curr_state)
return 0;
if (nic_dev->max_qps < nic_dev->dcb_cfg.pg_tcs && state) {
nicif_err(nic_dev, drv, netdev,
"max_qps: %d is less than %d\n",
nic_dev->max_qps, nic_dev->dcb_cfg.pg_tcs);
return 1;
}
err = hinic_setup_tc(netdev, state ? nic_dev->dcb_cfg.pg_tcs : 0);
if (!err)
nicif_info(nic_dev, drv, netdev, "%s DCB\n",
state ? "Enable" : "Disable");
return !!err;
}
static void hinic_dcbnl_get_perm_hw_addr(struct net_device *netdev,
u8 *perm_addr)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
int err;
memset(perm_addr, 0xff, MAX_ADDR_LEN);
err = hinic_get_default_mac(nic_dev->hwdev, perm_addr);
if (err)
nicif_err(nic_dev, drv, netdev, "Failed to get default mac\n");
}
void hinic_dcbnl_set_ets_tc_tool(struct net_device *netdev, u8 tc[], bool flag)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
struct hinic_tc_cfg *cfg = nic_dev->tmp_dcb_cfg.tc_cfg;
struct hinic_tc_cfg *tc_conf = nic_dev->dcb_cfg.tc_cfg;
u8 i, tc_tmp, j;
if (flag) {
/* need to clear the up_map first */
for (i = 0; i < HINIC_DCB_TC_MAX; i++) {
cfg[i].path[HINIC_DCB_CFG_TX].up_map = 0;
cfg[i].path[HINIC_DCB_CFG_RX].up_map = 0;
}
for (i = 0; i < HINIC_DCB_TC_MAX; i++) {
tc_tmp = tc[i];
cfg[tc_tmp].path[HINIC_DCB_CFG_TX].up_map |= (u8)BIT(i);
cfg[tc_tmp].path[HINIC_DCB_CFG_RX].up_map |= (u8)BIT(i);
cfg[tc_tmp].path[HINIC_DCB_CFG_TX].pg_id = (u8)tc_tmp;
cfg[tc_tmp].path[HINIC_DCB_CFG_RX].pg_id = (u8)tc_tmp;
}
} else {
for (i = 0; i < HINIC_DCB_TC_MAX; i++) {
for (j = 0; j < HINIC_DCB_TC_MAX; j++) {
if (tc_conf[i].path[HINIC_DCB_CFG_TX].up_map &
(u8)BIT(j)) {
tc[j] = i;
}
}
}
}
}
void hinic_dcbnl_set_ets_pecent_tool(struct net_device *netdev,
u8 percent[], bool flag)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
int i;
if (flag) {
for (i = 0; i < HINIC_DCB_COS_MAX; i++) {
nic_dev->tmp_dcb_cfg.bw_pct[HINIC_DCB_CFG_TX][i] =
percent[i];
nic_dev->tmp_dcb_cfg.bw_pct[HINIC_DCB_CFG_RX][i] =
percent[i];
}
} else {
for (i = 0; i < HINIC_DCB_COS_MAX; i++)
percent[i] =
nic_dev->dcb_cfg.bw_pct[HINIC_DCB_CFG_TX][i];
}
}
static void hinic_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
u8 prio, u8 pg_id, u8 bw_pct,
u8 up_map)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
if (tc > HINIC_DCB_TC_MAX - 1)
return;
if (prio != DCB_ATTR_VALUE_UNDEFINED)
nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[0].prio_type = prio;
if (pg_id != DCB_ATTR_VALUE_UNDEFINED)
nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[0].pg_id = pg_id;
if (bw_pct != DCB_ATTR_VALUE_UNDEFINED)
nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[0].bw_pct = bw_pct;
/* if all priorities map to the same tc,
 * up_map is 0xFF, which is a valid value
 */
nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[0].up_map = up_map;
}
static void hinic_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
u8 bw_pct)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
if (bwg_id > HINIC_DCB_PG_MAX - 1)
return;
nic_dev->tmp_dcb_cfg.bw_pct[0][bwg_id] = bw_pct;
}
static void hinic_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
u8 prio, u8 pg_id, u8 bw_pct,
u8 up_map)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
if (tc > HINIC_DCB_TC_MAX - 1)
return;
if (prio != DCB_ATTR_VALUE_UNDEFINED)
nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[1].prio_type = prio;
if (pg_id != DCB_ATTR_VALUE_UNDEFINED)
nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[1].pg_id = pg_id;
if (bw_pct != DCB_ATTR_VALUE_UNDEFINED)
nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[1].bw_pct = bw_pct;
nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[1].up_map = up_map;
}
static void hinic_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
u8 bw_pct)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
if (bwg_id > HINIC_DCB_PG_MAX - 1)
return;
nic_dev->tmp_dcb_cfg.bw_pct[1][bwg_id] = bw_pct;
}
static void hinic_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
u8 *prio, u8 *pg_id, u8 *bw_pct,
u8 *up_map)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
if (tc > HINIC_DCB_TC_MAX - 1)
return;
*prio = nic_dev->dcb_cfg.tc_cfg[tc].path[0].prio_type;
*pg_id = nic_dev->dcb_cfg.tc_cfg[tc].path[0].pg_id;
*bw_pct = nic_dev->dcb_cfg.tc_cfg[tc].path[0].bw_pct;
*up_map = nic_dev->dcb_cfg.tc_cfg[tc].path[0].up_map;
}
static void hinic_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
u8 *bw_pct)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
if (bwg_id > HINIC_DCB_PG_MAX - 1)
return;
*bw_pct = nic_dev->dcb_cfg.bw_pct[0][bwg_id];
}
static void hinic_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
u8 *prio, u8 *pg_id, u8 *bw_pct,
u8 *up_map)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
if (tc > HINIC_DCB_TC_MAX - 1)
return;
*prio = nic_dev->dcb_cfg.tc_cfg[tc].path[1].prio_type;
*pg_id = nic_dev->dcb_cfg.tc_cfg[tc].path[1].pg_id;
*bw_pct = nic_dev->dcb_cfg.tc_cfg[tc].path[1].bw_pct;
*up_map = nic_dev->dcb_cfg.tc_cfg[tc].path[1].up_map;
}
static void hinic_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
u8 *bw_pct)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
if (bwg_id > HINIC_DCB_PG_MAX - 1)
return;
*bw_pct = nic_dev->dcb_cfg.bw_pct[1][bwg_id];
}
void hinic_dcbnl_set_pfc_cfg_tool(struct net_device *netdev, u8 setting)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
u8 i;
for (i = 0; i < HINIC_DCB_TC_MAX; i++) {
nic_dev->tmp_dcb_cfg.tc_cfg[i].pfc_en = !!(setting & BIT(i));
if (nic_dev->tmp_dcb_cfg.tc_cfg[i].pfc_en !=
nic_dev->dcb_cfg.tc_cfg[i].pfc_en) {
nic_dev->tmp_dcb_cfg.pfc_state = true;
}
}
}
void hinic_dcbnl_set_ets_strict_tool(struct net_device *netdev,
u8 *setting, bool flag)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
struct hinic_tc_cfg *cfg = nic_dev->tmp_dcb_cfg.tc_cfg;
struct hinic_tc_cfg *conf = nic_dev->dcb_cfg.tc_cfg;
u8 i;
if (flag) {
for (i = 0; i < HINIC_DCB_COS_MAX; i++) {
cfg[i].path[HINIC_DCB_CFG_TX].prio_type =
!!(*setting & BIT(i)) ? 2 : 0;
cfg[i].path[HINIC_DCB_CFG_RX].prio_type =
!!(*setting & BIT(i)) ? 2 : 0;
}
} else {
for (i = 0; i < HINIC_DCB_COS_MAX; i++) {
*setting = *setting |
(u8)((u32)(!!(conf[i].path[0].prio_type)) << i);
}
}
}
void hinic_dcbnl_set_pfc_en_tool(struct net_device *netdev,
u8 *value, bool flag)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
if (flag)
nic_dev->tmp_dcb_cfg.pfc_state = !!(*value);
else
*value = nic_dev->tmp_dcb_cfg.pfc_state;
}
void hinic_dcbnl_set_ets_en_tool(struct net_device *netdev,
u8 *value, bool flag)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
if (flag) {
if (*value)
set_bit(HINIC_ETS_ENABLE, &nic_dev->flags);
else
clear_bit(HINIC_ETS_ENABLE, &nic_dev->flags);
} else {
*value = (u8)test_bit(HINIC_ETS_ENABLE, &nic_dev->flags);
}
}
static void hinic_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio,
u8 setting)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
nic_dev->tmp_dcb_cfg.tc_cfg[prio].pfc_en = !!setting;
if (nic_dev->tmp_dcb_cfg.tc_cfg[prio].pfc_en !=
nic_dev->dcb_cfg.tc_cfg[prio].pfc_en)
nic_dev->tmp_dcb_cfg.pfc_state = true;
}
void hinic_dcbnl_get_pfc_cfg_tool(struct net_device *netdev, u8 *setting)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
u8 i;
for (i = 0; i < HINIC_DCB_TC_MAX; i++) {
*setting = *setting |
(u8)((u32)(nic_dev->dcb_cfg.tc_cfg[i].pfc_en) << i);
}
}
void hinic_dcbnl_get_tc_num_tool(struct net_device *netdev, u8 *tc_num)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
*tc_num = nic_dev->max_cos;
}
static void hinic_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio,
u8 *setting)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
if (prio > HINIC_DCB_TC_MAX - 1)
return;
*setting = nic_dev->dcb_cfg.tc_cfg[prio].pfc_en;
}
static u8 hinic_dcbnl_getcap(struct net_device *netdev, int cap_id,
u8 *dcb_cap)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
switch (cap_id) {
case DCB_CAP_ATTR_PG:
*dcb_cap = true;
break;
case DCB_CAP_ATTR_PFC:
*dcb_cap = true;
break;
case DCB_CAP_ATTR_UP2TC:
*dcb_cap = false;
break;
case DCB_CAP_ATTR_PG_TCS:
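/* 0x80 is a bitmask of supported TC counts: bit 7 set means 8 TCs */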
*dcb_cap = 0x80;
break;
case DCB_CAP_ATTR_PFC_TCS:
*dcb_cap = 0x80;
break;
case DCB_CAP_ATTR_GSP:
*dcb_cap = true;
break;
case DCB_CAP_ATTR_BCN:
*dcb_cap = false;
break;
case DCB_CAP_ATTR_DCBX:
*dcb_cap = nic_dev->dcbx_cap;
break;
default:
*dcb_cap = false;
break;
}
return 0;
}
static u8 hinic_sync_tc_cfg(struct hinic_tc_cfg *tc_dst,
struct hinic_tc_cfg *tc_src, int dir)
{
u8 tc_dir_change = (dir == HINIC_DCB_CFG_TX) ?
DCB_CFG_CHG_PG_TX : DCB_CFG_CHG_PG_RX;
u8 changes = 0;
if (tc_dst->path[dir].prio_type != tc_src->path[dir].prio_type) {
tc_dst->path[dir].prio_type = tc_src->path[dir].prio_type;
changes |= tc_dir_change;
}
if (tc_dst->path[dir].pg_id != tc_src->path[dir].pg_id) {
tc_dst->path[dir].pg_id = tc_src->path[dir].pg_id;
changes |= tc_dir_change;
}
if (tc_dst->path[dir].bw_pct != tc_src->path[dir].bw_pct) {
tc_dst->path[dir].bw_pct = tc_src->path[dir].bw_pct;
changes |= tc_dir_change;
}
if (tc_dst->path[dir].up_map != tc_src->path[dir].up_map) {
tc_dst->path[dir].up_map = tc_src->path[dir].up_map;
changes |= (tc_dir_change | DCB_CFG_CHG_PFC);
}
return changes;
}
static u8 hinic_sync_dcb_cfg(struct hinic_nic_dev *nic_dev)
{
struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg;
struct hinic_dcb_config *tmp_dcb_cfg = &nic_dev->tmp_dcb_cfg;
struct hinic_tc_cfg *tc_dst, *tc_src;
u8 changes = 0;
int i;
for (i = 0; i < HINIC_DCB_UP_MAX; i++) {
tc_src = &tmp_dcb_cfg->tc_cfg[i];
tc_dst = &dcb_cfg->tc_cfg[i];
changes |= hinic_sync_tc_cfg(tc_dst, tc_src, HINIC_DCB_CFG_TX);
changes |= hinic_sync_tc_cfg(tc_dst, tc_src, HINIC_DCB_CFG_RX);
}
for (i = 0; i < HINIC_DCB_PG_MAX; i++) {
if (dcb_cfg->bw_pct[HINIC_DCB_CFG_TX][i] !=
tmp_dcb_cfg->bw_pct[HINIC_DCB_CFG_TX][i]) {
dcb_cfg->bw_pct[HINIC_DCB_CFG_TX][i] =
tmp_dcb_cfg->bw_pct[HINIC_DCB_CFG_TX][i];
changes |= DCB_CFG_CHG_PG_TX;
}
if (dcb_cfg->bw_pct[HINIC_DCB_CFG_RX][i] !=
tmp_dcb_cfg->bw_pct[HINIC_DCB_CFG_RX][i]) {
dcb_cfg->bw_pct[HINIC_DCB_CFG_RX][i] =
tmp_dcb_cfg->bw_pct[HINIC_DCB_CFG_RX][i];
changes |= DCB_CFG_CHG_PG_RX;
}
}
for (i = 0; i < HINIC_DCB_UP_MAX; i++) {
if (dcb_cfg->tc_cfg[i].pfc_en !=
tmp_dcb_cfg->tc_cfg[i].pfc_en) {
dcb_cfg->tc_cfg[i].pfc_en =
tmp_dcb_cfg->tc_cfg[i].pfc_en;
changes |= DCB_CFG_CHG_PFC;
}
}
if (dcb_cfg->pfc_state != tmp_dcb_cfg->pfc_state) {
dcb_cfg->pfc_state = tmp_dcb_cfg->pfc_state;
changes |= DCB_CFG_CHG_PFC;
}
return changes;
}
static void hinic_dcb_get_pfc_map(struct hinic_nic_dev *nic_dev,
struct hinic_dcb_config *dcb_cfg, u8 *pfc_map)
{
u8 i, up;
u8 pfc_en = 0, outof_range_pfc = 0;
for (i = 0; i < HINIC_DCB_UP_MAX; i++) {
up = (HINIC_DCB_UP_MAX - 1) - i;
if (dcb_cfg->tc_cfg[up].pfc_en)
pfc_en |= (u8)BIT(up);
}
*pfc_map = pfc_en & nic_dev->up_valid_bitmap;
outof_range_pfc = pfc_en & (~nic_dev->up_valid_bitmap);
if (outof_range_pfc)
hinic_info(nic_dev, drv,
"PFC setting out of range, 0x%x will be ignored\n",
outof_range_pfc);
}
static bool is_cos_in_use(u8 cos, u8 up_valid_bitmap, u8 *up_cos)
{
u32 i;
for (i = 0; i < HINIC_DCB_UP_MAX; i++) {
if (!(up_valid_bitmap & BIT(i)))
continue;
if (cos == up_cos[i])
return true;
}
return false;
}
static void hinic_dcb_adjust_up_bw(struct hinic_nic_dev *nic_dev, u8 *up_pgid,
u8 *up_bw)
{
u8 tmp_cos, pg_id;
u16 bw_all;
u8 bw_remain, cos_cnt;
for (pg_id = 0; pg_id < HINIC_DCB_PG_MAX; pg_id++) {
bw_all = 0;
cos_cnt = 0;
/* Find all up mapping to the same pg */
for (tmp_cos = 0; tmp_cos < HINIC_DCB_UP_MAX; tmp_cos++) {
if (!is_cos_in_use(tmp_cos, nic_dev->up_valid_bitmap,
nic_dev->up_cos))
continue;
if (up_pgid[tmp_cos] == pg_id) {
bw_all += up_bw[tmp_cos];
cos_cnt++;
}
}
if (bw_all <= 100 || !cos_cnt)
continue;
/* Calculate each up's percentage of its bandwidth group; the
 * percentages of the priorities in the same priority group must
 * sum to 100
 */
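/* hypothetical example: three cos in one pg, each with up_bw 50:
 * bw_all = 150, 100 * 50 / 150 = 33 and bw_remain = 100 % 3 = 1,
 * so the cos are adjusted to 34/33/33, which sums to 100
 */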
bw_remain = 100 % cos_cnt;
for (tmp_cos = 0; tmp_cos < HINIC_DCB_UP_MAX; tmp_cos++) {
if (!is_cos_in_use(tmp_cos, nic_dev->up_valid_bitmap,
nic_dev->up_cos))
continue;
if (up_pgid[tmp_cos] == pg_id) {
up_bw[tmp_cos] =
(u8)(100 * up_bw[tmp_cos] / bw_all +
(u8)!!bw_remain);
if (bw_remain)
bw_remain--;
}
}
}
}
static void hinic_dcb_dump_configuration(struct hinic_nic_dev *nic_dev,
u8 *up_tc, u8 *up_pgid, u8 *up_bw,
u8 *pg_bw, u8 *up_strict, u8 *bw_pct)
{
u8 i;
u8 cos;
for (i = 0; i < HINIC_DCB_UP_MAX; i++) {
if (!(nic_dev->up_valid_bitmap & BIT(i)))
continue;
cos = nic_dev->up_cos[i];
hinic_info(nic_dev, drv,
"up: %d, cos: %d, tc: %d, pgid: %d, bw: %d, tsa: %d\n",
i, cos, up_tc[cos], up_pgid[cos], up_bw[cos],
up_strict[cos]);
}
for (i = 0; i < HINIC_DCB_PG_MAX; i++)
hinic_info(nic_dev, drv, "pgid: %d, bw: %d\n", i, pg_bw[i]);
}
/* Ucode thread timeout is 210ms, so the wait must be larger than 210ms */
#define HINIC_WAIT_PORT_IO_STOP 250
static int hinic_stop_port_traffic_flow(struct hinic_nic_dev *nic_dev)
{
int err = 0;
down(&nic_dev->dcb_sem);
if (nic_dev->disable_port_cnt++ != 0)
goto out;
err = hinic_force_port_disable(nic_dev);
if (err) {
hinic_err(nic_dev, drv, "Failed to disable port\n");
goto set_port_err;
}
err = hinic_set_port_funcs_state(nic_dev->hwdev, false);
if (err) {
hinic_err(nic_dev, drv,
"Failed to disable all functions in port\n");
goto set_port_funcs_err;
}
hinic_info(nic_dev, drv, "Stop port traffic flow\n");
goto out;
set_port_funcs_err:
hinic_force_set_port_state(nic_dev, !!netif_running(nic_dev->netdev));
set_port_err:
out:
if (err)
nic_dev->disable_port_cnt--;
up(&nic_dev->dcb_sem);
return err;
}
static int hinic_start_port_traffic_flow(struct hinic_nic_dev *nic_dev)
{
int err;
down(&nic_dev->dcb_sem);
nic_dev->disable_port_cnt--;
if (nic_dev->disable_port_cnt > 0) {
up(&nic_dev->dcb_sem);
return 0;
}
nic_dev->disable_port_cnt = 0;
up(&nic_dev->dcb_sem);
err = hinic_force_set_port_state(nic_dev,
!!netif_running(nic_dev->netdev));
if (err)
hinic_err(nic_dev, drv, "Failed to disable port\n");
err = hinic_set_port_funcs_state(nic_dev->hwdev, true);
if (err)
hinic_err(nic_dev, drv,
"Failed to disable all functions in port\n");
hinic_info(nic_dev, drv, "Start port traffic flow\n");
return err;
}
static int __set_hw_cos_up_map(struct hinic_nic_dev *nic_dev)
{
u8 cos, cos_valid_bitmap, cos_up_map[HINIC_DCB_COS_MAX] = {0};
u8 i;
int err;
cos_valid_bitmap = 0;
for (i = 0; i < HINIC_DCB_UP_MAX; i++) {
if (!(nic_dev->up_valid_bitmap & BIT(i)))
continue;
cos = nic_dev->up_cos[i];
cos_up_map[cos] = i;
cos_valid_bitmap |= (u8)BIT(cos);
}
err = hinic_dcb_set_cos_up_map(nic_dev->hwdev, cos_valid_bitmap,
cos_up_map);
if (err) {
hinic_info(nic_dev, drv, "Set cos_up map failed\n");
return err;
}
return 0;
}
static int __set_hw_ets(struct hinic_nic_dev *nic_dev)
{
struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg;
struct ieee_ets *my_ets = &nic_dev->hinic_ieee_ets;
struct hinic_tc_attr *tc_attr;
u8 up_tc[HINIC_DCB_UP_MAX] = {0};
u8 up_pgid[HINIC_DCB_UP_MAX] = {0};
u8 up_bw[HINIC_DCB_UP_MAX] = {0};
u8 pg_bw[HINIC_DCB_UP_MAX] = {0};
u8 up_strict[HINIC_DCB_UP_MAX] = {0};
u8 i, tc, cos;
int err;
for (i = 0; i < HINIC_DCB_UP_MAX; i++) {
if (!(nic_dev->up_valid_bitmap & BIT(i)))
continue;
cos = nic_dev->up_cos[i];
if ((nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) {
up_tc[cos] = my_ets->prio_tc[i];
up_pgid[cos] = my_ets->prio_tc[i];
up_bw[cos] = 100;
up_strict[cos] =
(my_ets->tc_tsa[cos] == IEEE8021Q_TSA_STRICT) ?
HINIC_DCB_TSA_TC_SP : HINIC_DCB_TSA_TC_DWRR;
} else {
tc = hinic_dcb_get_tc(dcb_cfg, HINIC_DCB_CFG_TX, i);
tc_attr = &dcb_cfg->tc_cfg[tc].path[HINIC_DCB_CFG_TX];
up_tc[cos] = tc;
up_pgid[cos] = tc_attr->pg_id;
up_bw[cos] = tc_attr->bw_pct;
up_strict[cos] = tc_attr->prio_type ?
HINIC_DCB_TSA_TC_SP : HINIC_DCB_TSA_TC_DWRR;
}
}
hinic_dcb_adjust_up_bw(nic_dev, up_pgid, up_bw);
if (nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) {
for (i = 0; i < HINIC_DCB_PG_MAX; i++)
pg_bw[i] = my_ets->tc_tx_bw[i];
} else {
for (i = 0; i < HINIC_DCB_PG_MAX; i++)
pg_bw[i] = dcb_cfg->bw_pct[HINIC_DCB_CFG_TX][i];
}
if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags))
hinic_dcb_dump_configuration(nic_dev, up_tc, up_pgid,
up_bw, pg_bw, up_strict,
pg_bw);
err = hinic_dcb_set_ets(nic_dev->hwdev, up_tc, pg_bw, up_pgid,
up_bw, up_strict);
if (err) {
hinic_err(nic_dev, drv, "Failed to set ets with mode: %d\n",
nic_dev->dcbx_cap);
return err;
}
hinic_info(nic_dev, drv, "Set ets to hw done with mode: %d\n",
nic_dev->dcbx_cap);
return 0;
}
u8 hinic_dcbnl_set_ets_tool(struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
u8 state = DCB_HW_CFG_CHG;
int err;
nic_dev->dcb_changes |= hinic_sync_dcb_cfg(nic_dev);
if (!nic_dev->dcb_changes)
return DCB_HW_CFG_CHG;
err = hinic_stop_port_traffic_flow(nic_dev);
if (err)
return DCB_HW_CFG_ERR;
/* wait for all traffic flows to stop */
if (netdev->reg_state == NETREG_REGISTERED)
msleep(HINIC_WAIT_PORT_IO_STOP);
if (nic_dev->dcb_changes & DCB_CFG_CHG_UP_COS) {
err = __set_hw_cos_up_map(nic_dev);
if (err) {
hinic_info(nic_dev, drv,
"Set cos_up map to hardware failed\n");
state = DCB_HW_CFG_ERR;
goto out;
}
nic_dev->dcb_changes &= (~DCB_CFG_CHG_UP_COS);
}
if (nic_dev->dcb_changes & (DCB_CFG_CHG_PG_TX | DCB_CFG_CHG_PG_RX)) {
err = __set_hw_ets(nic_dev);
if (err) {
state = DCB_HW_CFG_ERR;
goto out;
}
nic_dev->dcb_changes &=
(~(DCB_CFG_CHG_PG_TX | DCB_CFG_CHG_PG_RX));
}
out:
hinic_start_port_traffic_flow(nic_dev);
return state;
}
static int hinic_dcbnl_set_df_ieee_cfg(struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
struct ieee_ets *ets_default = &nic_dev->hinic_ieee_ets_default;
struct ieee_pfc *my_pfc = &nic_dev->hinic_ieee_pfc;
struct ieee_ets *my_ets = &nic_dev->hinic_ieee_ets;
struct ieee_pfc pfc = {0};
int err1 = 0;
int err2 = 0;
u8 flag = 0;
if (!(nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
return 0;
if (memcmp(my_ets, ets_default, sizeof(struct ieee_ets)))
flag |= (u8)BIT(0);
if (my_pfc->pfc_en)
flag |= (u8)BIT(1);
if (!flag)
return 0;
err1 = hinic_stop_port_traffic_flow(nic_dev);
if (err1)
return err1;
if (netdev->reg_state == NETREG_REGISTERED)
msleep(HINIC_WAIT_PORT_IO_STOP);
if (flag & BIT(0)) {
memcpy(my_ets, ets_default, sizeof(struct ieee_ets));
err1 = __set_hw_ets(nic_dev);
}
if (flag & BIT(1)) {
my_pfc->pfc_en = 0;
err2 = hinic_dcb_set_pfc(nic_dev->hwdev, false, pfc.pfc_en);
if (err2)
nicif_err(nic_dev, drv, netdev, "Failed to set pfc\n");
}
hinic_start_port_traffic_flow(nic_dev);
return (err1 || err2) ? -EINVAL : 0;
}
u8 hinic_dcbnl_set_pfc_tool(struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg;
u8 state = DCB_HW_CFG_CHG;
int err;
nic_dev->dcb_changes |= hinic_sync_dcb_cfg(nic_dev);
if (!nic_dev->dcb_changes)
return DCB_HW_CFG_CHG;
if (nic_dev->dcb_changes & DCB_CFG_CHG_PFC) {
u8 pfc_map = 0;
hinic_dcb_get_pfc_map(nic_dev, dcb_cfg, &pfc_map);
err = hinic_dcb_set_pfc(nic_dev->hwdev, dcb_cfg->pfc_state,
pfc_map);
if (err) {
hinic_info(nic_dev, drv, "Failed to %s PFC\n",
dcb_cfg->pfc_state ? "enable" : "disable");
state = DCB_HW_CFG_ERR;
goto out;
}
if (dcb_cfg->pfc_state)
hinic_info(nic_dev, drv, "Set PFC: 0x%x to hw done\n",
pfc_map);
else
hinic_info(nic_dev, drv, "Disable PFC, enable tx/rx pause\n");
nic_dev->dcb_changes &= (~DCB_CFG_CHG_PFC);
}
out:
return state;
}
u8 hinic_dcbnl_set_all(struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg;
u8 state = DCB_HW_CFG_CHG;
int err;
if (!(nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return DCB_HW_CFG_ERR;
nic_dev->dcb_changes |= hinic_sync_dcb_cfg(nic_dev);
if (!nic_dev->dcb_changes)
return DCB_HW_CFG_NO_CHG;
err = hinic_stop_port_traffic_flow(nic_dev);
if (err)
return DCB_HW_CFG_ERR;
/* wait for all traffic flows to stop */
if (netdev->reg_state == NETREG_REGISTERED)
msleep(HINIC_WAIT_PORT_IO_STOP);
if (nic_dev->dcb_changes & DCB_CFG_CHG_UP_COS) {
err = __set_hw_cos_up_map(nic_dev);
if (err) {
hinic_info(nic_dev, drv,
"Set cos_up map to hardware failed\n");
state = DCB_HW_CFG_ERR;
goto out;
}
nic_dev->dcb_changes &= (~DCB_CFG_CHG_UP_COS);
}
if (nic_dev->dcb_changes & (DCB_CFG_CHG_PG_TX | DCB_CFG_CHG_PG_RX)) {
err = __set_hw_ets(nic_dev);
if (err) {
state = DCB_HW_CFG_ERR;
goto out;
}
nic_dev->dcb_changes &=
(~(DCB_CFG_CHG_PG_TX | DCB_CFG_CHG_PG_RX));
}
if (nic_dev->dcb_changes & DCB_CFG_CHG_PFC) {
u8 pfc_map = 0;
hinic_dcb_get_pfc_map(nic_dev, dcb_cfg, &pfc_map);
err = hinic_dcb_set_pfc(nic_dev->hwdev, dcb_cfg->pfc_state,
pfc_map);
if (err) {
hinic_info(nic_dev, drv, "Failed to %s PFC\n",
dcb_cfg->pfc_state ? "enable" : "disable");
state = DCB_HW_CFG_ERR;
goto out;
}
if (dcb_cfg->pfc_state)
hinic_info(nic_dev, drv, "Set PFC: 0x%x to hw done\n",
pfc_map);
else
hinic_info(nic_dev, drv, "Disable PFC, enable tx/rx pause\n");
nic_dev->dcb_changes &= (~DCB_CFG_CHG_PFC);
}
out:
hinic_start_port_traffic_flow(nic_dev);
return state;
}
static int hinic_dcbnl_ieee_get_ets(struct net_device *netdev,
struct ieee_ets *ets)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
struct ieee_ets *my_ets = &nic_dev->hinic_ieee_ets;
ets->ets_cap = my_ets->ets_cap;
memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
return 0;
}
static int hinic_dcbnl_ieee_set_ets(struct net_device *netdev,
struct ieee_ets *ets)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg;
struct ieee_ets *my_ets = &nic_dev->hinic_ieee_ets;
struct ieee_ets back_ets;
int err, i;
u8 max_tc = 0;
u16 total_bw = 0;
if (!(nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
return -EINVAL;
if (!memcmp(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw)) &&
!memcmp(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw)) &&
!memcmp(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc)) &&
!memcmp(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa)))
return 0;
for (i = 0; i < HINIC_DCB_TC_MAX; i++)
total_bw += ets->tc_tx_bw[i];
if (!total_bw)
return -EINVAL;
for (i = 0; i < dcb_cfg->pg_tcs; i++) {
if (ets->prio_tc[i] > max_tc)
max_tc = ets->prio_tc[i];
}
if (max_tc)
max_tc++;
if (max_tc > dcb_cfg->pg_tcs)
return -EINVAL;
max_tc = max_tc ? dcb_cfg->pg_tcs : 0;
memcpy(&back_ets, my_ets, sizeof(struct ieee_ets));
memcpy(my_ets->tc_tx_bw, ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
memcpy(my_ets->tc_rx_bw, ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
memcpy(my_ets->prio_tc, ets->prio_tc, sizeof(ets->prio_tc));
memcpy(my_ets->tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));
if (max_tc != netdev_get_num_tc(netdev)) {
err = hinic_setup_tc(netdev, max_tc);
if (err) {
nicif_err(nic_dev, drv, netdev,
"Failed to setup tc with max_tc: %d, err: %d\n",
max_tc, err);
memcpy(my_ets, &back_ets, sizeof(struct ieee_ets));
return err;
}
}
err = hinic_stop_port_traffic_flow(nic_dev);
if (err)
return err;
if (netdev->reg_state == NETREG_REGISTERED)
msleep(HINIC_WAIT_PORT_IO_STOP);
err = __set_hw_ets(nic_dev);
hinic_start_port_traffic_flow(nic_dev);
return err;
}
static int hinic_dcbnl_ieee_get_pfc(struct net_device *netdev,
struct ieee_pfc *pfc)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
struct ieee_pfc *my_pfc = &nic_dev->hinic_ieee_pfc;
pfc->pfc_en = my_pfc->pfc_en;
pfc->pfc_cap = my_pfc->pfc_cap;
return 0;
}
static int hinic_dcbnl_ieee_set_pfc(struct net_device *netdev,
struct ieee_pfc *pfc)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg;
struct ieee_pfc *my_pfc = &nic_dev->hinic_ieee_pfc;
struct ieee_ets *my_ets = &nic_dev->hinic_ieee_ets;
int err, i;
u8 pfc_map, max_tc;
u8 outof_range_pfc = 0;
bool pfc_en;
if (!(nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
return -EINVAL;
if (my_pfc->pfc_en == pfc->pfc_en)
return 0;
pfc_map = pfc->pfc_en & nic_dev->up_valid_bitmap;
outof_range_pfc = pfc->pfc_en & (~nic_dev->up_valid_bitmap);
if (outof_range_pfc)
nicif_info(nic_dev, drv, netdev,
"pfc setting out of range, 0x%x will be ignored\n",
outof_range_pfc);
err = hinic_stop_port_traffic_flow(nic_dev);
if (err)
return err;
if (netdev->reg_state == NETREG_REGISTERED)
msleep(HINIC_WAIT_PORT_IO_STOP);
pfc_en = pfc_map ? true : false;
max_tc = 0;
for (i = 0; i < dcb_cfg->pg_tcs; i++) {
if (my_ets->prio_tc[i] > max_tc)
max_tc = my_ets->prio_tc[i];
}
pfc_en = max_tc ? pfc_en : false;
err = hinic_dcb_set_pfc(nic_dev->hwdev, pfc_en, pfc_map);
if (err) {
hinic_info(nic_dev, drv,
"Failed to set pfc to hw with pfc_map: 0x%x err: %d\n",
pfc_map, err);
hinic_start_port_traffic_flow(nic_dev);
return err;
}
hinic_start_port_traffic_flow(nic_dev);
my_pfc->pfc_en = pfc->pfc_en;
hinic_info(nic_dev, drv,
"Set pfc successfully with pfc_map: 0x%x, pfc_en: %d\n",
pfc_map, pfc_en);
return 0;
}
static int hinic_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg;
if (!test_bit(HINIC_DCB_ENABLE, &nic_dev->flags))
return -EINVAL;
switch (tcid) {
case DCB_NUMTCS_ATTR_PG:
*num = dcb_cfg->pg_tcs;
break;
case DCB_NUMTCS_ATTR_PFC:
*num = dcb_cfg->pfc_tcs;
break;
default:
return -EINVAL;
}
return 0;
}
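/* The TC counts are fixed by the HW capability (the pg_tcs/pfc_tcs read
 * in getnumtcs above), so attempts to change them via dcbnl are rejected.
 */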
static int hinic_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
{
return -EINVAL;
}
static u8 hinic_dcbnl_getpfcstate(struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
return (u8)nic_dev->dcb_cfg.pfc_state;
}
static void hinic_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
nic_dev->tmp_dcb_cfg.pfc_state = !!state;
}
static u8 hinic_dcbnl_getdcbx(struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
return nic_dev->dcbx_cap;
}
static u8 hinic_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
int err;
if (((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) ||
((mode & DCB_CAP_DCBX_LLD_MANAGED) &&
(!(mode & DCB_CAP_DCBX_HOST)))) {
nicif_info(nic_dev, drv, netdev,
"Set dcbx failed with invalid mode: %d\n", mode);
return 1;
}
if (nic_dev->dcbx_cap == mode)
return 0;
nic_dev->dcbx_cap = mode;
if (mode & DCB_CAP_DCBX_VER_CEE) {
u8 mask = DCB_CFG_CHG_PFC | DCB_CFG_CHG_PG_TX |
DCB_CFG_CHG_PG_RX;
nic_dev->dcb_changes |= mask;
hinic_dcbnl_set_all(netdev);
} else if (mode & DCB_CAP_DCBX_VER_IEEE) {
if (netdev_get_num_tc(netdev)) {
err = hinic_setup_tc(netdev, 0);
if (err) {
nicif_err(nic_dev, drv, netdev,
"Failed to setup tc with mode: %d\n",
mode);
return 1;
}
}
hinic_dcbnl_set_df_ieee_cfg(netdev);
hinic_force_port_relink(nic_dev->hwdev);
} else {
err = hinic_setup_tc(netdev, 0);
if (err) {
nicif_err(nic_dev, drv, netdev,
"Failed to setup tc with mode: %d\n", mode);
return 1;
}
}
nicif_info(nic_dev, drv, netdev, "Change dcbx mode to 0x%x\n", mode);
return 0;
}
const struct dcbnl_rtnl_ops hinic_dcbnl_ops = {
/* IEEE 802.1Qaz std */
.ieee_getets = hinic_dcbnl_ieee_get_ets,
.ieee_setets = hinic_dcbnl_ieee_set_ets,
.ieee_getpfc = hinic_dcbnl_ieee_get_pfc,
.ieee_setpfc = hinic_dcbnl_ieee_set_pfc,
/* CEE std */
.getstate = hinic_dcbnl_get_state,
.setstate = hinic_dcbnl_set_state,
.getpermhwaddr = hinic_dcbnl_get_perm_hw_addr,
.setpgtccfgtx = hinic_dcbnl_set_pg_tc_cfg_tx,
.setpgbwgcfgtx = hinic_dcbnl_set_pg_bwg_cfg_tx,
.setpgtccfgrx = hinic_dcbnl_set_pg_tc_cfg_rx,
.setpgbwgcfgrx = hinic_dcbnl_set_pg_bwg_cfg_rx,
.getpgtccfgtx = hinic_dcbnl_get_pg_tc_cfg_tx,
.getpgbwgcfgtx = hinic_dcbnl_get_pg_bwg_cfg_tx,
.getpgtccfgrx = hinic_dcbnl_get_pg_tc_cfg_rx,
.getpgbwgcfgrx = hinic_dcbnl_get_pg_bwg_cfg_rx,
.setpfccfg = hinic_dcbnl_set_pfc_cfg,
.getpfccfg = hinic_dcbnl_get_pfc_cfg,
.setall = hinic_dcbnl_set_all,
.getcap = hinic_dcbnl_getcap,
.getnumtcs = hinic_dcbnl_getnumtcs,
.setnumtcs = hinic_dcbnl_setnumtcs,
.getpfcstate = hinic_dcbnl_getpfcstate,
.setpfcstate = hinic_dcbnl_setpfcstate,
/* DCBX configuration */
.getdcbx = hinic_dcbnl_getdcbx,
.setdcbx = hinic_dcbnl_setdcbx,
};
int hinic_dcb_reset_hw_config(struct hinic_nic_dev *nic_dev)
{
struct net_device *netdev = nic_dev->netdev;
u8 state;
hinic_dcb_config_init(nic_dev, &nic_dev->tmp_dcb_cfg);
state = hinic_dcbnl_set_all(netdev);
if (state == DCB_HW_CFG_ERR)
return -EFAULT;
if (state == DCB_HW_CFG_CHG)
hinic_info(nic_dev, drv,
"Reset hardware DCB configuration done\n");
return 0;
}
void hinic_configure_dcb(struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
int err;
if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags)) {
memcpy(&nic_dev->tmp_dcb_cfg, &nic_dev->save_dcb_cfg,
sizeof(nic_dev->tmp_dcb_cfg));
hinic_dcbnl_set_all(netdev);
} else {
memcpy(&nic_dev->save_dcb_cfg, &nic_dev->tmp_dcb_cfg,
sizeof(nic_dev->save_dcb_cfg));
err = hinic_dcb_reset_hw_config(nic_dev);
if (err)
nicif_warn(nic_dev, drv, netdev,
"Failed to reset hw dcb configuration\n");
}
}
static bool __is_cos_up_map_change(struct hinic_nic_dev *nic_dev, u8 *cos_up)
{
u8 cos, up;
for (cos = 0; cos < nic_dev->max_cos; cos++) {
up = cos_up[cos];
if (BIT(up) != (nic_dev->up_valid_bitmap & BIT(up)))
return true;
}
return false;
}
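/* __set_cos_up_map() serializes concurrent updates with the
 * HINIC_DCB_UP_COS_SETTING bit: test_and_set_bit() acts as a simple
 * try-lock and the bit is cleared on every exit path.
 */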
int __set_cos_up_map(struct hinic_nic_dev *nic_dev, u8 *cos_up)
{
struct net_device *netdev;
u8 state;
int err = 0;
if (!nic_dev || !cos_up)
return -EINVAL;
netdev = nic_dev->netdev;
if (test_and_set_bit(HINIC_DCB_UP_COS_SETTING, &nic_dev->dcb_flags)) {
nicif_err(nic_dev, drv, netdev,
"Cos_up map setting in inprocess, please try again later\n");
return -EFAULT;
}
nicif_info(nic_dev, drv, netdev, "Set cos2up: %d%d%d%d%d%d%d%d\n",
cos_up[0], cos_up[1], cos_up[2], cos_up[3],
cos_up[4], cos_up[5], cos_up[6], cos_up[7]);
if (!__is_cos_up_map_change(nic_dev, cos_up)) {
nicif_err(nic_dev, drv, netdev,
"Same mapping, don't need to change anything\n");
err = 0;
goto out;
}
err = hinic_set_up_cos_map(nic_dev, nic_dev->max_cos, cos_up);
if (err) {
err = -EFAULT;
goto out;
}
nic_dev->dcb_changes = DCB_CFG_CHG_PG_TX | DCB_CFG_CHG_PG_RX |
DCB_CFG_CHG_PFC | DCB_CFG_CHG_UP_COS;
if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags)) {
/* Change map in kernel */
hinic_set_prio_tc_map(nic_dev);
state = hinic_dcbnl_set_all(netdev);
if (state == DCB_HW_CFG_ERR) {
nicif_err(nic_dev, drv, netdev,
"Reconfig dcb to hw failed\n");
err = -EFAULT;
}
}
out:
clear_bit(HINIC_DCB_UP_COS_SETTING, &nic_dev->dcb_flags);
return err;
}
int hinic_get_num_cos(struct hinic_nic_dev *nic_dev, u8 *num_cos)
{
if (!nic_dev || !num_cos)
return -EINVAL;
*num_cos = nic_dev->max_cos;
return 0;
}
int hinic_get_cos_up_map(struct hinic_nic_dev *nic_dev, u8 *num_cos,
u8 *cos_up)
{
u8 up, cos;
if (!nic_dev || !cos_up)
return -EINVAL;
for (cos = 0; cos < HINIC_DCB_COS_MAX; cos++) {
for (up = 0; up < HINIC_DCB_UP_MAX; up++) {
if (!(nic_dev->up_valid_bitmap & BIT(up)))
continue;
if (nic_dev->up_cos[up] == cos ||
nic_dev->up_cos[up] == nic_dev->default_cos_id)
cos_up[cos] = up;
}
}
*num_cos = nic_dev->max_cos;
return 0;
}
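/* Stop traffic on every PF of the chip; if any PF fails, restart the
 * PFs that were already stopped so the chip is left in a consistent
 * state.
 */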
static int __stop_port_flow(void *uld_array[], u32 num_dev)
{
struct hinic_nic_dev *tmp_dev;
u32 i, idx;
int err;
for (idx = 0; idx < num_dev; idx++) {
tmp_dev = (struct hinic_nic_dev *)uld_array[idx];
err = hinic_stop_port_traffic_flow(tmp_dev);
if (err) {
nicif_err(tmp_dev, drv, tmp_dev->netdev,
"Stop port traffic flow failed\n");
goto stop_port_err;
}
}
/* wait for all traffic flows to stop */
msleep(HINIC_WAIT_PORT_IO_STOP);
return 0;
stop_port_err:
for (i = 0; i < idx; i++) {
tmp_dev = (struct hinic_nic_dev *)uld_array[i];
hinic_start_port_traffic_flow(tmp_dev);
}
return err;
}
static void __start_port_flow(void *uld_array[], u32 num_dev)
{
struct hinic_nic_dev *tmp_dev;
u32 idx;
for (idx = 0; idx < num_dev; idx++) {
tmp_dev = (struct hinic_nic_dev *)uld_array[idx];
hinic_start_port_traffic_flow(tmp_dev);
}
}
/* for hinicadm tool, need to change all ports of the chip */
int hinic_set_cos_up_map(struct hinic_nic_dev *nic_dev, u8 *cos_up)
{
void *uld_array[HINIC_MAX_PF_NUM];
struct hinic_nic_dev *tmp_dev;
u8 num_cos, old_cos_up[HINIC_DCB_COS_MAX] = {0};
u32 i, idx, num_dev = 0;
int err, rollback_err;
/* Save the old map in case the set fails */
err = hinic_get_cos_up_map(nic_dev, &num_cos, old_cos_up);
if (err || !num_cos) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Get old cos_up map failed\n");
return -EFAULT;
}
if (!memcmp(cos_up, old_cos_up, sizeof(u8) * num_cos)) {
nicif_info(nic_dev, drv, nic_dev->netdev,
"Same cos2up map, don't need to change anything\n");
return 0;
}
/* Get all pf of this chip */
err = hinic_get_pf_uld_array(nic_dev->pdev, &num_dev, uld_array);
if (err) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Get all pf private handle failed\n");
return -EFAULT;
}
err = __stop_port_flow(uld_array, num_dev);
if (err)
return -EFAULT;
for (idx = 0; idx < num_dev; idx++) {
tmp_dev = (struct hinic_nic_dev *)uld_array[idx];
err = __set_cos_up_map(tmp_dev, cos_up);
if (err) {
nicif_err(tmp_dev, drv, tmp_dev->netdev,
"Set cos_up map to hw failed\n");
goto set_err;
}
}
__start_port_flow(uld_array, num_dev);
hinic_set_chip_cos_up_map(nic_dev->pdev, cos_up);
return 0;
set_err:
/* undo all settings */
for (i = 0; i < idx; i++) {
tmp_dev = (struct hinic_nic_dev *)uld_array[i];
rollback_err = __set_cos_up_map(tmp_dev, old_cos_up);
if (rollback_err)
nicif_err(tmp_dev, drv, tmp_dev->netdev,
"Undo cos_up map to hw failed\n");
}
__start_port_flow(uld_array, num_dev);
return err;
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei HiNIC PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
*/
#ifndef HINIC_DCB_H_
#define HINIC_DCB_H_
#define HINIC_DCB_CFG_TX 0
#define HINIC_DCB_CFG_RX 1
/* IEEE8021QAZ Transmission selection algorithm identifiers */
#define IEEE8021Q_TSA_STRICT 0x0
#define IEEE8021Q_TSA_CBSHAPER 0x1
#define IEEE8021Q_TSA_ETS 0x2
#define IEEE8021Q_TSA_VENDOR 0xFF
enum HINIC_DCB_FLAGS {
HINIC_DCB_UP_COS_SETTING,
HINIC_DCB_TRAFFIC_STOPPED,
};
extern const struct dcbnl_rtnl_ops hinic_dcbnl_ops;
u8 hinic_dcb_get_tc(struct hinic_dcb_config *dcb_cfg, int dir, u8 up);
int hinic_dcb_init(struct hinic_nic_dev *nic_dev);
int hinic_dcb_reset_hw_config(struct hinic_nic_dev *nic_dev);
int hinic_setup_tc(struct net_device *netdev, u8 tc);
void hinic_configure_dcb(struct net_device *netdev);
int hinic_set_cos_up_map(struct hinic_nic_dev *nic_dev, u8 *cos_up);
int hinic_get_num_cos(struct hinic_nic_dev *nic_dev, u8 *num_cos);
int hinic_get_cos_up_map(struct hinic_nic_dev *nic_dev,
u8 *num_cos, u8 *cos_up);
u8 hinic_setup_dcb_tool(struct net_device *netdev, u8 *dcb_en, bool wr_flag);
void hinic_dcbnl_set_pfc_en_tool(struct net_device *netdev,
u8 *value, bool flag);
void hinic_dcbnl_set_pfc_cfg_tool(struct net_device *netdev, u8 setting);
void hinic_dcbnl_get_pfc_cfg_tool(struct net_device *netdev, u8 *setting);
u8 hinic_dcbnl_set_pfc_tool(struct net_device *netdev);
void hinic_dcbnl_get_tc_num_tool(struct net_device *netdev, u8 *tc_num);
void hinic_dcbnl_set_ets_tc_tool(struct net_device *netdev, u8 tc[], bool flag);
void hinic_dcbnl_set_ets_pecent_tool(struct net_device *netdev,
u8 percent[], bool flag);
void hinic_dcbnl_set_ets_en_tool(struct net_device *netdev,
u8 *value, bool flag);
void hinic_dcbnl_set_ets_strict_tool(struct net_device *netdev,
u8 *setting, bool flag);
u8 hinic_dcbnl_set_ets_tool(struct net_device *netdev);
#endif
// SPDX-License-Identifier: GPL-2.0
/* Huawei HiNIC PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include "ossl_knl.h"
#include "hinic_hw_mgmt.h"
#include "hinic_hw.h"
#include "hinic_nic_cfg.h"
#include "hinic_nic_dev.h"
#include "hinic_dfx_def.h"
#include "hinic_tx.h"
#include "hinic_rx.h"
#include "hinic_qp.h"
#ifndef SET_ETHTOOL_OPS
#define SET_ETHTOOL_OPS(netdev, ops) \
((netdev)->ethtool_ops = (ops))
#endif
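/* SET_ETHTOOL_OPS was removed from mainline around v3.16; the #ifndef
 * fallback above keeps this file building on kernels without it.
 */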
struct hinic_stats {
char name[ETH_GSTRING_LEN];
u32 size;
int offset;
};
#define ARRAY_LEN(arr) ((int)((int)sizeof(arr) / (int)sizeof(arr[0])))
#define HINIC_NETDEV_STAT(_stat_item) { \
.name = #_stat_item, \
.size = sizeof_field(struct rtnl_link_stats64, _stat_item), \
.offset = offsetof(struct rtnl_link_stats64, _stat_item) \
}
static struct hinic_stats hinic_netdev_stats[] = {
HINIC_NETDEV_STAT(rx_packets),
HINIC_NETDEV_STAT(tx_packets),
HINIC_NETDEV_STAT(rx_bytes),
HINIC_NETDEV_STAT(tx_bytes),
HINIC_NETDEV_STAT(rx_errors),
HINIC_NETDEV_STAT(tx_errors),
HINIC_NETDEV_STAT(rx_dropped),
HINIC_NETDEV_STAT(tx_dropped),
HINIC_NETDEV_STAT(multicast),
HINIC_NETDEV_STAT(collisions),
HINIC_NETDEV_STAT(rx_length_errors),
HINIC_NETDEV_STAT(rx_over_errors),
HINIC_NETDEV_STAT(rx_crc_errors),
HINIC_NETDEV_STAT(rx_frame_errors),
HINIC_NETDEV_STAT(rx_fifo_errors),
HINIC_NETDEV_STAT(rx_missed_errors),
HINIC_NETDEV_STAT(tx_aborted_errors),
HINIC_NETDEV_STAT(tx_carrier_errors),
HINIC_NETDEV_STAT(tx_fifo_errors),
HINIC_NETDEV_STAT(tx_heartbeat_errors),
};
#define HINIC_NIC_STAT(_stat_item) { \
.name = #_stat_item, \
.size = sizeof_field(struct hinic_nic_stats, _stat_item), \
.offset = offsetof(struct hinic_nic_stats, _stat_item) \
}
static struct hinic_stats hinic_nic_dev_stats[] = {
HINIC_NIC_STAT(netdev_tx_timeout),
};
static struct hinic_stats hinic_nic_dev_stats_extern[] = {
HINIC_NIC_STAT(tx_carrier_off_drop),
HINIC_NIC_STAT(tx_invalid_qid),
};
#define HINIC_RXQ_STAT(_stat_item) { \
.name = "rxq%d_"#_stat_item, \
.size = sizeof_field(struct hinic_rxq_stats, _stat_item), \
.offset = offsetof(struct hinic_rxq_stats, _stat_item) \
}
#define HINIC_TXQ_STAT(_stat_item) { \
.name = "txq%d_"#_stat_item, \
.size = sizeof_field(struct hinic_txq_stats, _stat_item), \
.offset = offsetof(struct hinic_txq_stats, _stat_item) \
}
/*lint -save -e786*/
static struct hinic_stats hinic_rx_queue_stats[] = {
HINIC_RXQ_STAT(packets),
HINIC_RXQ_STAT(bytes),
HINIC_RXQ_STAT(errors),
HINIC_RXQ_STAT(csum_errors),
HINIC_RXQ_STAT(other_errors),
HINIC_RXQ_STAT(dropped),
HINIC_RXQ_STAT(rx_buf_empty),
};
static struct hinic_stats hinic_rx_queue_stats_extern[] = {
HINIC_RXQ_STAT(alloc_skb_err),
HINIC_RXQ_STAT(alloc_rx_buf_err),
HINIC_RXQ_STAT(map_rx_buf_err),
};
static struct hinic_stats hinic_tx_queue_stats[] = {
HINIC_TXQ_STAT(packets),
HINIC_TXQ_STAT(bytes),
HINIC_TXQ_STAT(busy),
HINIC_TXQ_STAT(wake),
HINIC_TXQ_STAT(dropped),
HINIC_TXQ_STAT(big_frags_pkts),
HINIC_TXQ_STAT(big_udp_pkts),
};
static struct hinic_stats hinic_tx_queue_stats_extern[] = {
HINIC_TXQ_STAT(ufo_pkt_unsupport),
HINIC_TXQ_STAT(ufo_linearize_err),
HINIC_TXQ_STAT(ufo_alloc_skb_err),
HINIC_TXQ_STAT(skb_pad_err),
HINIC_TXQ_STAT(frag_len_overflow),
HINIC_TXQ_STAT(offload_cow_skb_err),
HINIC_TXQ_STAT(alloc_cpy_frag_err),
HINIC_TXQ_STAT(map_cpy_frag_err),
HINIC_TXQ_STAT(map_frag_err),
HINIC_TXQ_STAT(frag_size_err),
HINIC_TXQ_STAT(unknown_tunnel_pkt),
};
/*lint -restore*/
#define HINIC_FUNC_STAT(_stat_item) { \
.name = #_stat_item, \
.size = sizeof_field(struct hinic_vport_stats, _stat_item), \
.offset = offsetof(struct hinic_vport_stats, _stat_item) \
}
static struct hinic_stats hinic_function_stats[] = {
HINIC_FUNC_STAT(tx_unicast_pkts_vport),
HINIC_FUNC_STAT(tx_unicast_bytes_vport),
HINIC_FUNC_STAT(tx_multicast_pkts_vport),
HINIC_FUNC_STAT(tx_multicast_bytes_vport),
HINIC_FUNC_STAT(tx_broadcast_pkts_vport),
HINIC_FUNC_STAT(tx_broadcast_bytes_vport),
HINIC_FUNC_STAT(rx_unicast_pkts_vport),
HINIC_FUNC_STAT(rx_unicast_bytes_vport),
HINIC_FUNC_STAT(rx_multicast_pkts_vport),
HINIC_FUNC_STAT(rx_multicast_bytes_vport),
HINIC_FUNC_STAT(rx_broadcast_pkts_vport),
HINIC_FUNC_STAT(rx_broadcast_bytes_vport),
HINIC_FUNC_STAT(tx_discard_vport),
HINIC_FUNC_STAT(rx_discard_vport),
HINIC_FUNC_STAT(tx_err_vport),
HINIC_FUNC_STAT(rx_err_vport),
};
#define HINIC_PORT_STAT(_stat_item) { \
.name = #_stat_item, \
.size = sizeof_field(struct hinic_phy_port_stats, _stat_item), \
.offset = offsetof(struct hinic_phy_port_stats, _stat_item) \
}
static struct hinic_stats hinic_port_stats[] = {
HINIC_PORT_STAT(mac_rx_total_pkt_num),
HINIC_PORT_STAT(mac_rx_total_oct_num),
HINIC_PORT_STAT(mac_rx_bad_pkt_num),
HINIC_PORT_STAT(mac_rx_bad_oct_num),
HINIC_PORT_STAT(mac_rx_good_pkt_num),
HINIC_PORT_STAT(mac_rx_good_oct_num),
HINIC_PORT_STAT(mac_rx_uni_pkt_num),
HINIC_PORT_STAT(mac_rx_multi_pkt_num),
HINIC_PORT_STAT(mac_rx_broad_pkt_num),
HINIC_PORT_STAT(mac_tx_total_pkt_num),
HINIC_PORT_STAT(mac_tx_total_oct_num),
HINIC_PORT_STAT(mac_tx_bad_pkt_num),
HINIC_PORT_STAT(mac_tx_bad_oct_num),
HINIC_PORT_STAT(mac_tx_good_pkt_num),
HINIC_PORT_STAT(mac_tx_good_oct_num),
HINIC_PORT_STAT(mac_tx_uni_pkt_num),
HINIC_PORT_STAT(mac_tx_multi_pkt_num),
HINIC_PORT_STAT(mac_tx_broad_pkt_num),
HINIC_PORT_STAT(mac_rx_fragment_pkt_num),
HINIC_PORT_STAT(mac_rx_undersize_pkt_num),
HINIC_PORT_STAT(mac_rx_undermin_pkt_num),
HINIC_PORT_STAT(mac_rx_64_oct_pkt_num),
HINIC_PORT_STAT(mac_rx_65_127_oct_pkt_num),
HINIC_PORT_STAT(mac_rx_128_255_oct_pkt_num),
HINIC_PORT_STAT(mac_rx_256_511_oct_pkt_num),
HINIC_PORT_STAT(mac_rx_512_1023_oct_pkt_num),
HINIC_PORT_STAT(mac_rx_1024_1518_oct_pkt_num),
HINIC_PORT_STAT(mac_rx_1519_2047_oct_pkt_num),
HINIC_PORT_STAT(mac_rx_2048_4095_oct_pkt_num),
HINIC_PORT_STAT(mac_rx_4096_8191_oct_pkt_num),
HINIC_PORT_STAT(mac_rx_8192_9216_oct_pkt_num),
HINIC_PORT_STAT(mac_rx_9217_12287_oct_pkt_num),
HINIC_PORT_STAT(mac_rx_12288_16383_oct_pkt_num),
HINIC_PORT_STAT(mac_rx_1519_max_good_pkt_num),
HINIC_PORT_STAT(mac_rx_1519_max_bad_pkt_num),
HINIC_PORT_STAT(mac_rx_oversize_pkt_num),
HINIC_PORT_STAT(mac_rx_jabber_pkt_num),
HINIC_PORT_STAT(mac_rx_pause_num),
HINIC_PORT_STAT(mac_rx_pfc_pkt_num),
HINIC_PORT_STAT(mac_rx_pfc_pri0_pkt_num),
HINIC_PORT_STAT(mac_rx_pfc_pri1_pkt_num),
HINIC_PORT_STAT(mac_rx_pfc_pri2_pkt_num),
HINIC_PORT_STAT(mac_rx_pfc_pri3_pkt_num),
HINIC_PORT_STAT(mac_rx_pfc_pri4_pkt_num),
HINIC_PORT_STAT(mac_rx_pfc_pri5_pkt_num),
HINIC_PORT_STAT(mac_rx_pfc_pri6_pkt_num),
HINIC_PORT_STAT(mac_rx_pfc_pri7_pkt_num),
HINIC_PORT_STAT(mac_rx_control_pkt_num),
HINIC_PORT_STAT(mac_rx_sym_err_pkt_num),
HINIC_PORT_STAT(mac_rx_fcs_err_pkt_num),
HINIC_PORT_STAT(mac_rx_send_app_good_pkt_num),
HINIC_PORT_STAT(mac_rx_send_app_bad_pkt_num),
HINIC_PORT_STAT(mac_tx_fragment_pkt_num),
HINIC_PORT_STAT(mac_tx_undersize_pkt_num),
HINIC_PORT_STAT(mac_tx_undermin_pkt_num),
HINIC_PORT_STAT(mac_tx_64_oct_pkt_num),
HINIC_PORT_STAT(mac_tx_65_127_oct_pkt_num),
HINIC_PORT_STAT(mac_tx_128_255_oct_pkt_num),
HINIC_PORT_STAT(mac_tx_256_511_oct_pkt_num),
HINIC_PORT_STAT(mac_tx_512_1023_oct_pkt_num),
HINIC_PORT_STAT(mac_tx_1024_1518_oct_pkt_num),
HINIC_PORT_STAT(mac_tx_1519_2047_oct_pkt_num),
HINIC_PORT_STAT(mac_tx_2048_4095_oct_pkt_num),
HINIC_PORT_STAT(mac_tx_4096_8191_oct_pkt_num),
HINIC_PORT_STAT(mac_tx_8192_9216_oct_pkt_num),
HINIC_PORT_STAT(mac_tx_9217_12287_oct_pkt_num),
HINIC_PORT_STAT(mac_tx_12288_16383_oct_pkt_num),
HINIC_PORT_STAT(mac_tx_1519_max_good_pkt_num),
HINIC_PORT_STAT(mac_tx_1519_max_bad_pkt_num),
HINIC_PORT_STAT(mac_tx_oversize_pkt_num),
HINIC_PORT_STAT(mac_tx_jabber_pkt_num),
HINIC_PORT_STAT(mac_tx_pause_num),
HINIC_PORT_STAT(mac_tx_pfc_pkt_num),
HINIC_PORT_STAT(mac_tx_pfc_pri0_pkt_num),
HINIC_PORT_STAT(mac_tx_pfc_pri1_pkt_num),
HINIC_PORT_STAT(mac_tx_pfc_pri2_pkt_num),
HINIC_PORT_STAT(mac_tx_pfc_pri3_pkt_num),
HINIC_PORT_STAT(mac_tx_pfc_pri4_pkt_num),
HINIC_PORT_STAT(mac_tx_pfc_pri5_pkt_num),
HINIC_PORT_STAT(mac_tx_pfc_pri6_pkt_num),
HINIC_PORT_STAT(mac_tx_pfc_pri7_pkt_num),
HINIC_PORT_STAT(mac_tx_control_pkt_num),
HINIC_PORT_STAT(mac_tx_err_all_pkt_num),
HINIC_PORT_STAT(mac_tx_from_app_good_pkt_num),
HINIC_PORT_STAT(mac_tx_from_app_bad_pkt_num),
};
u32 hinic_get_io_stats_size(struct hinic_nic_dev *nic_dev)
{
return ARRAY_LEN(hinic_nic_dev_stats) +
ARRAY_LEN(hinic_nic_dev_stats_extern) +
(ARRAY_LEN(hinic_tx_queue_stats) +
ARRAY_LEN(hinic_tx_queue_stats_extern) +
ARRAY_LEN(hinic_rx_queue_stats) +
ARRAY_LEN(hinic_rx_queue_stats_extern)) * nic_dev->max_qps;
}
#define GET_VALUE_OF_PTR(size, ptr) ( \
(size) == sizeof(u64) ? *(u64 *)(ptr) : \
(size) == sizeof(u32) ? *(u32 *)(ptr) : \
(size) == sizeof(u16) ? *(u16 *)(ptr) : *(u8 *)(ptr) \
)
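/* GET_VALUE_OF_PTR() widens a stats field of 1/2/4/8 bytes to u64,
 * e.g. GET_VALUE_OF_PTR(sizeof(u32), ptr) evaluates to *(u32 *)(ptr).
 */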
#define DEV_STATS_PACK(items, item_idx, array, stats_ptr) { \
int j; \
for (j = 0; j < ARRAY_LEN(array); j++) { \
memcpy((items)[item_idx].name, (array)[j].name, \
HINIC_SHOW_ITEM_LEN); \
(items)[item_idx].hexadecimal = 0; \
(items)[item_idx].value = \
GET_VALUE_OF_PTR((array)[j].size, \
(char *)(stats_ptr) + (array)[j].offset); \
item_idx++; \
} \
}
#define QUEUE_STATS_PACK(items, item_idx, array, stats_ptr, qid) { \
int j, err; \
for (j = 0; j < ARRAY_LEN(array); j++) { \
memcpy((items)[item_idx].name, (array)[j].name, \
HINIC_SHOW_ITEM_LEN); \
err = snprintf((items)[item_idx].name, HINIC_SHOW_ITEM_LEN,\
(array)[j].name, (qid)); \
if (err <= 0 || err >= HINIC_SHOW_ITEM_LEN) \
pr_err("Failed snprintf: func_ret(%d), dest_len(%d)\n",\
err, HINIC_SHOW_ITEM_LEN); \
(items)[item_idx].hexadecimal = 0; \
(items)[item_idx].value = \
GET_VALUE_OF_PTR((array)[j].size, \
(char *)(stats_ptr) + (array)[j].offset); \
item_idx++; \
} \
}
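/* QUEUE_STATS_PACK() differs from DEV_STATS_PACK() only in that it
 * formats the queue id into the "txq%d_"/"rxq%d_" name templates.
 */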
void hinic_get_io_stats(struct hinic_nic_dev *nic_dev,
struct hinic_show_item *items)
{
int item_idx = 0;
u16 qid;
DEV_STATS_PACK(items, item_idx, hinic_nic_dev_stats, &nic_dev->stats);
DEV_STATS_PACK(items, item_idx, hinic_nic_dev_stats_extern,
&nic_dev->stats);
for (qid = 0; qid < nic_dev->max_qps; qid++) {
QUEUE_STATS_PACK(items, item_idx, hinic_tx_queue_stats,
&nic_dev->txqs[qid].txq_stats, qid);
QUEUE_STATS_PACK(items, item_idx, hinic_tx_queue_stats_extern,
&nic_dev->txqs[qid].txq_stats, qid);
}
for (qid = 0; qid < nic_dev->max_qps; qid++) {
QUEUE_STATS_PACK(items, item_idx, hinic_rx_queue_stats,
&nic_dev->rxqs[qid].rxq_stats, qid);
QUEUE_STATS_PACK(items, item_idx, hinic_rx_queue_stats_extern,
&nic_dev->rxqs[qid].rxq_stats, qid);
}
}
#define LP_DEFAULT_TIME 5 /* seconds */
#define LP_PKT_LEN 1514
#define OBJ_STR_MAX_LEN 32
#define SET_LINK_STR_MAX_LEN 128
#define PORT_DOWN_ERR_IDX 0
enum diag_test_index {
INTERNAL_LP_TEST = 0,
EXTERNAL_LP_TEST = 1,
DIAG_TEST_MAX = 2,
};
static char hinic_test_strings[][ETH_GSTRING_LEN] = {
"Internal lb test (on/offline)",
"External lb test (external_lb)",
};
struct hw2ethtool_link_mode {
enum ethtool_link_mode_bit_indices link_mode_bit;
u32 speed;
enum hinic_link_mode hw_link_mode;
};
static struct hw2ethtool_link_mode
hw_to_ethtool_link_mode_table[HINIC_LINK_MODE_NUMBERS] = {
{
.link_mode_bit = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
.speed = SPEED_10000,
.hw_link_mode = HINIC_10GE_BASE_KR,
},
{
.link_mode_bit = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
.speed = SPEED_40000,
.hw_link_mode = HINIC_40GE_BASE_KR4,
},
{
.link_mode_bit = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
.speed = SPEED_40000,
.hw_link_mode = HINIC_40GE_BASE_CR4,
},
{
.link_mode_bit = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
.speed = SPEED_100000,
.hw_link_mode = HINIC_100GE_BASE_KR4,
},
{
.link_mode_bit = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
.speed = SPEED_100000,
.hw_link_mode = HINIC_100GE_BASE_CR4,
},
{
.link_mode_bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
.speed = SPEED_25000,
.hw_link_mode = HINIC_25GE_BASE_KR_S,
},
{
.link_mode_bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
.speed = SPEED_25000,
.hw_link_mode = HINIC_25GE_BASE_CR_S,
},
{
.link_mode_bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
.speed = SPEED_25000,
.hw_link_mode = HINIC_25GE_BASE_KR,
},
{
.link_mode_bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
.speed = SPEED_25000,
.hw_link_mode = HINIC_25GE_BASE_CR,
},
{
.link_mode_bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
.speed = SPEED_1000,
.hw_link_mode = HINIC_GE_BASE_KX,
},
};
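/* HINIC_25GE_BASE_KR_S/CR_S and HINIC_25GE_BASE_KR/CR are distinct HW
 * link modes, so the 25G KR/CR ethtool bits intentionally appear twice
 * in the table above.
 */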
u32 hw_to_ethtool_speed[LINK_SPEED_LEVELS] = {
SPEED_10, SPEED_100,
SPEED_1000, SPEED_10000,
SPEED_25000, SPEED_40000,
SPEED_100000
};
static int hinic_ethtool_to_hw_speed_level(u32 speed)
{
int i;
for (i = 0; i < LINK_SPEED_LEVELS; i++) {
if (hw_to_ethtool_speed[i] == speed)
break;
}
return i;
}
static int hinic_get_link_mode_index(enum hinic_link_mode link_mode)
{
int i = 0;
for (i = 0; i < HINIC_LINK_MODE_NUMBERS; i++) {
if (link_mode == hw_to_ethtool_link_mode_table[i].hw_link_mode)
break;
}
return i;
}
static int hinic_is_support_speed(enum hinic_link_mode supported_link,
u32 speed)
{
enum hinic_link_mode link_mode;
int idx;
for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) {
if (!(supported_link & ((u32)1 << link_mode)))
continue;
idx = hinic_get_link_mode_index(link_mode);
if (idx >= HINIC_LINK_MODE_NUMBERS)
continue;
if (hw_to_ethtool_link_mode_table[idx].speed == speed)
return 1;
}
return 0;
}
#define GET_SUPPORTED_MODE 0
#define GET_ADVERTISED_MODE 1
struct cmd_link_settings {
u64 supported;
u64 advertising;
u32 speed;
u8 duplex;
u8 port;
u8 autoneg;
};
#define ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE(ecmd, mode) \
((ecmd)->supported |= \
(1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit))
#define ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE(ecmd, mode) \
((ecmd)->advertising |= \
(1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit))
#define ETHTOOL_ADD_SUPPORTED_LINK_MODE(ecmd, mode) \
((ecmd)->supported |= SUPPORTED_##mode)
#define ETHTOOL_ADD_ADVERTISED_LINK_MODE(ecmd, mode) \
((ecmd)->advertising |= ADVERTISED_##mode)
#define ETHTOOL_TEST_LINK_MODE_SUPPORTED(ecmd, mode) \
((ecmd)->supported & SUPPORTED_##mode)
static void hinic_link_port_type(struct cmd_link_settings *link_settings,
enum hinic_port_type port_type)
{
switch (port_type) {
case HINIC_PORT_ELEC:
case HINIC_PORT_TP:
ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, TP);
ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, TP);
link_settings->port = PORT_TP;
break;
case HINIC_PORT_AOC:
case HINIC_PORT_FIBRE:
ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE);
ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE);
link_settings->port = PORT_FIBRE;
break;
case HINIC_PORT_COPPER:
ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE);
ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE);
link_settings->port = PORT_DA;
break;
case HINIC_PORT_BACKPLANE:
ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Backplane);
ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Backplane);
link_settings->port = PORT_NONE;
break;
default:
link_settings->port = PORT_OTHER;
break;
}
}
static void hinic_add_ethtool_link_mode(struct cmd_link_settings *link_settings,
enum hinic_link_mode hw_link_mode,
u32 name)
{
enum hinic_link_mode link_mode;
int idx = 0;
for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) {
if (hw_link_mode & ((u32)1 << link_mode)) {
idx = hinic_get_link_mode_index(link_mode);
if (idx >= HINIC_LINK_MODE_NUMBERS)
continue;
if (name == GET_SUPPORTED_MODE)
ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE
(link_settings, idx);
else
ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE
(link_settings, idx);
}
}
}
static int hinic_link_speed_set(struct hinic_nic_dev *nic_dev,
struct cmd_link_settings *link_settings,
struct nic_port_info *port_info)
{
struct net_device *netdev = nic_dev->netdev;
enum hinic_link_mode supported_link = 0, advertised_link = 0;
u8 link_state = 0;
int err;
err = hinic_get_link_mode(nic_dev->hwdev,
&supported_link, &advertised_link);
if (err || supported_link == HINIC_SUPPORTED_UNKNOWN ||
advertised_link == HINIC_SUPPORTED_UNKNOWN) {
nicif_err(nic_dev, drv, netdev, "Failed to get supported link modes\n");
return err;
}
hinic_add_ethtool_link_mode(link_settings, supported_link,
GET_SUPPORTED_MODE);
hinic_add_ethtool_link_mode(link_settings, advertised_link,
GET_ADVERTISED_MODE);
err = hinic_get_link_state(nic_dev->hwdev, &link_state);
if (!err && link_state) {
link_settings->speed = port_info->speed < LINK_SPEED_LEVELS ?
hw_to_ethtool_speed[port_info->speed] :
(u32)SPEED_UNKNOWN;
link_settings->duplex = port_info->duplex;
} else {
link_settings->speed = (u32)SPEED_UNKNOWN;
link_settings->duplex = DUPLEX_UNKNOWN;
}
return 0;
}
static int get_link_settings(struct net_device *netdev,
struct cmd_link_settings *link_settings)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
struct nic_port_info port_info = {0};
struct nic_pause_config nic_pause = {0};
int err;
err = hinic_get_port_info(nic_dev->hwdev, &port_info);
if (err) {
nicif_err(nic_dev, drv, netdev, "Failed to get port info\n");
return err;
}
err = hinic_link_speed_set(nic_dev, link_settings, &port_info);
if (err)
return err;
hinic_link_port_type(link_settings, port_info.port_type);
link_settings->autoneg = port_info.autoneg_state;
if (port_info.autoneg_cap)
ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Autoneg);
if (port_info.autoneg_state)
ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Autoneg);
if (!HINIC_FUNC_IS_VF(nic_dev->hwdev)) {
err = hinic_get_pause_info(nic_dev->hwdev, &nic_pause);
if (err) {
nicif_err(nic_dev, drv, netdev,
"Failed to get pauseparam from hw\n");
return err;
}
ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Pause);
if (nic_pause.rx_pause && nic_pause.tx_pause) {
ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Pause);
} else if (nic_pause.tx_pause) {
ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings,
Asym_Pause);
} else if (nic_pause.rx_pause) {
ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Pause);
ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings,
Asym_Pause);
}
}
return 0;
}
static int hinic_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct cmd_link_settings settings = {0};
struct ethtool_link_settings *base = &cmd->base;
int err;
ethtool_link_ksettings_zero_link_mode(cmd, supported);
ethtool_link_ksettings_zero_link_mode(cmd, advertising);
err = get_link_settings(netdev, &settings);
if (err)
return err;
bitmap_copy(cmd->link_modes.supported,
(unsigned long *)&settings.supported,
__ETHTOOL_LINK_MODE_MASK_NBITS);
bitmap_copy(cmd->link_modes.advertising,
(unsigned long *)&settings.advertising,
__ETHTOOL_LINK_MODE_MASK_NBITS);
base->autoneg = settings.autoneg;
base->speed = settings.speed;
base->duplex = settings.duplex;
base->port = settings.port;
return 0;
}
static int hinic_is_speed_legal(struct hinic_nic_dev *nic_dev, u32 speed)
{
struct net_device *netdev = nic_dev->netdev;
enum hinic_link_mode supported_link = 0, advertised_link = 0;
enum nic_speed_level speed_level = 0;
int err;
err = hinic_get_link_mode(nic_dev->hwdev,
&supported_link, &advertised_link);
if (err || supported_link == HINIC_SUPPORTED_UNKNOWN ||
advertised_link == HINIC_SUPPORTED_UNKNOWN) {
nicif_err(nic_dev, drv, netdev,
"Failed to get supported link modes\n");
return -EAGAIN;
}
speed_level = hinic_ethtool_to_hw_speed_level(speed);
if (speed_level >= LINK_SPEED_LEVELS ||
!hinic_is_support_speed(supported_link, speed)) {
nicif_err(nic_dev, drv, netdev,
"Not supported speed: %d\n", speed);
return -EINVAL;
}
return 0;
}
static int hinic_set_settings_to_hw(struct hinic_nic_dev *nic_dev,
u32 set_settings, u8 autoneg, u32 speed)
{
struct net_device *netdev = nic_dev->netdev;
struct hinic_link_ksettings settings = {0};
enum nic_speed_level speed_level = 0;
char set_link_str[SET_LINK_STR_MAX_LEN] = {0};
int err = 0;
err = snprintf(set_link_str, sizeof(set_link_str), "%s",
(set_settings & HILINK_LINK_SET_AUTONEG) ?
(autoneg ? "autong enable " : "autong disable ") : "");
if (err < 0 || err >= SET_LINK_STR_MAX_LEN) {
nicif_err(nic_dev, drv, netdev,
"Failed to snprintf link state, function return(%d) and dest_len(%d)\n",
err, SET_LINK_STR_MAX_LEN);
return -EFAULT;
}
if (set_settings & HILINK_LINK_SET_SPEED) {
speed_level = hinic_ethtool_to_hw_speed_level(speed);
err = snprintf(set_link_str + strlen(set_link_str),
sizeof(set_link_str) - strlen(set_link_str),
"speed %d ", speed);
if (err <= 0 || err >= SET_LINK_STR_MAX_LEN) {
nicif_err(nic_dev, drv, netdev,
"Failed to snprintf link speed, function return(%d) and dest_len(%d)\n",
err, SET_LINK_STR_MAX_LEN);
return -EFAULT;
}
}
settings.valid_bitmap = set_settings;
settings.autoneg = autoneg;
settings.speed = speed_level;
err = hinic_set_link_settings(nic_dev->hwdev, &settings);
if (err != HINIC_MGMT_CMD_UNSUPPORTED) {
if (err)
nicif_err(nic_dev, drv, netdev, "Set %sfailed\n",
set_link_str);
else
nicif_info(nic_dev, drv, netdev, "Set %ssuccess\n",
set_link_str);
return err;
}
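/* The combined link-settings command is not supported by this
 * firmware; fall back to setting autoneg and speed separately.
 */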
if (set_settings & HILINK_LINK_SET_AUTONEG) {
err = hinic_set_autoneg(nic_dev->hwdev,
(autoneg == AUTONEG_ENABLE));
if (err)
nicif_err(nic_dev, drv, netdev, "%s autoneg failed\n",
(autoneg == AUTONEG_ENABLE) ?
"Enable" : "Disable");
else
nicif_info(nic_dev, drv, netdev, "%s autoneg success\n",
(autoneg == AUTONEG_ENABLE) ?
"Enable" : "Disable");
}
if (!err && (set_settings & HILINK_LINK_SET_SPEED)) {
err = hinic_set_speed(nic_dev->hwdev, speed_level);
if (err)
nicif_err(nic_dev, drv, netdev, "Set speed %d failed\n",
speed);
else
nicif_info(nic_dev, drv, netdev, "Set speed %d success\n",
speed);
}
return err;
}
static int set_link_settings(struct net_device *netdev, u8 autoneg, u32 speed)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
struct nic_port_info port_info = {0};
u32 set_settings = 0;
int err = 0;
if (!FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) {
nicif_err(nic_dev, drv, netdev, "Not support set link settings\n");
return -EOPNOTSUPP;
}
err = hinic_get_port_info(nic_dev->hwdev, &port_info);
if (err) {
nicif_err(nic_dev, drv, netdev, "Failed to get current settings\n");
return -EAGAIN;
}
/* Always set autonegotiation */
if (port_info.autoneg_cap)
set_settings |= HILINK_LINK_SET_AUTONEG;
if (autoneg == AUTONEG_ENABLE) {
if (!port_info.autoneg_cap) {
nicif_err(nic_dev, drv, netdev, "Not support autoneg\n");
return -EOPNOTSUPP;
}
} else if (speed != (u32)SPEED_UNKNOWN) {
/* Set speed only when autoneg is disabled */
err = hinic_is_speed_legal(nic_dev, speed);
if (err)
return err;
set_settings |= HILINK_LINK_SET_SPEED;
} else {
nicif_err(nic_dev, drv, netdev, "Need to set speed when autoneg is off\n");
return -EOPNOTSUPP;
}
if (set_settings)
err = hinic_set_settings_to_hw(nic_dev, set_settings,
autoneg, speed);
else
nicif_info(nic_dev, drv, netdev, "Nothing changed, exiting without setting anything\n");
return err;
}
static int hinic_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
/* Only setting autoneg and speed is supported */
return set_link_settings(netdev, cmd->base.autoneg,
cmd->base.speed);
}
static void hinic_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
struct pci_dev *pdev = nic_dev->pdev;
u8 mgmt_ver[HINIC_MGMT_VERSION_MAX_LEN] = {0};
int err;
strlcpy(info->driver, HINIC_DRV_NAME, sizeof(info->driver));
strlcpy(info->version, HINIC_DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info));
err = hinic_get_mgmt_version(nic_dev->hwdev, mgmt_ver);
if (err) {
nicif_err(nic_dev, drv, netdev, "Failed to get fw version\n");
return;
}
err = snprintf(info->fw_version, sizeof(info->fw_version),
"%s", mgmt_ver);
if (err <= 0 || err >= (int)sizeof(info->fw_version))
nicif_err(nic_dev, drv, netdev,
"Failed to snprintf fw_version, function return(%d) and dest_len(%d)\n",
err, (int)sizeof(info->fw_version));
}
static u32 hinic_get_msglevel(struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
return nic_dev->msg_enable;
}
static void hinic_set_msglevel(struct net_device *netdev, u32 data)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
nic_dev->msg_enable = data;
nicif_info(nic_dev, drv, netdev, "Set message level: 0x%x\n", data);
}
static int hinic_nway_reset(struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
struct nic_port_info port_info = {0};
int err;
if (!FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) {
nicif_err(nic_dev, drv, netdev, "Current function don't support to restart autoneg\n");
return -EOPNOTSUPP;
}
err = hinic_get_port_info(nic_dev->hwdev, &port_info);
if (err) {
nicif_err(nic_dev, drv, netdev,
"Get autonegotiation state failed\n");
return -EFAULT;
}
if (!port_info.autoneg_state) {
nicif_err(nic_dev, drv, netdev,
"Autonegotiation is off, don't support to restart it\n");
return -EINVAL;
}
err = hinic_set_autoneg(nic_dev->hwdev, true);
if (err) {
nicif_err(nic_dev, drv, netdev,
"Restart autonegotiation failed\n");
return -EFAULT;
}
nicif_info(nic_dev, drv, netdev, "Restart autonegotiation success\n");
return 0;
}
static void hinic_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
ring->rx_max_pending = HINIC_MAX_QUEUE_DEPTH;
ring->tx_max_pending = HINIC_MAX_QUEUE_DEPTH;
ring->rx_pending = nic_dev->rxqs[0].q_depth;
ring->tx_pending = nic_dev->txqs[0].q_depth;
}
static void hinic_update_qp_depth(struct hinic_nic_dev *nic_dev,
u16 sq_depth, u16 rq_depth)
{
u16 i;
nic_dev->sq_depth = sq_depth;
nic_dev->rq_depth = rq_depth;
for (i = 0; i < nic_dev->max_qps; i++) {
nic_dev->txqs[i].q_depth = sq_depth;
nic_dev->txqs[i].q_mask = sq_depth - 1;
nic_dev->rxqs[i].q_depth = rq_depth;
nic_dev->rxqs[i].q_mask = rq_depth - 1;
}
}
static int hinic_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
u16 new_sq_depth, new_rq_depth;
int err;
if (ring->rx_jumbo_pending || ring->rx_mini_pending) {
nicif_err(nic_dev, drv, netdev,
"Unsupported rx_jumbo_pending/rx_mini_pending\n");
return -EINVAL;
}
if (ring->tx_pending > HINIC_MAX_QUEUE_DEPTH ||
ring->tx_pending < HINIC_MIN_QUEUE_DEPTH ||
ring->rx_pending > HINIC_MAX_QUEUE_DEPTH ||
ring->rx_pending < HINIC_MIN_QUEUE_DEPTH) {
nicif_err(nic_dev, drv, netdev,
"Queue depth out of range [%d-%d]\n",
HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH);
return -EINVAL;
}
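/* Round the requested depths down to a power of two; the ring indexing
 * relies on q_mask being depth - 1.
 */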
new_sq_depth = (u16)(1U << (u16)ilog2(ring->tx_pending));
new_rq_depth = (u16)(1U << (u16)ilog2(ring->rx_pending));
if (new_sq_depth == nic_dev->sq_depth &&
new_rq_depth == nic_dev->rq_depth)
return 0;
nicif_info(nic_dev, drv, netdev,
"Change Tx/Rx ring depth from %d/%d to %d/%d\n",
nic_dev->sq_depth, nic_dev->rq_depth,
new_sq_depth, new_rq_depth);
if (!netif_running(netdev)) {
hinic_update_qp_depth(nic_dev, new_sq_depth, new_rq_depth);
} else {
nicif_info(nic_dev, drv, netdev, "Restarting netdev\n");
err = hinic_close(netdev);
if (err) {
nicif_err(nic_dev, drv, netdev,
"Failed to close netdev\n");
return -EFAULT;
}
hinic_update_qp_depth(nic_dev, new_sq_depth, new_rq_depth);
err = hinic_open(netdev);
if (err) {
nicif_err(nic_dev, drv, netdev,
"Failed to open netdev\n");
return -EFAULT;
}
}
return 0;
}
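/* With TCs enabled, the queue pairs are partitioned evenly across the
 * TCs, so the maximum combined channel count shrinks by the TC count.
 */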
static u16 hinic_max_channels(struct hinic_nic_dev *nic_dev)
{
u8 tcs = (u8)netdev_get_num_tc(nic_dev->netdev);
return tcs ? nic_dev->max_qps / tcs : nic_dev->max_qps;
}
static u16 hinic_curr_channels(struct hinic_nic_dev *nic_dev)
{
if (netif_running(nic_dev->netdev))
return nic_dev->num_rss ? nic_dev->num_rss : 1;
else
return min_t(u16, hinic_max_channels(nic_dev),
nic_dev->rss_limit);
}
static void hinic_get_channels(struct net_device *netdev,
struct ethtool_channels *channels)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
channels->max_rx = 0;
channels->max_tx = 0;
channels->max_other = 0;
channels->max_combined = hinic_max_channels(nic_dev);
channels->rx_count = 0;
channels->tx_count = 0;
channels->other_count = 0;
channels->combined_count = hinic_curr_channels(nic_dev);
}
void hinic_update_num_qps(struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
u16 num_qps;
u8 tcs;
/* change num_qps to change the counters shown by ethtool -S */
tcs = (u8)netdev_get_num_tc(nic_dev->netdev);
num_qps = (u16)(nic_dev->rss_limit * (tcs ? tcs : 1));
nic_dev->num_qps = min_t(u16, nic_dev->max_qps, num_qps);
}
static int hinic_set_channels(struct net_device *netdev,
struct ethtool_channels *channels)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
unsigned int count = channels->combined_count;
int err;
if (!count) {
nicif_err(nic_dev, drv, netdev,
"Unsupported combined_count=0\n");
return -EINVAL;
}
if (channels->tx_count || channels->rx_count || channels->other_count) {
nicif_err(nic_dev, drv, netdev,
"Setting rx/tx/other count not supported\n");
return -EINVAL;
}
if (!test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) {
nicif_err(nic_dev, drv, netdev,
"This function don't support RSS, only support 1 queue pair\n");
return -EOPNOTSUPP;
}
if (count > hinic_max_channels(nic_dev)) {
nicif_err(nic_dev, drv, netdev,
"Combined count %d exceed limit %d\n",
count, hinic_max_channels(nic_dev));
return -EINVAL;
}
nicif_info(nic_dev, drv, netdev, "Set max combined queue number from %d to %d\n",
nic_dev->rss_limit, count);
nic_dev->rss_limit = (u16)count;
if (netif_running(netdev)) {
nicif_info(nic_dev, drv, netdev, "Restarting netdev\n");
err = hinic_close(netdev);
if (err) {
nicif_err(nic_dev, drv, netdev,
"Failed to close netdev\n");
return -EFAULT;
}
/* Discard user configured rss */
hinic_set_default_rss_indir(netdev);
err = hinic_open(netdev);
if (err) {
nicif_err(nic_dev, drv, netdev,
"Failed to open netdev\n");
return -EFAULT;
}
} else {
/* Discard user configured rss */
hinic_set_default_rss_indir(netdev);
hinic_update_num_qps(netdev);
}
return 0;
}
static int hinic_get_sset_count(struct net_device *netdev, int sset)
{
int count = 0, q_num = 0;
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
switch (sset) {
case ETH_SS_TEST:
return ARRAY_LEN(hinic_test_strings);
case ETH_SS_STATS:
q_num = nic_dev->num_qps;
count = ARRAY_LEN(hinic_netdev_stats) +
ARRAY_LEN(hinic_nic_dev_stats) +
(ARRAY_LEN(hinic_tx_queue_stats) +
ARRAY_LEN(hinic_rx_queue_stats)) * q_num;
if (!HINIC_FUNC_IS_VF(nic_dev->hwdev))
count += ARRAY_LEN(hinic_function_stats);
if (!HINIC_FUNC_IS_VF(nic_dev->hwdev) &&
FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev))
count += ARRAY_LEN(hinic_port_stats);
return count;
default:
return -EOPNOTSUPP;
}
}
#define COALESCE_ALL_QUEUE 0xFFFF
#define COALESCE_MAX_PENDING_LIMIT (255 * COALESCE_PENDING_LIMIT_UNIT)
#define COALESCE_MAX_TIMER_CFG (255 * COALESCE_TIMER_CFG_UNIT)
#define COALESCE_PENDING_LIMIT_UNIT 8
#define COALESCE_TIMER_CFG_UNIT 9
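/* The HW stores the timer in 9 us units and the pending limit in units
 * of 8 frames, one byte each. For example, "ethtool -C ethX rx-usecs 36
 * rx-frames 32" programs 36 / 9 = 4 and 32 / 8 = 4; the maxima are
 * 255 * 9 = 2295 us and 255 * 8 = 2040 frames. COALESCE_ALL_QUEUE is a
 * sentinel queue id meaning "apply to every queue".
 */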
static int __hinic_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *coal, u16 queue)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
struct hinic_intr_coal_info *interrupt_info;
if (queue == COALESCE_ALL_QUEUE) {
/* get tx/rx irq0 as default parameters */
interrupt_info = &nic_dev->intr_coalesce[0];
} else {
if (queue >= nic_dev->num_qps) {
nicif_err(nic_dev, drv, netdev,
"Invalid queue_id: %d\n", queue);
return -EINVAL;
}
interrupt_info = &nic_dev->intr_coalesce[queue];
}
/* coalesce timer is in units of 9 us */
coal->rx_coalesce_usecs = interrupt_info->coalesce_timer_cfg *
COALESCE_TIMER_CFG_UNIT;
/* coalesce frames is in units of 8 frames */
coal->rx_max_coalesced_frames = interrupt_info->pending_limt *
COALESCE_PENDING_LIMIT_UNIT;
/* tx/rx use the same interrupt */
coal->tx_coalesce_usecs = coal->rx_coalesce_usecs;
coal->tx_max_coalesced_frames = coal->rx_max_coalesced_frames;
coal->use_adaptive_rx_coalesce = nic_dev->adaptive_rx_coal;
coal->pkt_rate_high = (u32)interrupt_info->pkt_rate_high;
coal->rx_coalesce_usecs_high = interrupt_info->rx_usecs_high *
COALESCE_TIMER_CFG_UNIT;
coal->rx_max_coalesced_frames_high =
interrupt_info->rx_pending_limt_high *
COALESCE_PENDING_LIMIT_UNIT;
coal->pkt_rate_low = (u32)interrupt_info->pkt_rate_low;
coal->rx_coalesce_usecs_low = interrupt_info->rx_usecs_low *
COALESCE_TIMER_CFG_UNIT;
coal->rx_max_coalesced_frames_low =
interrupt_info->rx_pending_limt_low *
COALESCE_PENDING_LIMIT_UNIT;
return 0;
}
static int set_queue_coalesce(struct hinic_nic_dev *nic_dev, u16 q_id,
struct hinic_intr_coal_info *coal)
{
struct hinic_intr_coal_info *intr_coal;
struct nic_interrupt_info interrupt_info = {0};
struct net_device *netdev = nic_dev->netdev;
int err;
intr_coal = &nic_dev->intr_coalesce[q_id];
if (intr_coal->coalesce_timer_cfg != coal->coalesce_timer_cfg ||
intr_coal->pending_limt != coal->pending_limt)
intr_coal->user_set_intr_coal_flag = 1;
intr_coal->coalesce_timer_cfg = coal->coalesce_timer_cfg;
intr_coal->pending_limt = coal->pending_limt;
intr_coal->pkt_rate_low = coal->pkt_rate_low;
intr_coal->rx_usecs_low = coal->rx_usecs_low;
intr_coal->rx_pending_limt_low = coal->rx_pending_limt_low;
intr_coal->pkt_rate_high = coal->pkt_rate_high;
intr_coal->rx_usecs_high = coal->rx_usecs_high;
intr_coal->rx_pending_limt_high = coal->rx_pending_limt_high;
/* netdev not running or qp not in use,
* no need to set coalesce to hw
*/
if (!test_bit(HINIC_INTF_UP, &nic_dev->flags) ||
q_id >= nic_dev->num_qps || nic_dev->adaptive_rx_coal)
return 0;
interrupt_info.msix_index = nic_dev->irq_cfg[q_id].msix_entry_idx;
interrupt_info.lli_set = 0;
interrupt_info.interrupt_coalesc_set = 1;
interrupt_info.coalesc_timer_cfg = intr_coal->coalesce_timer_cfg;
interrupt_info.pending_limt = intr_coal->pending_limt;
interrupt_info.resend_timer_cfg = intr_coal->resend_timer_cfg;
nic_dev->rxqs[q_id].last_coalesc_timer_cfg =
intr_coal->coalesce_timer_cfg;
nic_dev->rxqs[q_id].last_pending_limt = intr_coal->pending_limt;
err = hinic_set_interrupt_cfg(nic_dev->hwdev, interrupt_info);
if (err)
nicif_warn(nic_dev, drv, netdev,
"Failed to set queue%d coalesce", q_id);
return err;
}
static int is_coalesce_legal(struct net_device *netdev,
const struct ethtool_coalesce *coal)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
struct ethtool_coalesce tmp_coal = {0};
if (coal->rx_coalesce_usecs != coal->tx_coalesce_usecs) {
nicif_err(nic_dev, drv, netdev,
"tx-usecs must be equal to rx-usecs\n");
return -EINVAL;
}
if (coal->rx_max_coalesced_frames != coal->tx_max_coalesced_frames) {
nicif_err(nic_dev, drv, netdev,
"tx-frames must be equal to rx-frames\n");
return -EINVAL;
}
tmp_coal.cmd = coal->cmd;
tmp_coal.rx_coalesce_usecs = coal->rx_coalesce_usecs;
tmp_coal.rx_max_coalesced_frames = coal->rx_max_coalesced_frames;
tmp_coal.tx_coalesce_usecs = coal->tx_coalesce_usecs;
tmp_coal.tx_max_coalesced_frames = coal->tx_max_coalesced_frames;
tmp_coal.use_adaptive_rx_coalesce = coal->use_adaptive_rx_coalesce;
tmp_coal.pkt_rate_low = coal->pkt_rate_low;
tmp_coal.rx_coalesce_usecs_low = coal->rx_coalesce_usecs_low;
tmp_coal.rx_max_coalesced_frames_low =
coal->rx_max_coalesced_frames_low;
tmp_coal.pkt_rate_high = coal->pkt_rate_high;
tmp_coal.rx_coalesce_usecs_high = coal->rx_coalesce_usecs_high;
tmp_coal.rx_max_coalesced_frames_high =
coal->rx_max_coalesced_frames_high;
if (memcmp(coal, &tmp_coal, sizeof(struct ethtool_coalesce))) {
nicif_err(nic_dev, drv, netdev,
"Only support to change rx/tx-usecs and rx/tx-frames\n");
return -EOPNOTSUPP;
}
if (coal->rx_coalesce_usecs > COALESCE_MAX_TIMER_CFG) {
nicif_err(nic_dev, drv, netdev,
"rx_coalesce_usecs out of range[%d-%d]\n", 0,
COALESCE_MAX_TIMER_CFG);
return -EOPNOTSUPP;
}
if (coal->rx_max_coalesced_frames > COALESCE_MAX_PENDING_LIMIT) {
nicif_err(nic_dev, drv, netdev,
"rx_max_coalesced_frames out of range[%d-%d]\n", 0,
COALESCE_MAX_PENDING_LIMIT);
return -EOPNOTSUPP;
}
if (coal->rx_coalesce_usecs_low > COALESCE_MAX_TIMER_CFG) {
nicif_err(nic_dev, drv, netdev,
"rx_coalesce_usecs_low out of range[%d-%d]\n", 0,
COALESCE_MAX_TIMER_CFG);
return -EOPNOTSUPP;
}
if (coal->rx_max_coalesced_frames_low > COALESCE_MAX_PENDING_LIMIT) {
nicif_err(nic_dev, drv, netdev,
"rx_max_coalesced_frames_low out of range[%d-%d]\n",
0, COALESCE_MAX_PENDING_LIMIT);
return -EOPNOTSUPP;
}
if (coal->rx_coalesce_usecs_high > COALESCE_MAX_TIMER_CFG) {
nicif_err(nic_dev, drv, netdev,
"rx_coalesce_usecs_high out of range[%d-%d]\n", 0,
COALESCE_MAX_TIMER_CFG);
return -EOPNOTSUPP;
}
if (coal->rx_max_coalesced_frames_high > COALESCE_MAX_PENDING_LIMIT) {
nicif_err(nic_dev, drv, netdev,
"rx_max_coalesced_frames_high out of range[%d-%d]\n",
0, COALESCE_MAX_PENDING_LIMIT);
return -EOPNOTSUPP;
}
if (coal->rx_coalesce_usecs_low / COALESCE_TIMER_CFG_UNIT >=
coal->rx_coalesce_usecs_high / COALESCE_TIMER_CFG_UNIT) {
nicif_err(nic_dev, drv, netdev,
"coalesce_usecs_high(%u) must more than coalesce_usecs_low(%u), after dividing %d usecs unit\n",
coal->rx_coalesce_usecs_high,
coal->rx_coalesce_usecs_low,
COALESCE_TIMER_CFG_UNIT);
return -EOPNOTSUPP;
}
if (coal->rx_max_coalesced_frames_low / COALESCE_PENDING_LIMIT_UNIT >=
coal->rx_max_coalesced_frames_high / COALESCE_PENDING_LIMIT_UNIT) {
nicif_err(nic_dev, drv, netdev,
"coalesced_frames_high(%u) must more than coalesced_frames_low(%u), after dividing %d frames unit\n",
coal->rx_max_coalesced_frames_high,
coal->rx_max_coalesced_frames_low,
COALESCE_PENDING_LIMIT_UNIT);
return -EOPNOTSUPP;
}
if (coal->pkt_rate_low >= coal->pkt_rate_high) {
nicif_err(nic_dev, drv, netdev,
"pkt_rate_high(%u) must more than pkt_rate_low(%u)\n",
coal->pkt_rate_high,
coal->pkt_rate_low);
return -EOPNOTSUPP;
}
return 0;
}
#define CHECK_COALESCE_ALIGN(coal, item, unit) \
do { \
if ((coal)->item % (unit)) \
nicif_warn(nic_dev, drv, netdev, \
"%s in %d units, change to %d\n", \
#item, (unit), ALIGN_DOWN((coal)->item, unit));\
} while (0)
#define CHECK_COALESCE_CHANGED(coal, item, unit, ori_val, obj_str) \
do { \
if (((coal)->item / (unit)) != (ori_val)) \
nicif_info(nic_dev, drv, netdev, \
"Change %s from %d to %d %s\n", \
#item, (ori_val) * (unit), \
ALIGN_DOWN((coal)->item, unit), (obj_str));\
} while (0)
#define CHECK_PKT_RATE_CHANGED(coal, item, ori_val, obj_str) \
do { \
if ((coal)->item != (ori_val)) \
nicif_info(nic_dev, drv, netdev, \
"Change %s from %llu to %u %s\n", \
#item, (ori_val), (coal)->item, (obj_str));\
} while (0)
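/* The CHECK_* macros above only log: unaligned values are not rejected
 * but silently rounded down by the integer division when the HW fields
 * are computed below.
 */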
static int __hinic_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *coal, u16 queue)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
struct hinic_intr_coal_info intr_coal = {0};
struct hinic_intr_coal_info *ori_intr_coal;
char obj_str[OBJ_STR_MAX_LEN] = {0};
u16 i;
int err = 0;
err = is_coalesce_legal(netdev, coal);
if (err)
return err;
CHECK_COALESCE_ALIGN(coal, rx_coalesce_usecs, COALESCE_TIMER_CFG_UNIT);
CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames,
COALESCE_PENDING_LIMIT_UNIT);
CHECK_COALESCE_ALIGN(coal, rx_coalesce_usecs_high,
COALESCE_TIMER_CFG_UNIT);
CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames_high,
COALESCE_PENDING_LIMIT_UNIT);
CHECK_COALESCE_ALIGN(coal, rx_coalesce_usecs_low,
COALESCE_TIMER_CFG_UNIT);
CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames_low,
COALESCE_PENDING_LIMIT_UNIT);
if (queue == COALESCE_ALL_QUEUE) {
ori_intr_coal = &nic_dev->intr_coalesce[0];
err = snprintf(obj_str, sizeof(obj_str), "for netdev");
if (err <= 0 || err >= OBJ_STR_MAX_LEN) {
nicif_err(nic_dev, drv, netdev,
"Failed to snprintf string, function return(%d) and dest_len(%d)\n",
err, OBJ_STR_MAX_LEN);
return -EFAULT;
}
} else {
ori_intr_coal = &nic_dev->intr_coalesce[queue];
err = snprintf(obj_str, sizeof(obj_str), "for queue %d", queue);
if (err <= 0 || err >= OBJ_STR_MAX_LEN) {
nicif_err(nic_dev, drv, netdev,
"Failed to snprintf string, function return(%d) and dest_len(%d)\n",
err, OBJ_STR_MAX_LEN);
return -EFAULT;
}
}
CHECK_COALESCE_CHANGED(coal, rx_coalesce_usecs, COALESCE_TIMER_CFG_UNIT,
ori_intr_coal->coalesce_timer_cfg, obj_str);
CHECK_COALESCE_CHANGED(coal, rx_max_coalesced_frames,
COALESCE_PENDING_LIMIT_UNIT,
ori_intr_coal->pending_limt, obj_str);
CHECK_PKT_RATE_CHANGED(coal, pkt_rate_high,
ori_intr_coal->pkt_rate_high, obj_str);
CHECK_COALESCE_CHANGED(coal, rx_coalesce_usecs_high,
COALESCE_TIMER_CFG_UNIT,
ori_intr_coal->rx_usecs_high, obj_str);
CHECK_COALESCE_CHANGED(coal, rx_max_coalesced_frames_high,
COALESCE_PENDING_LIMIT_UNIT,
ori_intr_coal->rx_pending_limt_high, obj_str);
CHECK_PKT_RATE_CHANGED(coal, pkt_rate_low,
ori_intr_coal->pkt_rate_low, obj_str);
CHECK_COALESCE_CHANGED(coal, rx_coalesce_usecs_low,
COALESCE_TIMER_CFG_UNIT,
ori_intr_coal->rx_usecs_low, obj_str);
CHECK_COALESCE_CHANGED(coal, rx_max_coalesced_frames_low,
COALESCE_PENDING_LIMIT_UNIT,
ori_intr_coal->rx_pending_limt_low, obj_str);
intr_coal.coalesce_timer_cfg =
(u8)(coal->rx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT);
intr_coal.pending_limt = (u8)(coal->rx_max_coalesced_frames /
COALESCE_PENDING_LIMIT_UNIT);
nic_dev->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
intr_coal.pkt_rate_high = coal->pkt_rate_high;
intr_coal.rx_usecs_high =
(u8)(coal->rx_coalesce_usecs_high / COALESCE_TIMER_CFG_UNIT);
intr_coal.rx_pending_limt_high =
(u8)(coal->rx_max_coalesced_frames_high /
COALESCE_PENDING_LIMIT_UNIT);
intr_coal.pkt_rate_low = coal->pkt_rate_low;
intr_coal.rx_usecs_low =
(u8)(coal->rx_coalesce_usecs_low / COALESCE_TIMER_CFG_UNIT);
intr_coal.rx_pending_limt_low =
(u8)(coal->rx_max_coalesced_frames_low /
COALESCE_PENDING_LIMIT_UNIT);
/* setting the coalesce timer or pending limit to zero disables coalescing */
if (!nic_dev->adaptive_rx_coal &&
(!intr_coal.coalesce_timer_cfg || !intr_coal.pending_limt))
nicif_warn(nic_dev, drv, netdev, "Coalesce will be disabled\n");
if (queue == COALESCE_ALL_QUEUE) {
for (i = 0; i < nic_dev->max_qps; i++)
set_queue_coalesce(nic_dev, i, &intr_coal);
} else {
if (queue >= nic_dev->num_qps) {
nicif_err(nic_dev, drv, netdev,
"Invalid queue_id: %d\n", queue);
return -EINVAL;
}
set_queue_coalesce(nic_dev, queue, &intr_coal);
}
return 0;
}
static int hinic_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *coal)
{
return __hinic_get_coalesce(netdev, coal, COALESCE_ALL_QUEUE);
}
static int hinic_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *coal)
{
return __hinic_set_coalesce(netdev, coal, COALESCE_ALL_QUEUE);
}
static int hinic_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
struct ethtool_coalesce *coal)
{
return __hinic_get_coalesce(netdev, coal, queue);
}
static int hinic_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
struct ethtool_coalesce *coal)
{
return __hinic_set_coalesce(netdev, coal, queue);
}
static void get_drv_queue_stats(struct hinic_nic_dev *nic_dev, u64 *data)
{
struct hinic_txq_stats txq_stats;
struct hinic_rxq_stats rxq_stats;
u16 i = 0, j = 0, qid = 0;
char *p;
for (qid = 0; qid < nic_dev->num_qps; qid++) {
if (!nic_dev->txqs)
break;
hinic_txq_get_stats(&nic_dev->txqs[qid], &txq_stats);
for (j = 0; j < ARRAY_LEN(hinic_tx_queue_stats); j++, i++) {
p = (char *)(&txq_stats) +
hinic_tx_queue_stats[j].offset;
data[i] = (hinic_tx_queue_stats[j].size ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
}
for (qid = 0; qid < nic_dev->num_qps; qid++) {
if (!nic_dev->rxqs)
break;
hinic_rxq_get_stats(&nic_dev->rxqs[qid], &rxq_stats);
for (j = 0; j < ARRAY_LEN(hinic_rx_queue_stats); j++, i++) {
p = (char *)(&rxq_stats) +
hinic_rx_queue_stats[j].offset;
data[i] = (hinic_rx_queue_stats[j].size ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
}
}
static void hinic_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
struct rtnl_link_stats64 temp;
const struct rtnl_link_stats64 *net_stats;
struct hinic_phy_port_stats *port_stats;
struct hinic_nic_stats *nic_stats;
struct hinic_vport_stats vport_stats = {0};
u16 i = 0, j = 0;
char *p;
int err;
net_stats = dev_get_stats(netdev, &temp);
for (j = 0; j < ARRAY_LEN(hinic_netdev_stats); j++, i++) {
p = (char *)(net_stats) + hinic_netdev_stats[j].offset;
data[i] = (hinic_netdev_stats[j].size ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
nic_stats = &nic_dev->stats;
for (j = 0; j < ARRAY_LEN(hinic_nic_dev_stats); j++, i++) {
p = (char *)(nic_stats) + hinic_nic_dev_stats[j].offset;
data[i] = (hinic_nic_dev_stats[j].size ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
if (!HINIC_FUNC_IS_VF(nic_dev->hwdev)) {
err = hinic_get_vport_stats(nic_dev->hwdev, &vport_stats);
if (err)
nicif_err(nic_dev, drv, netdev,
"Failed to get function stats from fw\n");
for (j = 0; j < ARRAY_LEN(hinic_function_stats); j++, i++) {
p = (char *)(&vport_stats) +
hinic_function_stats[j].offset;
data[i] = (hinic_function_stats[j].size ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
}
if (!HINIC_FUNC_IS_VF(nic_dev->hwdev) &&
FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) {
port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL);
if (!port_stats) {
nicif_err(nic_dev, drv, netdev,
"Failed to malloc port stats\n");
memset(&data[i], 0,
ARRAY_LEN(hinic_port_stats) * sizeof(*data));
i += ARRAY_LEN(hinic_port_stats);
goto get_drv_stats;
}
err = hinic_get_phy_port_stats(nic_dev->hwdev, port_stats);
if (err)
nicif_err(nic_dev, drv, netdev,
"Failed to get port stats from fw\n");
for (j = 0; j < ARRAY_LEN(hinic_port_stats); j++, i++) {
p = (char *)(port_stats) + hinic_port_stats[j].offset;
data[i] = (hinic_port_stats[j].size ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
kfree(port_stats);
}
get_drv_stats:
get_drv_queue_stats(nic_dev, data + i);
}
static void hinic_get_strings(struct net_device *netdev,
u32 stringset, u8 *data)
{
u16 i = 0, j = 0;
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
char *p = (char *)data;
switch (stringset) {
case ETH_SS_TEST:
memcpy(data, *hinic_test_strings, sizeof(hinic_test_strings));
return;
case ETH_SS_STATS:
for (i = 0; i < ARRAY_LEN(hinic_netdev_stats); i++) {
memcpy(p, hinic_netdev_stats[i].name,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
for (i = 0; i < ARRAY_LEN(hinic_nic_dev_stats); i++) {
memcpy(p, hinic_nic_dev_stats[i].name, ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
if (!HINIC_FUNC_IS_VF(nic_dev->hwdev)) {
for (i = 0; i < ARRAY_LEN(hinic_function_stats); i++) {
memcpy(p, hinic_function_stats[i].name,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
}
if (!HINIC_FUNC_IS_VF(nic_dev->hwdev) &&
FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) {
for (i = 0; i < ARRAY_LEN(hinic_port_stats); i++) {
memcpy(p, hinic_port_stats[i].name,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
}
for (i = 0; i < nic_dev->num_qps; i++) {
for (j = 0; j < ARRAY_LEN(hinic_tx_queue_stats); j++) {
sprintf(p, hinic_tx_queue_stats[j].name, i);
p += ETH_GSTRING_LEN;
}
}
for (i = 0; i < nic_dev->num_qps; i++) {
for (j = 0; j < ARRAY_LEN(hinic_rx_queue_stats); j++) {
sprintf(p, hinic_rx_queue_stats[j].name, i);
p += ETH_GSTRING_LEN;
}
}
return;
default:
nicif_err(nic_dev, drv, netdev,
"Invalid string set %d", stringset);
return;
}
}
static int hinic_set_phys_id(struct net_device *netdev,
enum ethtool_phys_id_state state)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
u8 port;
int err;
if (!FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) {
nicif_err(nic_dev, drv, netdev, "Current function don't support to set LED status\n");
return -EOPNOTSUPP;
}
port = hinic_physical_port_id(nic_dev->hwdev);
switch (state) {
case ETHTOOL_ID_ACTIVE:
err = hinic_set_led_status(nic_dev->hwdev, port,
HINIC_LED_TYPE_LINK,
HINIC_LED_MODE_FORCE_2HZ);
if (err)
nicif_err(nic_dev, drv, netdev,
"Set LED blinking in 2HZ failed\n");
else
nicif_info(nic_dev, drv, netdev,
"Set LED blinking in 2HZ success\n");
break;
case ETHTOOL_ID_INACTIVE:
err = hinic_reset_led_status(nic_dev->hwdev, port);
if (err)
nicif_err(nic_dev, drv, netdev,
"Reset LED to original status failed\n");
else
nicif_info(nic_dev, drv, netdev,
"Reset LED to original status success\n");
break;
default:
return -EOPNOTSUPP;
}
if (err)
return -EFAULT;
else
return 0;
}
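/* Usage sketch: `ethtool -p <dev> [N]` exercises hinic_set_phys_id(); the
 * port LED is forced to blink at 2Hz while identification is active and is
 * restored to its original state afterwards. Only functions with port
 * setting capability support this.
 */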
static void hinic_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
struct nic_pause_config nic_pause = {0};
int err;
err = hinic_get_pause_info(nic_dev->hwdev, &nic_pause);
if (err) {
nicif_err(nic_dev, drv, netdev,
"Failed to get pauseparam from hw\n");
} else {
pause->autoneg = nic_pause.auto_neg;
pause->rx_pause = nic_pause.rx_pause;
pause->tx_pause = nic_pause.tx_pause;
}
}
static int hinic_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
struct nic_pause_config nic_pause = {0};
struct nic_port_info port_info = {0};
int err;
if (!FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) {
nicif_err(nic_dev, drv, netdev, "Not support to set pause parameters\n");
return -EOPNOTSUPP;
}
err = hinic_get_port_info(nic_dev->hwdev, &port_info);
if (err) {
nicif_err(nic_dev, drv, netdev,
"Failed to get auto-negotiation state\n");
return -EFAULT;
}
if (pause->autoneg != port_info.autoneg_state) {
nicif_err(nic_dev, drv, netdev,
"To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
return -EOPNOTSUPP;
}
nic_pause.auto_neg = pause->autoneg;
nic_pause.rx_pause = pause->rx_pause;
nic_pause.tx_pause = pause->tx_pause;
err = hinic_set_pause_info(nic_dev->hwdev, nic_pause);
if (err) {
nicif_err(nic_dev, drv, netdev, "Failed to set pauseparam\n");
return err;
}
nicif_info(nic_dev, drv, netdev, "Set pause options, tx: %s, rx: %s\n",
pause->tx_pause ? "on" : "off",
pause->rx_pause ? "on" : "off");
return 0;
}
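/* Usage sketch: pause settings map to `ethtool -A <dev> rx on|off tx
 * on|off`. Changing autoneg through this path is rejected; as the message
 * above says, `ethtool -s <dev> autoneg on|off` must be used instead.
 */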
static int hinic_run_lp_test(struct hinic_nic_dev *nic_dev, u32 test_time)
{
u32 i;
u8 j;
u32 cnt = test_time * 5;
struct sk_buff *skb = NULL;
struct sk_buff *skb_tmp = NULL;
u8 *test_data = NULL;
u8 *lb_test_rx_buf = nic_dev->lb_test_rx_buf;
struct net_device *netdev = nic_dev->netdev;
skb_tmp = alloc_skb(LP_PKT_LEN, GFP_ATOMIC);
if (!skb_tmp) {
nicif_err(nic_dev, drv, netdev,
"Alloc xmit skb template failed for loopback test\n");
return -ENOMEM;
}
test_data = __skb_put(skb_tmp, LP_PKT_LEN);
memset(test_data, 0xFF, (2 * ETH_ALEN));
test_data[ETH_ALEN] = 0xFE;
test_data[2 * ETH_ALEN] = 0x08;
test_data[2 * ETH_ALEN + 1] = 0x0;
for (i = ETH_HLEN; i < LP_PKT_LEN; i++)
test_data[i] = i & 0xFF;
skb_tmp->queue_mapping = 0;
skb_tmp->ip_summed = CHECKSUM_COMPLETE;
skb_tmp->dev = netdev;
for (i = 0; i < cnt; i++) {
nic_dev->lb_test_rx_idx = 0;
memset(lb_test_rx_buf, 0, (LP_PKT_CNT * LP_PKT_LEN));
for (j = 0; j < LP_PKT_CNT; j++) {
skb = pskb_copy(skb_tmp, GFP_ATOMIC);
if (!skb) {
dev_kfree_skb_any(skb_tmp);
nicif_err(nic_dev, drv, netdev,
"Copy skb failed for loopback test\n");
return -ENOMEM;
}
/* mark index for every pkt */
skb->data[LP_PKT_LEN - 1] = j;
if (hinic_lb_xmit_frame(skb, netdev)) {
dev_kfree_skb_any(skb);
dev_kfree_skb_any(skb_tmp);
nicif_err(nic_dev, drv, netdev,
"Xmit pkt failed for loopback test\n");
return -EBUSY;
}
}
/* wait till all pkt received to rx buffer */
msleep(200);
for (j = 0; j < LP_PKT_CNT; j++) {
if (memcmp((lb_test_rx_buf + (j * LP_PKT_LEN)),
skb_tmp->data, (LP_PKT_LEN - 1)) ||
(*(lb_test_rx_buf + ((j * LP_PKT_LEN) +
(LP_PKT_LEN - 1))) != j)) {
dev_kfree_skb_any(skb_tmp);
nicif_err(nic_dev, drv, netdev,
"Compare pkt failed in loopback test(index=0x%02x, data[%d]=0x%02x)\n",
(j + (i * LP_PKT_CNT)),
(LP_PKT_LEN - 1),
*(lb_test_rx_buf +
(((j * LP_PKT_LEN) +
(LP_PKT_LEN - 1)))));
return -EIO;
}
}
}
dev_kfree_skb_any(skb_tmp);
nicif_info(nic_dev, drv, netdev, "Loopback test succeed\n");
return 0;
}
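/* Self-test driver. hinic_run_lp_test() above builds a test frame with a
 * broadcast destination MAC, a source MAC whose first byte is 0xFE,
 * EtherType 0x0800 and an incrementing payload pattern, stamps the packet
 * index into the last payload byte, transmits LP_PKT_CNT copies per round
 * and compares the loopback receive buffer byte for byte. hinic_lp_test()
 * below wraps it: unless the user requested an external loopback
 * (ETH_TEST_FL_EXTERNAL_LB), the port is switched into internal loopback,
 * the compare buffer is allocated, the test runs for test_time seconds
 * (five rounds per second) and the link state is restored afterwards.
 */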
void hinic_lp_test(struct net_device *netdev, struct ethtool_test *eth_test,
u64 *data, u32 test_time)
{
int err = 0;
u8 link_status = 0;
u8 *lb_test_rx_buf = NULL;
struct ethtool_test test = {0};
enum diag_test_index test_index = 0;
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
memset(data, 0, (DIAG_TEST_MAX * sizeof(u64)));
/* Loopback test is not supported while the netdev is closed */
if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
nicif_err(nic_dev, drv, netdev,
"Loopback test is not supported when the netdev is closed\n");
eth_test->flags |= ETH_TEST_FL_FAILED;
data[PORT_DOWN_ERR_IDX] = 1;
return;
}
test.flags = eth_test->flags;
if (test_time == 0)
test_time = LP_DEFAULT_TIME;
netif_carrier_off(netdev);
netif_tx_disable(netdev);
if (!(test.flags & ETH_TEST_FL_EXTERNAL_LB)) {
test_index = INTERNAL_LP_TEST;
if (hinic_set_loopback_mode(nic_dev->hwdev, true)) {
nicif_err(nic_dev, drv, netdev,
"Failed to set port loopback mode before loopback test\n");
err = 1;
goto resume_link;
}
} else {
test_index = EXTERNAL_LP_TEST;
}
lb_test_rx_buf = vmalloc(LP_PKT_CNT * LP_PKT_LEN);
if (!lb_test_rx_buf) {
nicif_err(nic_dev, drv, netdev,
"Failed to alloc rx buffer for loopback test\n");
err = 1;
} else {
nic_dev->lb_test_rx_buf = lb_test_rx_buf;
nic_dev->lb_pkt_len = LP_PKT_LEN;
set_bit(HINIC_LP_TEST, &nic_dev->flags);
if (hinic_run_lp_test(nic_dev, test_time))
err = 1;
clear_bit(HINIC_LP_TEST, &nic_dev->flags);
msleep(100);
vfree(lb_test_rx_buf);
nic_dev->lb_test_rx_buf = NULL;
}
if (!(test.flags & ETH_TEST_FL_EXTERNAL_LB)) {
if (hinic_set_loopback_mode(nic_dev->hwdev, false)) {
nicif_err(nic_dev, drv, netdev,
"Failed to cancel port loopback mode after loopback test\n");
err = 1;
goto resume_link;
}
}
resume_link:
if (err) {
eth_test->flags |= ETH_TEST_FL_FAILED;
data[test_index] = 1;
}
netif_tx_wake_all_queues(netdev);
err = hinic_get_link_state(nic_dev->hwdev, &link_status);
if (!err && link_status)
netif_carrier_on(netdev);
}
static void hinic_diag_test(struct net_device *netdev,
struct ethtool_test *eth_test, u64 *data)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
if (!FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) {
nicif_err(nic_dev, drv, netdev, "Current function don't support self test\n");
return;
}
hinic_lp_test(netdev, eth_test, data, 0);
}
static int hinic_get_module_info(struct net_device *netdev,
struct ethtool_modinfo *modinfo)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
u8 sfp_type;
u8 sfp_type_ext;
int err;
err = hinic_get_sfp_type(nic_dev->hwdev, &sfp_type, &sfp_type_ext);
if (err)
return err;
switch (sfp_type) {
case MODULE_TYPE_SFP:
modinfo->type = ETH_MODULE_SFF_8472;
modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
break;
case MODULE_TYPE_QSFP:
modinfo->type = ETH_MODULE_SFF_8436;
modinfo->eeprom_len = STD_SFP_INFO_MAX_SIZE;
break;
case MODULE_TYPE_QSFP_PLUS:
if (sfp_type_ext >= 0x3) {
modinfo->type = ETH_MODULE_SFF_8636;
modinfo->eeprom_len = STD_SFP_INFO_MAX_SIZE;
} else {
modinfo->type = ETH_MODULE_SFF_8436;
modinfo->eeprom_len = STD_SFP_INFO_MAX_SIZE;
}
break;
case MODULE_TYPE_QSFP28:
modinfo->type = ETH_MODULE_SFF_8636;
modinfo->eeprom_len = STD_SFP_INFO_MAX_SIZE;
break;
default:
nicif_warn(nic_dev, drv, netdev,
"Optical module unknown: 0x%x\n", sfp_type);
return -EINVAL;
}
return 0;
}
static int hinic_get_module_eeprom(struct net_device *netdev,
struct ethtool_eeprom *ee, u8 *data)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
u8 sfp_data[STD_SFP_INFO_MAX_SIZE];
u16 len;
int err;
if (!ee->len || ((ee->len + ee->offset) > STD_SFP_INFO_MAX_SIZE))
return -EINVAL;
memset(data, 0, ee->len);
err = hinic_get_sfp_eeprom(nic_dev->hwdev, sfp_data, &len);
if (err)
return err;
memcpy(data, sfp_data + ee->offset, ee->len);
return 0;
}
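/* Usage sketch: `ethtool -m <dev>` reads the optical module EEPROM through
 * these two callbacks. hinic_get_module_info() reports the SFF layout
 * (8472 for SFP, 8436/8636 for QSFP variants) and hinic_get_module_eeprom()
 * copies the requested window out of a firmware-provided snapshot of at
 * most STD_SFP_INFO_MAX_SIZE bytes.
 */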
static int set_l4_rss_hash_ops(struct ethtool_rxnfc *cmd,
struct nic_rss_type *rss_type)
{
u8 rss_l4_en = 0;
switch (cmd->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
rss_l4_en = 0;
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
rss_l4_en = 1;
break;
default:
return -EINVAL;
}
switch (cmd->flow_type) {
case TCP_V4_FLOW:
rss_type->tcp_ipv4 = rss_l4_en;
break;
case TCP_V6_FLOW:
rss_type->tcp_ipv6 = rss_l4_en;
break;
case UDP_V4_FLOW:
rss_type->udp_ipv4 = rss_l4_en;
break;
case UDP_V6_FLOW:
rss_type->udp_ipv6 = rss_l4_en;
break;
default:
return -EINVAL;
}
return 0;
}
static int hinic_set_rss_hash_opts(struct hinic_nic_dev *nic_dev,
struct ethtool_rxnfc *cmd)
{
struct nic_rss_type *rss_type = &nic_dev->rss_type;
int err;
if (!test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) {
cmd->data = 0;
nicif_err(nic_dev, drv, nic_dev->netdev,
"RSS is disable, not support to set flow-hash\n");
return -EOPNOTSUPP;
}
/* RSS does not support anything other than hashing
* to queues on src and dst IPs and ports
*/
if (cmd->data & ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 |
RXH_L4_B_2_3))
return -EINVAL;
/* We need at least the IP SRC and DEST fields for hashing */
if (!(cmd->data & RXH_IP_SRC) || !(cmd->data & RXH_IP_DST))
return -EINVAL;
err = hinic_get_rss_type(nic_dev->hwdev,
nic_dev->rss_tmpl_idx, rss_type);
if (err) {
nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to get rss type\n");
return -EFAULT;
}
switch (cmd->flow_type) {
case TCP_V4_FLOW:
case TCP_V6_FLOW:
case UDP_V4_FLOW:
case UDP_V6_FLOW:
err = set_l4_rss_hash_ops(cmd, rss_type);
if (err)
return err;
break;
case IPV4_FLOW:
rss_type->ipv4 = 1;
break;
case IPV6_FLOW:
rss_type->ipv6 = 1;
break;
default:
nicif_err(nic_dev, drv, nic_dev->netdev,
"Unsupported flow type\n");
return -EINVAL;
}
err = hinic_set_rss_type(nic_dev->hwdev, nic_dev->rss_tmpl_idx,
*rss_type);
if (err) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Failed to set rss type\n");
return -EFAULT;
}
nicif_info(nic_dev, drv, nic_dev->netdev, "Set rss hash options success\n");
return 0;
}
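/* Usage sketch: flow-hash configuration maps to
 * `ethtool -N <dev> rx-flow-hash tcp4 sdfn`, where "sd" selects
 * RXH_IP_SRC | RXH_IP_DST and "fn" adds the L4 port fields
 * (RXH_L4_B_0_1 | RXH_L4_B_2_3). IP src/dst hashing is mandatory here, and
 * L4 hashing can only be enabled or disabled as a pair.
 */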
static int hinic_get_rss_hash_opts(struct hinic_nic_dev *nic_dev,
struct ethtool_rxnfc *cmd)
{
struct nic_rss_type rss_type = {0};
int err;
cmd->data = 0;
if (!test_bit(HINIC_RSS_ENABLE, &nic_dev->flags))
return 0;
err = hinic_get_rss_type(nic_dev->hwdev, nic_dev->rss_tmpl_idx,
&rss_type);
if (err) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Failed to get rss type\n");
return err;
}
cmd->data = RXH_IP_SRC | RXH_IP_DST;
switch (cmd->flow_type) {
case TCP_V4_FLOW:
if (rss_type.tcp_ipv4)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case TCP_V6_FLOW:
if (rss_type.tcp_ipv6)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case UDP_V4_FLOW:
if (rss_type.udp_ipv4)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case UDP_V6_FLOW:
if (rss_type.udp_ipv6)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case IPV4_FLOW:
case IPV6_FLOW:
break;
default:
nicif_err(nic_dev, drv, nic_dev->netdev,
"Unsupported flow type\n");
cmd->data = 0;
return -EINVAL;
}
return 0;
}
static int hinic_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
int err = 0;
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
cmd->data = nic_dev->num_qps;
break;
case ETHTOOL_GRXFH:
err = hinic_get_rss_hash_opts(nic_dev, cmd);
break;
default:
err = -EOPNOTSUPP;
break;
}
return err;
}
static int hinic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
int err = 0;
switch (cmd->cmd) {
case ETHTOOL_SRXFH:
err = hinic_set_rss_hash_opts(nic_dev, cmd);
break;
default:
err = -EOPNOTSUPP;
break;
}
return err;
}
static u32 hinic_get_rxfh_indir_size(struct net_device *netdev)
{
return HINIC_RSS_INDIR_SIZE;
}
static int __set_rss_rxfh(struct net_device *netdev,
const u32 *indir, const u8 *key)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
int err, i;
if (indir) {
if (!nic_dev->rss_indir_user) {
nic_dev->rss_indir_user =
kzalloc(sizeof(u32) * HINIC_RSS_INDIR_SIZE,
GFP_KERNEL);
if (!nic_dev->rss_indir_user) {
nicif_err(nic_dev, drv, netdev,
"Failed to alloc memory for rss_indir_usr\n");
return -ENOMEM;
}
}
memcpy(nic_dev->rss_indir_user, indir,
sizeof(u32) * HINIC_RSS_INDIR_SIZE);
err = hinic_rss_set_indir_tbl(nic_dev->hwdev,
nic_dev->rss_tmpl_idx, indir);
if (err) {
nicif_err(nic_dev, drv, netdev,
"Failed to set rss indir table\n");
return -EFAULT;
}
nicif_info(nic_dev, drv, netdev, "Change rss indir success\n");
}
if (key) {
if (!nic_dev->rss_hkey_user) {
/* We allocate twice the space for the hash key;
 * the second half holds the key in big-endian
 * format.
 */
nic_dev->rss_hkey_user =
kzalloc(HINIC_RSS_KEY_SIZE * 2, GFP_KERNEL);
if (!nic_dev->rss_hkey_user) {
nicif_err(nic_dev, drv, netdev,
"Failed to alloc memory for rss_hkey_user\n");
return -ENOMEM;
}
/* The second half is for the big-endian hash key */
nic_dev->rss_hkey_user_be =
(u32 *)(nic_dev->rss_hkey_user +
HINIC_RSS_KEY_SIZE);
}
memcpy(nic_dev->rss_hkey_user, key, HINIC_RSS_KEY_SIZE);
/* make a copy of the key, and convert it to Big Endian */
memcpy(nic_dev->rss_hkey_user_be, key, HINIC_RSS_KEY_SIZE);
for (i = 0; i < HINIC_RSS_KEY_SIZE / 4; i++)
nic_dev->rss_hkey_user_be[i] =
cpu_to_be32(nic_dev->rss_hkey_user_be[i]);
err = hinic_rss_set_template_tbl(nic_dev->hwdev,
nic_dev->rss_tmpl_idx, key);
if (err) {
nicif_err(nic_dev, drv, netdev, "Failed to set rss key\n");
return -EFAULT;
}
nicif_info(nic_dev, drv, netdev, "Change rss key success\n");
}
return 0;
}
static u32 hinic_get_rxfh_key_size(struct net_device *netdev)
{
return HINIC_RSS_KEY_SIZE;
}
static int hinic_get_rxfh(struct net_device *netdev,
u32 *indir, u8 *key, u8 *hfunc)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
int err = 0;
if (!test_bit(HINIC_RSS_ENABLE, &nic_dev->flags))
return -EOPNOTSUPP;
if (hfunc) {
u8 hash_engine_type = 0;
err = hinic_rss_get_hash_engine(nic_dev->hwdev,
nic_dev->rss_tmpl_idx,
&hash_engine_type);
if (err)
return -EFAULT;
*hfunc = hash_engine_type ? ETH_RSS_HASH_TOP : ETH_RSS_HASH_XOR;
}
if (indir) {
err = hinic_rss_get_indir_tbl(nic_dev->hwdev,
nic_dev->rss_tmpl_idx, indir);
if (err)
return -EFAULT;
}
if (key)
err = hinic_rss_get_template_tbl(nic_dev->hwdev,
nic_dev->rss_tmpl_idx, key);
return err;
}
static int hinic_set_rxfh(struct net_device *netdev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
int err = 0;
if (!test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Not support to set rss parameters when rss is disable\n");
return -EOPNOTSUPP;
}
if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags) && indir) {
nicif_err(nic_dev, drv, netdev,
"Not support to set indir when DCB is enabled\n");
return -EOPNOTSUPP;
}
if (hfunc != ETH_RSS_HASH_NO_CHANGE) {
if (hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR) {
nicif_err(nic_dev, drv, netdev,
"Not support to set hfunc type except TOP and XOR\n");
return -EOPNOTSUPP;
}
nic_dev->rss_hash_engine = (hfunc == ETH_RSS_HASH_XOR) ?
HINIC_RSS_HASH_ENGINE_TYPE_XOR :
HINIC_RSS_HASH_ENGINE_TYPE_TOEP;
err = hinic_rss_set_hash_engine
(nic_dev->hwdev, nic_dev->rss_tmpl_idx,
nic_dev->rss_hash_engine);
if (err)
return -EFAULT;
nicif_info(nic_dev, drv, netdev,
"Change hfunc to RSS_HASH_%s success\n",
(hfunc == ETH_RSS_HASH_XOR) ? "XOR" : "TOP");
}
err = __set_rss_rxfh(netdev, indir, key);
return err;
}
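/* Usage sketch: RSS reconfiguration maps to `ethtool -X <dev> ...`, e.g.
 * `ethtool -X <dev> hfunc xor` to switch the hash engine or
 * `ethtool -X <dev> equal 8` to spread the indirection table over the
 * first 8 queues. Only the Toeplitz (TOP) and XOR hash functions are
 * accepted, and the indirection table cannot be changed while DCB is
 * enabled.
 */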
static const struct ethtool_ops hinic_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_PKT_RATE_RX_USECS,
.get_link_ksettings = hinic_get_link_ksettings,
.set_link_ksettings = hinic_set_link_ksettings,
.get_drvinfo = hinic_get_drvinfo,
.get_msglevel = hinic_get_msglevel,
.set_msglevel = hinic_set_msglevel,
.nway_reset = hinic_nway_reset,
.get_link = ethtool_op_get_link,
.get_ringparam = hinic_get_ringparam,
.set_ringparam = hinic_set_ringparam,
.get_pauseparam = hinic_get_pauseparam,
.set_pauseparam = hinic_set_pauseparam,
.get_sset_count = hinic_get_sset_count,
.get_coalesce = hinic_get_coalesce,
.set_coalesce = hinic_set_coalesce,
.get_per_queue_coalesce = hinic_get_per_queue_coalesce,
.set_per_queue_coalesce = hinic_set_per_queue_coalesce,
.get_ethtool_stats = hinic_get_ethtool_stats,
.get_strings = hinic_get_strings,
.set_phys_id = hinic_set_phys_id,
.self_test = hinic_diag_test,
.get_rxnfc = hinic_get_rxnfc,
.set_rxnfc = hinic_set_rxnfc,
.get_channels = hinic_get_channels,
.set_channels = hinic_set_channels,
.get_module_info = hinic_get_module_info,
.get_module_eeprom = hinic_get_module_eeprom,
.get_rxfh_indir_size = hinic_get_rxfh_indir_size,
.get_rxfh_key_size = hinic_get_rxfh_key_size,
.get_rxfh = hinic_get_rxfh,
.set_rxfh = hinic_set_rxfh,
};
static const struct ethtool_ops hinicvf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_PKT_RATE_RX_USECS,
.get_link_ksettings = hinic_get_link_ksettings,
.get_drvinfo = hinic_get_drvinfo,
.get_msglevel = hinic_get_msglevel,
.set_msglevel = hinic_set_msglevel,
.get_link = ethtool_op_get_link,
.get_ringparam = hinic_get_ringparam,
.set_ringparam = hinic_set_ringparam,
.get_sset_count = hinic_get_sset_count,
.get_coalesce = hinic_get_coalesce,
.set_coalesce = hinic_set_coalesce,
.get_per_queue_coalesce = hinic_get_per_queue_coalesce,
.set_per_queue_coalesce = hinic_set_per_queue_coalesce,
.get_ethtool_stats = hinic_get_ethtool_stats,
.get_strings = hinic_get_strings,
.get_rxnfc = hinic_get_rxnfc,
.set_rxnfc = hinic_set_rxnfc,
.get_channels = hinic_get_channels,
.set_channels = hinic_set_channels,
.get_rxfh_indir_size = hinic_get_rxfh_indir_size,
.get_rxfh_key_size = hinic_get_rxfh_key_size,
.get_rxfh = hinic_get_rxfh,
.set_rxfh = hinic_set_rxfh,
};
void hinic_set_ethtool_ops(struct net_device *netdev)
{
SET_ETHTOOL_OPS(netdev, &hinic_ethtool_ops);
}
void hinicvf_set_ethtool_ops(struct net_device *netdev)
{
SET_ETHTOOL_OPS(netdev, &hinicvf_ethtool_ops);
} /*lint -e766*/
// SPDX-License-Identifier: GPL-2.0
/* Huawei HiNIC PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/rtc.h>
#include <linux/aer.h>
#include <linux/debugfs.h>
#include "ossl_knl.h"
#include "hinic_hw_mgmt.h"
#include "hinic_hw.h"
#include "hinic_lld.h"
#include "hinic_pci_id_tbl.h"
#include "hinic_nic_dev.h"
#include "hinic_sriov.h"
#include "hinic_dbgtool_knl.h"
#include "hinic_nictool.h"
#define HINIC_PCI_CFG_REG_BAR 0
#define HINIC_PCI_INTR_REG_BAR 2
#define HINIC_PCI_DB_BAR 4
#define HINIC_PCI_VENDOR_ID 0x19e5
#define SELF_TEST_BAR_ADDR_OFFSET 0x883c
#define HINIC_SECOND_BASE 1000
#define HINIC_SYNC_YEAR_OFFSET 1900
#define HINIC_SYNC_MONTH_OFFSET 1
#define HINIC_MINUTE_BASE 60
#define HINIC_WAIT_TOOL_CNT_TIMEOUT 10000
#define HINIC_WAIT_SRIOV_CFG_TIMEOUT 15000
#define HINIC_DRV_DESC "Huawei(R) Intelligent Network Interface Card Driver"
#define HINICVF_DRV_DESC "Huawei(R) Intelligent Virtual Function Network Driver"
MODULE_AUTHOR("Huawei Technologies CO., Ltd");
MODULE_DESCRIPTION(HINIC_DRV_DESC);
MODULE_VERSION(HINIC_DRV_VERSION);
MODULE_LICENSE("GPL");
#ifdef CONFIG_PCI_IOV
static bool disable_vf_load;
module_param(disable_vf_load, bool, 0444);
MODULE_PARM_DESC(disable_vf_load,
"Disable virtual functions probe or not - default is false");
#endif /* CONFIG_PCI_IOV */
enum {
HINIC_FUNC_IN_REMOVE = BIT(0),
HINIC_FUNC_PRB_ERR = BIT(1),
HINIC_FUNC_PRB_DELAY = BIT(2),
};
/* Per-pci-device private structure */
struct hinic_pcidev {
struct pci_dev *pcidev;
void *hwdev;
struct card_node *chip_node;
struct hinic_lld_dev lld_dev;
/* Record the service object addresses,
 * such as hinic_dev, toe_dev and fc_dev
 */
void *uld_dev[SERVICE_T_MAX];
/* Record the service object name */
char uld_dev_name[SERVICE_T_MAX][IFNAMSIZ];
/* Node in the global list used by the driver to manage
 * all function devices
 */
struct list_head node;
void __iomem *cfg_reg_base;
void __iomem *intr_reg_base;
u64 db_base_phy;
void __iomem *db_base;
#if defined(__aarch64__)
void __iomem *dwqe_mapping;
#else
struct io_mapping *dwqe_mapping;
#endif
/* lock for attach/detach uld */
struct mutex pdev_mutex;
struct hinic_sriov_info sriov_info;
u32 init_state;
/* set while a uld driver is processing an event */
unsigned long state;
struct pci_device_id id;
unsigned long flag;
struct work_struct slave_nic_work;
struct workqueue_struct *slave_nic_init_workq;
struct delayed_work slave_nic_init_dwork;
enum hinic_chip_mode chip_mode;
bool nic_cur_enable;
bool nic_des_enable;
struct timer_list syncfw_time_timer;
};
#define HINIC_EVENT_PROCESS_TIMEOUT 10000
#define FIND_BIT(num, n) (((num) & (1UL << (n))) ? 1 : 0)
#define SET_BIT(num, n) ((num) | (1UL << (n)))
#define CLEAR_BIT(num, n) ((num) & (~(1UL << (n))))
#define MAX_CARD_ID 64
static u64 card_bit_map;
LIST_HEAD(g_hinic_chip_list);
struct hinic_uld_info g_uld_info[SERVICE_T_MAX] = { {0} };
static const char *s_uld_name[SERVICE_T_MAX] = {
"nic", "ovs", "roce", "toe", "iwarp", "fc", "fcoe", "migrate"};
enum hinic_lld_status {
HINIC_NODE_CHANGE = BIT(0),
};
struct hinic_lld_lock {
/* lock for chip list */
struct mutex lld_mutex;
unsigned long status;
atomic_t dev_ref_cnt;
};
struct hinic_lld_lock g_lld_lock;
#define WAIT_LLD_DEV_HOLD_TIMEOUT (10 * 60 * 1000) /* 10 minutes */
#define WAIT_LLD_DEV_NODE_CHANGED (10 * 60 * 1000) /* 10 minutes */
#define WAIT_LLD_DEV_REF_CNT_EMPTY (2 * 60 * 1000) /* 2 minutes */
/* nodes in chip_node are about to change; tools and drivers can't get a
 * node in this situation
 */
static void lld_lock_chip_node(void)
{
u32 loop_cnt;
mutex_lock(&g_lld_lock.lld_mutex);
loop_cnt = 0;
while (loop_cnt < WAIT_LLD_DEV_NODE_CHANGED) {
if (!test_and_set_bit(HINIC_NODE_CHANGE, &g_lld_lock.status))
break;
loop_cnt++;
if (loop_cnt % 10000 == 0)
pr_warn("Wait for lld node change complete for %us\n",
loop_cnt / 1000);
usleep_range(900, 1000);
}
if (loop_cnt == WAIT_LLD_DEV_NODE_CHANGED)
pr_warn("Wait for lld node change complete timeout when try to get lld lock\n");
loop_cnt = 0;
while (loop_cnt < WAIT_LLD_DEV_REF_CNT_EMPTY) {
if (!atomic_read(&g_lld_lock.dev_ref_cnt))
break;
loop_cnt++;
if (loop_cnt % 10000 == 0)
pr_warn("Wait for lld dev unused for %us, reference count: %d\n",
loop_cnt / 1000,
atomic_read(&g_lld_lock.dev_ref_cnt));
usleep_range(900, 1000);
}
if (loop_cnt == WAIT_LLD_DEV_REF_CNT_EMPTY)
pr_warn("Wait for lld dev unused timeout\n");
mutex_unlock(&g_lld_lock.lld_mutex);
}
static void lld_unlock_chip_node(void)
{
clear_bit(HINIC_NODE_CHANGE, &g_lld_lock.status);
}
/* When tools or other drivers want to get node of chip_node, use this function
* to prevent node be freed
*/
static void lld_dev_hold(void)
{
u32 loop_cnt = 0;
/* ensure no chip node is currently being changed */
mutex_lock(&g_lld_lock.lld_mutex);
while (loop_cnt < WAIT_LLD_DEV_HOLD_TIMEOUT) {
if (!test_bit(HINIC_NODE_CHANGE, &g_lld_lock.status))
break;
loop_cnt++;
if (loop_cnt % 10000 == 0)
pr_warn("Wait lld node change complete for %us\n",
loop_cnt / 1000);
usleep_range(900, 1000);
}
if (loop_cnt == WAIT_LLD_DEV_HOLD_TIMEOUT)
pr_warn("Wait lld node change complete timeout when try to hode lld dev\n");
atomic_inc(&g_lld_lock.dev_ref_cnt);
mutex_unlock(&g_lld_lock.lld_mutex);
}
static void lld_dev_put(void)
{
atomic_dec(&g_lld_lock.dev_ref_cnt);
}
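/* Locking protocol sketch: g_lld_lock implements a coarse reader/writer
 * scheme over the global chip list. Writers (probe/remove paths) call
 * lld_lock_chip_node(), which sets HINIC_NODE_CHANGE and waits for
 * dev_ref_cnt to drain; readers bracket their list walks with
 * lld_dev_hold()/lld_dev_put(), which wait for any in-flight change and
 * then bump the reference count. Both sides poll in roughly 1ms steps and
 * only warn on timeout instead of failing, so a stuck peer degrades to
 * noisy logs rather than a deadlock.
 */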
static void hinic_lld_lock_init(void)
{
mutex_init(&g_lld_lock.lld_mutex);
atomic_set(&g_lld_lock.dev_ref_cnt, 0);
}
static atomic_t tool_used_cnt;
void hinic_tool_cnt_inc(void)
{
atomic_inc(&tool_used_cnt);
}
void hinic_tool_cnt_dec(void)
{
atomic_dec(&tool_used_cnt);
}
static int attach_uld(struct hinic_pcidev *dev, enum hinic_service_type type,
struct hinic_uld_info *uld_info)
{
void *uld_dev = NULL;
int err;
mutex_lock(&dev->pdev_mutex);
if (dev->init_state < HINIC_INIT_STATE_HWDEV_INITED) {
sdk_err(&dev->pcidev->dev, "SDK init failed, can not attach uld\n");
err = -EFAULT;
goto out_unlock;
}
if (dev->uld_dev[type]) {
sdk_err(&dev->pcidev->dev,
"%s driver has attached to pcie device\n",
s_uld_name[type]);
err = 0;
goto out_unlock;
}
if ((hinic_get_func_mode(dev->hwdev) == FUNC_MOD_NORMAL_HOST) &&
type == SERVICE_T_OVS && !hinic_support_ovs(dev->hwdev, NULL)) {
sdk_warn(&dev->pcidev->dev, "Dev not support %s\n",
s_uld_name[type]);
err = 0;
goto out_unlock;
}
err = uld_info->probe(&dev->lld_dev, &uld_dev, dev->uld_dev_name[type]);
if (err || !uld_dev) {
sdk_err(&dev->pcidev->dev,
"Failed to add object for %s driver to pcie device\n",
s_uld_name[type]);
goto probe_failed;
}
dev->uld_dev[type] = uld_dev;
mutex_unlock(&dev->pdev_mutex);
sdk_info(&dev->pcidev->dev,
"Attach %s driver to pcie device succeed\n", s_uld_name[type]);
return 0;
probe_failed:
out_unlock:
mutex_unlock(&dev->pdev_mutex);
return err;
}
static void detach_uld(struct hinic_pcidev *dev, enum hinic_service_type type)
{
struct hinic_uld_info *uld_info = &g_uld_info[type];
u32 cnt = 0;
mutex_lock(&dev->pdev_mutex);
if (!dev->uld_dev[type]) {
mutex_unlock(&dev->pdev_mutex);
return;
}
while (cnt < HINIC_EVENT_PROCESS_TIMEOUT) {
if (!test_and_set_bit(type, &dev->state))
break;
usleep_range(900, 1000);
cnt++;
}
uld_info->remove(&dev->lld_dev, dev->uld_dev[type]);
dev->uld_dev[type] = NULL;
if (cnt < HINIC_EVENT_PROCESS_TIMEOUT)
clear_bit(type, &dev->state);
sdk_info(&dev->pcidev->dev,
"Detach %s driver from pcie device succeed\n",
s_uld_name[type]);
mutex_unlock(&dev->pdev_mutex);
}
static void attach_ulds(struct hinic_pcidev *dev)
{
enum hinic_service_type type;
for (type = SERVICE_T_OVS; type < SERVICE_T_MAX; type++) {
if (g_uld_info[type].probe)
attach_uld(dev, type, &g_uld_info[type]);
}
}
static void detach_ulds(struct hinic_pcidev *dev)
{
enum hinic_service_type type;
for (type = SERVICE_T_MAX - 1; type > SERVICE_T_NIC; type--) {
if (g_uld_info[type].probe)
detach_uld(dev, type);
}
}
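/* Ordering note: attach_ulds() walks the service types upwards from
 * SERVICE_T_OVS, and detach_ulds() walks downwards towards (but not
 * including) SERVICE_T_NIC, so non-NIC services detach in reverse attach
 * order. The NIC ULD itself is managed separately through
 * hinic_attach_nic()/hinic_detach_nic() below.
 */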
int hinic_register_uld(enum hinic_service_type type,
struct hinic_uld_info *uld_info)
{
struct card_node *chip_node;
struct hinic_pcidev *dev;
if (type >= SERVICE_T_MAX) {
pr_err("Unknown type %d of up layer driver to register\n",
type);
return -EINVAL;
}
if (!uld_info || !uld_info->probe || !uld_info->remove) {
pr_err("Invalid information of %s driver to register\n",
s_uld_name[type]);
return -EINVAL;
}
lld_dev_hold();
if (g_uld_info[type].probe) {
pr_err("%s driver has registered\n", s_uld_name[type]);
lld_dev_put();
return -EINVAL;
}
memcpy(&g_uld_info[type], uld_info, sizeof(*uld_info));
list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
list_for_each_entry(dev, &chip_node->func_list, node) {
if (attach_uld(dev, type, uld_info)) {
sdk_err(&dev->pcidev->dev,
"Attach %s driver to pcie device failed\n",
s_uld_name[type]);
continue;
}
}
}
lld_dev_put();
pr_info("Register %s driver succeed\n", s_uld_name[type]);
return 0;
}
EXPORT_SYMBOL(hinic_register_uld);
void hinic_unregister_uld(enum hinic_service_type type)
{
struct card_node *chip_node;
struct hinic_pcidev *dev;
struct hinic_uld_info *uld_info;
if (type >= SERVICE_T_MAX) {
pr_err("Unknown type %d of up layer driver to unregister\n",
type);
return;
}
lld_dev_hold();
list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
list_for_each_entry(dev, &chip_node->func_list, node) {
detach_uld(dev, type);
}
}
uld_info = &g_uld_info[type];
memset(uld_info, 0, sizeof(*uld_info));
lld_dev_put();
}
EXPORT_SYMBOL(hinic_unregister_uld);
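/* Registration sketch for an upper-layer driver (illustrative only; the
 * callback names below are hypothetical and not part of this file):
 *
 *   static struct hinic_uld_info my_roce_uld = {
 *       .probe  = my_roce_probe,    - called once per attached function
 *       .remove = my_roce_remove,   - called on detach or unregister
 *   };
 *
 *   err = hinic_register_uld(SERVICE_T_ROCE, &my_roce_uld);
 *   ...
 *   hinic_unregister_uld(SERVICE_T_ROCE);
 *
 * Registration immediately attaches the driver to every function already
 * on the chip list; unregistration detaches it from all of them first.
 */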
#define HINIC_SYNFW_TIME_PERIOD (60 * 60 * 1000)
static void hinic_syncfw_timer_handler(struct timer_list *t)
{
struct hinic_pcidev *pci_adapter = from_timer(pci_adapter, t,
syncfw_time_timer);
u64 tv_msec;
tv_msec = ktime_to_ms(ktime_get_real());
hinic_sync_time_async(pci_adapter->hwdev, tv_msec);
mod_timer(&pci_adapter->syncfw_time_timer,
jiffies + msecs_to_jiffies(HINIC_SYNFW_TIME_PERIOD));
}
void hinic_init_syncfw_timer(struct hinic_pcidev *pci_adapter)
{
if (hinic_get_func_mode(pci_adapter->hwdev) != FUNC_MOD_NORMAL_HOST ||
hinic_func_type(pci_adapter->hwdev) != TYPE_PPF)
return;
timer_setup(&pci_adapter->syncfw_time_timer,
hinic_syncfw_timer_handler, 0);
pci_adapter->syncfw_time_timer.expires =
jiffies + msecs_to_jiffies(HINIC_SYNFW_TIME_PERIOD);
add_timer(&pci_adapter->syncfw_time_timer);
}
void hinic_destroy_syncfw_timer(struct hinic_pcidev *pci_adapter)
{
if (hinic_get_func_mode(pci_adapter->hwdev) != FUNC_MOD_NORMAL_HOST ||
hinic_func_type(pci_adapter->hwdev) != TYPE_PPF)
return;
del_timer_sync(&pci_adapter->syncfw_time_timer);
}
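/* Only the PPF of the normal host keeps firmware wall-clock time in sync:
 * hinic_syncfw_timer_handler() pushes the current UTC time in milliseconds
 * once per HINIC_SYNFW_TIME_PERIOD, i.e. once an hour.
 */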
static void hinic_sync_time_to_fmw(struct hinic_pcidev *pdev_pri)
{
struct tm tm = {0};
u64 tv_msec;
int err;
tv_msec = ktime_to_ms(ktime_get_real());
err = hinic_sync_time(pdev_pri->hwdev, tv_msec);
if (err) {
sdk_err(&pdev_pri->pcidev->dev, "Synchronize UTC time to firmware failed, errno:%d.\n",
err);
} else {
time64_to_tm(tv_msec / MSEC_PER_SEC, 0, &tm);
sdk_info(&pdev_pri->pcidev->dev, "Synchronize UTC time to firmware succeed. UTC time %ld-%02d-%02d %02d:%02d:%02d.\n",
tm.tm_year + HINIC_SYNC_YEAR_OFFSET,
tm.tm_mon + HINIC_SYNC_MONTH_OFFSET,
tm.tm_mday, tm.tm_hour,
tm.tm_min, tm.tm_sec);
}
}
enum hinic_ver_incompat_mode {
/* New driver is not compatible with old firmware */
VER_INCOMP_NEW_DRV_OLD_FW,
/* New firmware is not compatible with old driver */
VER_INCOMP_NEW_FW_OLD_DRV,
};
struct hinic_version_incompat {
char *version;
char *advise;
u32 incompat_mode;
};
struct hinic_version_incompat ver_incompat_table[] = {
{
.version = "1.2.2.0",
.advise = "Mechanism of cos changed",
.incompat_mode = BIT(VER_INCOMP_NEW_DRV_OLD_FW),
},
{
.version = "1.2.3.0",
.advise = "Driver get sevice mode from firmware",
.incompat_mode = BIT(VER_INCOMP_NEW_DRV_OLD_FW),
},
};
#define MAX_VER_FIELD_LEN 4
#define MAX_VER_SPLIT_NUM 4
static void __version_split(const char *str, int *split_num,
char rst[][MAX_VER_FIELD_LEN])
{
const char delim = '.';
const char *src;
int cnt = 0;
u16 idx, end, token_len;
idx = 0;
while (idx < strlen(str)) {
for (end = idx; end < strlen(str); end++) {
if (*(str + end) == delim)
break; /* found */
}
if (end != idx) {
token_len = min_t(u16, end - idx,
MAX_VER_FIELD_LEN - 1);
src = str + idx;
memcpy(rst[cnt], src, token_len);
if (++cnt >= MAX_VER_SPLIT_NUM)
break;
}
idx = end + 1; /* skip delim */
}
*split_num = cnt;
}
int hinic_version_cmp(char *ver1, char *ver2)
{
char ver1_split[MAX_VER_SPLIT_NUM][MAX_VER_FIELD_LEN] = { {0} };
char ver2_split[MAX_VER_SPLIT_NUM][MAX_VER_FIELD_LEN] = { {0} };
int split1_num, split2_num;
int ver1_num, ver2_num;
int split, err;
/* For compatibility with older firmware versions */
if (ver1[0] == 'B')
return -1;
if (ver2[0] == 'B')
return 1;
__version_split(ver1, &split1_num, ver1_split);
__version_split(ver2, &split2_num, ver2_split);
if (split1_num != MAX_VER_SPLIT_NUM ||
split2_num != MAX_VER_SPLIT_NUM) {
pr_err("Invalid version %s or %s\n", ver1, ver2);
return 0;
}
for (split = 0; split < MAX_VER_SPLIT_NUM; split++) {
err = kstrtoint(ver1_split[split], 0, &ver1_num);
err |= kstrtoint(ver2_split[split], 0, &ver2_num);
if (err) {
pr_err("Failed to parse version: %s, %s\n",
ver1_split[split], ver2_split[split]);
return 0;
}
if (ver1_num > ver2_num)
return 1;
else if (ver1_num < ver2_num)
return -1;
}
return 0;
}
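/* Worked example: hinic_version_cmp("1.2.3.0", "1.2.2.0") returns 1
 * because the third field differs (3 > 2). "B..." build versions always
 * sort lowest, and any string that does not split into exactly four
 * numeric fields compares as equal (0) after logging an error.
 */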
static int __version_mismatch(struct hinic_pcidev *pcidev, char *cur_fw_ver,
char *cur_drv_ver,
struct hinic_version_incompat *ver_incompat,
int start_entry)
{
struct hinic_version_incompat *ver_incmp_tmp;
int fw_ver_comp;
int i, num_entry;
fw_ver_comp = hinic_version_cmp(cur_fw_ver, ver_incompat->version);
if (fw_ver_comp <= 0) {
/* Check if the new driver is compatible with the old fw */
for (i = start_entry; i >= 0; i--) {
ver_incmp_tmp = &ver_incompat_table[i];
if (hinic_version_cmp(cur_fw_ver,
ver_incmp_tmp->version) >= 0)
break; /* No need to check any further */
if (ver_incmp_tmp->incompat_mode &
BIT(VER_INCOMP_NEW_DRV_OLD_FW)) {
sdk_err(&pcidev->pcidev->dev,
"Version incompatible: %s, please update firmware to %s, or use %s driver\n",
ver_incmp_tmp->advise,
cur_drv_ver, cur_fw_ver);
return -EINVAL;
}
}
return 0;
}
/* check if the old driver is compatible with the new firmware */
num_entry = (int)sizeof(ver_incompat_table) /
(int)sizeof(ver_incompat_table[0]);
for (i = start_entry + 1; i < num_entry; i++) {
ver_incmp_tmp = &ver_incompat_table[i];
if (hinic_version_cmp(cur_fw_ver, ver_incmp_tmp->version) < 0)
break; /* No need to check any further */
if (ver_incmp_tmp->incompat_mode &
BIT(VER_INCOMP_NEW_FW_OLD_DRV)) {
sdk_err(&pcidev->pcidev->dev,
"Version incompatible: %s, please update driver to %s, or use %s firmware\n",
ver_incmp_tmp->advise,
cur_fw_ver, cur_drv_ver);
return -EINVAL;
}
}
return 0;
}
static void hinic_ignore_minor_version(char *version)
{
char ver_split[MAX_VER_SPLIT_NUM][MAX_VER_FIELD_LEN] = { {0} };
int max_ver_len, split_num = 0;
int err;
__version_split(version, &split_num, ver_split);
if (split_num != MAX_VER_SPLIT_NUM)
return;
max_ver_len = (int)strlen(version) + 1;
memset(version, 0, max_ver_len);
err = snprintf(version, max_ver_len, "%s.%s.%s.0",
ver_split[0], ver_split[1], ver_split[2]);
if (err <= 0 || err >= max_ver_len)
pr_err("Failed to snprintf version, function return(%d) and dest_len(%d)\n",
err, max_ver_len);
}
static int hinic_detect_version_compatible(struct hinic_pcidev *pcidev)
{
struct hinic_fw_version fw_ver = { {0} };
struct hinic_version_incompat *ver_incompat;
char drv_ver[MAX_VER_SPLIT_NUM * MAX_VER_FIELD_LEN] = {0};
int idx, num_entry, drv_ver_len;
int ver_mismatch;
int err;
err = hinic_get_fw_version(pcidev->hwdev, &fw_ver);
if (err) {
sdk_err(&pcidev->pcidev->dev,
"Failed to get firmware version\n");
return err;
}
drv_ver_len = min_t(int, (int)sizeof(drv_ver) - 1,
(int)strlen(HINIC_DRV_VERSION));
memcpy(drv_ver, HINIC_DRV_VERSION, drv_ver_len);
sdk_info(&pcidev->pcidev->dev, "Version info: driver %s, firmware %s\n",
drv_ver, fw_ver.mgmt_ver);
hinic_ignore_minor_version(fw_ver.mgmt_ver);
hinic_ignore_minor_version(drv_ver);
ver_mismatch = hinic_version_cmp(drv_ver, fw_ver.mgmt_ver);
if (!ver_mismatch)
return 0;
num_entry = (int)sizeof(ver_incompat_table) /
(int)sizeof(ver_incompat_table[0]);
for (idx = num_entry - 1; idx >= 0; idx--) {
ver_incompat = &ver_incompat_table[idx];
if (hinic_version_cmp(drv_ver, ver_incompat->version) < 0)
continue;
/* Found an older driver version in the table */
return __version_mismatch(pcidev, fw_ver.mgmt_ver, drv_ver,
ver_incompat, idx);
}
return 0;
}
struct mctp_hdr {
u16 resp_code;
u16 reason_code;
u32 manufacture_id;
u8 cmd_rsvd;
u8 major_cmd;
u8 sub_cmd;
u8 spc_field;
};
struct mctp_bdf_info {
struct mctp_hdr hdr; /* spc_field: pf index */
u8 rsvd;
u8 bus;
u8 device;
u8 function;
};
enum mctp_resp_code {
/* COMMAND_COMPLETED = 0, */
/* COMMAND_FAILED = 1, */
/* COMMAND_UNAVALILABLE = 2, */
COMMAND_UNSUPPORTED = 3,
};
static void __mctp_set_hdr(struct mctp_hdr *hdr,
struct hinic_mctp_host_info *mctp_info)
{
u32 manufacture_id = 0x07DB;
hdr->cmd_rsvd = 0;
hdr->major_cmd = mctp_info->major_cmd;
hdr->sub_cmd = mctp_info->sub_cmd;
hdr->manufacture_id = cpu_to_be32(manufacture_id);
hdr->resp_code = cpu_to_be16(hdr->resp_code);
hdr->reason_code = cpu_to_be16(hdr->reason_code);
}
static void __mctp_get_bdf(struct hinic_pcidev *pci_adapter,
struct hinic_mctp_host_info *mctp_info)
{
struct pci_dev *pdev = pci_adapter->pcidev;
struct mctp_bdf_info *bdf_info = mctp_info->data;
bdf_info->bus = pdev->bus->number;
bdf_info->device = (u8)(pdev->devfn >> 3); /* 5bits in devfn */
bdf_info->function = (u8)(pdev->devfn & 0x7); /* 3bits in devfn */
memset(&bdf_info->hdr, 0, sizeof(bdf_info->hdr));
__mctp_set_hdr(&bdf_info->hdr, mctp_info);
bdf_info->hdr.spc_field =
(u8)hinic_global_func_id_hw(pci_adapter->hwdev);
mctp_info->data_len = sizeof(*bdf_info);
}
#define MCTP_MAJOR_CMD_PUBLIC 0x0
#define MCTP_MAJOR_CMD_NIC 0x1
#define MCTP_PUBLIC_SUB_CMD_BDF 0x1
#define MCTP_PUBLIC_SUB_CMD_DRV 0x4
#define MCTP_NIC_SUB_CMD_IP 0x1
static void __mctp_get_host_info(struct hinic_pcidev *dev,
struct hinic_mctp_host_info *mctp_info)
{
struct mctp_hdr *hdr;
switch ((((u16)mctp_info->major_cmd) << 8) | mctp_info->sub_cmd) {
case (MCTP_MAJOR_CMD_PUBLIC << 8 | MCTP_PUBLIC_SUB_CMD_BDF):
__mctp_get_bdf(dev, mctp_info);
break;
default:
hdr = mctp_info->data;
hdr->reason_code = COMMAND_UNSUPPORTED;
__mctp_set_hdr(hdr, mctp_info);
mctp_info->data_len = sizeof(*hdr);
break;
}
}
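/* MCTP dispatch note: commands are demultiplexed on the 16-bit key
 * (major_cmd << 8) | sub_cmd. Only the public BDF query
 * (MCTP_MAJOR_CMD_PUBLIC / MCTP_PUBLIC_SUB_CMD_BDF) is handled here; every
 * other command is answered with reason code COMMAND_UNSUPPORTED.
 */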
static bool __is_pcidev_match_chip_name(const char *ifname,
struct hinic_pcidev *dev,
struct card_node *chip_node,
enum func_type type)
{
if (!strncmp(chip_node->chip_name, ifname, IFNAMSIZ)) {
if (type == TYPE_UNKNOWN) {
if (dev->init_state < HINIC_INIT_STATE_HW_PART_INITED)
return false;
} else {
if (dev->init_state >=
HINIC_INIT_STATE_HW_PART_INITED &&
hinic_func_type(dev->hwdev) != type)
return false;
}
return true;
}
return false;
}
static struct hinic_pcidev *_get_pcidev_by_chip_name(char *ifname,
enum func_type type)
{
struct card_node *chip_node;
struct hinic_pcidev *dev;
lld_dev_hold();
list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
list_for_each_entry(dev, &chip_node->func_list, node) {
if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
continue;
if (__is_pcidev_match_chip_name(ifname, dev, chip_node,
type)) {
lld_dev_put();
return dev;
}
}
}
lld_dev_put();
return NULL;
}
static struct hinic_pcidev *hinic_get_pcidev_by_chip_name(char *ifname)
{
struct hinic_pcidev *dev, *dev_hw_init;
/* find hw init device first */
dev_hw_init = _get_pcidev_by_chip_name(ifname, TYPE_UNKNOWN);
if (dev_hw_init) {
if (hinic_func_type(dev_hw_init->hwdev) == TYPE_PPF)
return dev_hw_init;
}
dev = _get_pcidev_by_chip_name(ifname, TYPE_PPF);
if (dev) {
if (dev_hw_init && dev_hw_init->init_state >= dev->init_state)
return dev_hw_init;
return dev;
}
dev = _get_pcidev_by_chip_name(ifname, TYPE_PF);
if (dev) {
if (dev_hw_init && dev_hw_init->init_state >= dev->init_state)
return dev_hw_init;
return dev;
}
dev = _get_pcidev_by_chip_name(ifname, TYPE_VF);
if (dev)
return dev;
return NULL;
}
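/* Lookup preference: the device found during hw init (TYPE_UNKNOWN match)
 * wins if it is a PPF, or if it is at least as far initialized as the best
 * typed match; otherwise a PPF is preferred over a PF, and a PF over a VF.
 */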
static bool __is_pcidev_match_dev_name(const char *ifname,
struct hinic_pcidev *dev,
enum hinic_service_type type)
{
struct hinic_nic_dev *nic_dev;
enum hinic_service_type i;
if (type == SERVICE_T_MAX) {
for (i = SERVICE_T_OVS; i < SERVICE_T_MAX; i++) {
if (!strncmp(dev->uld_dev_name[i], ifname, IFNAMSIZ))
return true;
}
} else {
if (!strncmp(dev->uld_dev_name[type], ifname, IFNAMSIZ))
return true;
}
nic_dev = dev->uld_dev[SERVICE_T_NIC];
if (nic_dev) {
if (!strncmp(nic_dev->netdev->name, ifname, IFNAMSIZ))
return true;
}
return false;
}
static struct hinic_pcidev *
hinic_get_pcidev_by_dev_name(char *ifname, enum hinic_service_type type)
{
struct card_node *chip_node;
struct hinic_pcidev *dev;
lld_dev_hold();
list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
list_for_each_entry(dev, &chip_node->func_list, node) {
if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
continue;
if (__is_pcidev_match_dev_name(ifname, dev, type)) {
lld_dev_put();
return dev;
}
}
}
lld_dev_put();
return NULL;
}
static struct hinic_pcidev *hinic_get_pcidev_by_ifname(char *ifname)
{
struct hinic_pcidev *dev;
/* support searching for the hwdev by chip name, net device name,
 * or fc device name
 */
/* Find pcidev by chip_name first */
dev = hinic_get_pcidev_by_chip_name(ifname);
if (dev)
return dev;
/* If ifname is not a chip name,
 * find the pcidev by FC name or net device name
 */
return hinic_get_pcidev_by_dev_name(ifname, SERVICE_T_MAX);
}
int hinic_get_chip_name_by_hwdev(void *hwdev, char *ifname)
{
struct card_node *chip_node;
struct hinic_pcidev *dev;
if (!hwdev || !ifname)
return -EINVAL;
lld_dev_hold();
list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
list_for_each_entry(dev, &chip_node->func_list, node) {
if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
continue;
if (dev->hwdev == hwdev) {
strncpy(ifname, chip_node->chip_name,
IFNAMSIZ - 1);
ifname[IFNAMSIZ - 1] = 0;
lld_dev_put();
return 0;
}
}
}
lld_dev_put();
return -ENXIO;
}
EXPORT_SYMBOL(hinic_get_chip_name_by_hwdev);
static struct card_node *hinic_get_chip_node_by_hwdev(const void *hwdev)
{
struct card_node *chip_node = NULL;
struct card_node *node_tmp = NULL;
struct hinic_pcidev *dev;
if (!hwdev)
return NULL;
lld_dev_hold();
list_for_each_entry(node_tmp, &g_hinic_chip_list, node) {
if (!chip_node) {
list_for_each_entry(dev, &node_tmp->func_list, node) {
if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
continue;
if (dev->hwdev == hwdev) {
chip_node = node_tmp;
break;
}
}
}
}
lld_dev_put();
return chip_node;
}
int hinic_get_pf_uld_array(struct pci_dev *pdev, u32 *dev_cnt, void *array[])
{
struct hinic_pcidev *dev = pci_get_drvdata(pdev);
struct card_node *chip_node;
u32 cnt;
if (!dev || !hinic_support_nic(dev->hwdev, NULL))
return -EINVAL;
lld_dev_hold();
cnt = 0;
chip_node = dev->chip_node;
list_for_each_entry(dev, &chip_node->func_list, node) {
if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
continue;
if (dev->init_state < HINIC_INIT_STATE_NIC_INITED)
continue;
if (HINIC_FUNC_IS_VF(dev->hwdev))
continue;
array[cnt] = dev->uld_dev[SERVICE_T_NIC];
cnt++;
}
lld_dev_put();
*dev_cnt = cnt;
return 0;
}
int hinic_get_chip_cos_up_map(struct pci_dev *pdev, bool *is_setted, u8 *cos_up)
{
struct hinic_pcidev *dev = pci_get_drvdata(pdev);
struct card_node *chip_node;
if (!dev)
return -EINVAL;
chip_node = dev->chip_node;
*is_setted = chip_node->cos_up_setted;
if (chip_node->cos_up_setted)
memcpy(cos_up, chip_node->cos_up, sizeof(chip_node->cos_up));
return 0;
}
int hinic_set_chip_cos_up_map(struct pci_dev *pdev, u8 *cos_up)
{
struct hinic_pcidev *dev = pci_get_drvdata(pdev);
struct card_node *chip_node;
if (!dev)
return -EINVAL;
chip_node = dev->chip_node;
chip_node->cos_up_setted = true;
memcpy(chip_node->cos_up, cos_up, sizeof(chip_node->cos_up));
return 0;
}
void *hinic_get_hwdev_by_ifname(char *ifname)
{
struct hinic_pcidev *dev;
dev = hinic_get_pcidev_by_ifname(ifname);
if (dev)
return dev->hwdev;
return NULL;
}
void *hinic_get_uld_dev_by_ifname(char *ifname, enum hinic_service_type type)
{
struct hinic_pcidev *dev;
if (type >= SERVICE_T_MAX) {
pr_err("Service type: %d is error\n", type);
return NULL;
}
dev = hinic_get_pcidev_by_dev_name(ifname, type);
if (dev)
return dev->uld_dev[type];
return NULL;
}
void *hinic_get_uld_by_chip_name(char *ifname, enum hinic_service_type type)
{
struct hinic_pcidev *dev;
/* support searching by chip name, net device name,
 * or fc device name; find the pcidev by chip_name first
 */
dev = hinic_get_pcidev_by_chip_name(ifname);
if (dev)
return dev->uld_dev[type];
return NULL;
}
/* NOTICE: nictool can't use this function, because it can't keep the
 * tool context mutually exclusive with the remove context
 */
void *hinic_get_ppf_uld_by_pdev(struct pci_dev *pdev,
enum hinic_service_type type)
{
struct hinic_pcidev *pci_adapter;
struct card_node *chip_node;
struct hinic_pcidev *dev;
if (!pdev)
return NULL;
pci_adapter = pci_get_drvdata(pdev);
if (!pci_adapter)
return NULL;
chip_node = pci_adapter->chip_node;
lld_dev_hold();
list_for_each_entry(dev, &chip_node->func_list, node) {
/* can't test the HINIC_FUNC_IN_REMOVE bit in dev->flag, because
 * TOE calls this function when detaching the toe driver
 */
if (hinic_func_type(dev->hwdev) == TYPE_PPF) {
lld_dev_put();
return dev->uld_dev[type];
}
}
lld_dev_put();
return NULL;
}
EXPORT_SYMBOL(hinic_get_ppf_uld_by_pdev);
void *hinic_get_ppf_hwdev_by_pdev(struct pci_dev *pdev)
{
struct hinic_pcidev *pci_adapter;
struct card_node *chip_node;
struct hinic_pcidev *dev;
if (!pdev)
return NULL;
pci_adapter = pci_get_drvdata(pdev);
if (!pci_adapter)
return NULL;
chip_node = pci_adapter->chip_node;
lld_dev_hold();
list_for_each_entry(dev, &chip_node->func_list, node) {
if (dev->hwdev && hinic_func_type(dev->hwdev) == TYPE_PPF) {
lld_dev_put();
return dev->hwdev;
}
}
lld_dev_put();
return NULL;
}
void hinic_get_all_chip_id(void *id_info)
{
struct nic_card_id *card_id = (struct nic_card_id *)id_info;
struct card_node *chip_node;
int i = 0;
int id = 0, err;
lld_dev_hold();
list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
err = sscanf(chip_node->chip_name, HINIC_CHIP_NAME "%d", &id);
if (err <= 0)
pr_err("Failed to get hinic id\n");
card_id->id[i] = id;
i++;
}
lld_dev_put();
card_id->num = i;
}
static bool __is_func_valid(struct hinic_pcidev *dev)
{
if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
return false;
if (dev->init_state < HINIC_INIT_STATE_HWDEV_INITED)
return false;
if (HINIC_FUNC_IS_VF(dev->hwdev))
return false;
return true;
}
bool hinic_is_valid_bar_addr(u64 offset)
{
struct card_node *chip_node = NULL;
struct hinic_pcidev *dev;
lld_dev_hold();
list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
list_for_each_entry(dev, &chip_node->func_list, node) {
if (hinic_func_type(dev->hwdev) == TYPE_VF)
continue;
if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
continue;
if (offset == pci_resource_start(dev->pcidev, 0)) {
lld_dev_put();
return true;
}
}
}
lld_dev_put();
return false;
}
void hinic_get_card_info(void *hwdev, void *bufin)
{
struct card_node *chip_node = NULL;
struct card_info *info = (struct card_info *)bufin;
struct hinic_nic_dev *nic_dev;
struct hinic_pcidev *dev;
void *fun_hwdev;
u32 i = 0;
info->pf_num = 0;
chip_node = hinic_get_chip_node_by_hwdev(hwdev);
if (!chip_node)
return;
lld_dev_hold();
list_for_each_entry(dev, &chip_node->func_list, node) {
if (!__is_func_valid(dev))
continue;
fun_hwdev = dev->hwdev;
if (((hinic_support_fc(fun_hwdev, NULL)) ||
(hinic_support_fcoe(fun_hwdev, NULL))) &&
dev->uld_dev[SERVICE_T_FC]) {
info->pf[i].pf_type |= (u32)BIT(SERVICE_T_FC);
strlcpy(info->pf[i].name,
dev->uld_dev_name[SERVICE_T_FC], IFNAMSIZ);
}
if (hinic_support_nic(fun_hwdev, NULL)) {
nic_dev = dev->uld_dev[SERVICE_T_NIC];
if (nic_dev) {
info->pf[i].pf_type |= (u32)BIT(SERVICE_T_NIC);
strlcpy(info->pf[i].name,
nic_dev->netdev->name, IFNAMSIZ);
}
}
if ((hinic_support_ovs(fun_hwdev, NULL)) &&
dev->uld_dev[SERVICE_T_OVS])
info->pf[i].pf_type |= (u32)BIT(SERVICE_T_OVS);
if ((hinic_support_roce(fun_hwdev, NULL)) &&
dev->uld_dev[SERVICE_T_ROCE])
info->pf[i].pf_type |= (u32)BIT(SERVICE_T_ROCE);
if ((hinic_support_toe(fun_hwdev, NULL)) &&
dev->uld_dev[SERVICE_T_TOE])
info->pf[i].pf_type |= (u32)BIT(SERVICE_T_TOE);
if (hinic_func_for_mgmt(fun_hwdev))
strlcpy(info->pf[i].name, "FOR_MGMT", IFNAMSIZ);
if (hinic_func_for_pt(fun_hwdev))
info->pf[i].pf_type |= (u32)BIT(SERVICE_T_PT);
if (hinic_func_for_hwpt(fun_hwdev))
info->pf[i].pf_type |= (u32)BIT(SERVICE_T_HWPT);
strlcpy(info->pf[i].bus_info, pci_name(dev->pcidev),
sizeof(info->pf[i].bus_info));
info->pf_num++;
i = info->pf_num;
}
lld_dev_put();
}
void hinic_get_card_func_info_by_card_name(const char *chip_name,
struct hinic_card_func_info
*card_func)
{
struct card_node *chip_node = NULL;
struct hinic_pcidev *dev;
struct func_pdev_info *pdev_info;
card_func->num_pf = 0;
lld_dev_hold();
list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
if (strncmp(chip_node->chip_name, chip_name, IFNAMSIZ))
continue;
list_for_each_entry(dev, &chip_node->func_list, node) {
if (hinic_func_type(dev->hwdev) == TYPE_VF)
continue;
if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
continue;
pdev_info = &card_func->pdev_info[card_func->num_pf];
pdev_info->bar0_size = pci_resource_len(dev->pcidev, 0);
pdev_info->bar0_phy_addr =
pci_resource_start(dev->pcidev, 0);
card_func->num_pf++;
if (card_func->num_pf >= MAX_SIZE)
break;
}
}
lld_dev_put();
}
int hinic_get_device_id(void *hwdev, u16 *dev_id)
{
struct card_node *chip_node = NULL;
struct hinic_pcidev *dev;
u16 vendor_id = 0;
u16 device_id = 0;
chip_node = hinic_get_chip_node_by_hwdev(hwdev);
if (!chip_node)
return -ENODEV;
lld_dev_hold();
list_for_each_entry(dev, &chip_node->func_list, node) {
if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
continue;
pci_read_config_word(dev->pcidev, 0, &vendor_id);
if (vendor_id == HINIC_PCI_VENDOR_ID) {
pci_read_config_word(dev->pcidev, 2, &device_id);
break;
}
}
lld_dev_put();
*dev_id = device_id;
return 0;
}
int hinic_get_pf_id(void *hwdev, u32 port_id, u32 *pf_id, u32 *isvalid)
{
struct card_node *chip_node = NULL;
struct hinic_pcidev *dev;
chip_node = hinic_get_chip_node_by_hwdev(hwdev);
if (!chip_node)
return -ENODEV;
lld_dev_hold();
list_for_each_entry(dev, &chip_node->func_list, node) {
if (hinic_physical_port_id(dev->hwdev) == port_id) {
*pf_id = hinic_global_func_id(dev->hwdev);
*isvalid = 1;
break;
}
}
lld_dev_put();
return 0;
}
void get_fc_devname(char *devname)
{
struct card_node *chip_node;
struct hinic_pcidev *dev;
lld_dev_hold();
list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
list_for_each_entry(dev, &chip_node->func_list, node) {
if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
continue;
if (dev->init_state < HINIC_INIT_STATE_NIC_INITED)
continue;
if (HINIC_FUNC_IS_VF(dev->hwdev))
continue;
if (dev->uld_dev[SERVICE_T_FC]) {
strlcpy(devname,
dev->uld_dev_name[SERVICE_T_FC],
IFNAMSIZ);
lld_dev_put();
return;
}
}
}
lld_dev_put();
}
enum hinic_init_state hinic_get_init_state(struct pci_dev *pdev)
{
struct hinic_pcidev *dev = pci_get_drvdata(pdev);
if (dev)
return dev->init_state;
return HINIC_INIT_STATE_NONE;
}
enum hinic_init_state hinic_get_init_state_by_ifname(char *ifname)
{
struct hinic_pcidev *dev;
dev = hinic_get_pcidev_by_ifname(ifname);
if (dev)
return dev->init_state;
pr_err("Can not get device %s\n", ifname);
return HINIC_INIT_STATE_NONE;
}
int hinic_get_self_test_result(char *ifname, u32 *result)
{
struct hinic_pcidev *dev = NULL;
dev = hinic_get_pcidev_by_ifname(ifname);
if (!dev) {
pr_err("Get pcidev failed by ifname: %s\n", ifname);
return -EFAULT;
}
*result = be32_to_cpu(readl((u8 __iomem *)(dev->cfg_reg_base) +
SELF_TEST_BAR_ADDR_OFFSET));
return 0;
}
struct net_device *hinic_get_netdev_by_lld(struct hinic_lld_dev *lld_dev)
{
struct hinic_pcidev *pci_adapter;
struct hinic_nic_dev *nic_dev;
if (!lld_dev || !hinic_support_nic(lld_dev->hwdev, NULL))
return NULL;
pci_adapter = pci_get_drvdata(lld_dev->pdev);
nic_dev = pci_adapter->uld_dev[SERVICE_T_NIC];
if (!nic_dev) {
sdk_err(&pci_adapter->pcidev->dev,
"There's no net device attached on the pci device\n");
return NULL;
}
return nic_dev->netdev;
}
EXPORT_SYMBOL(hinic_get_netdev_by_lld);
void *hinic_get_hwdev_by_netdev(struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev;
/* validate netdev before touching its private area */
if (!netdev)
return NULL;
nic_dev = netdev_priv(netdev);
if (!nic_dev)
return NULL;
return nic_dev->hwdev;
}
EXPORT_SYMBOL(hinic_get_hwdev_by_netdev);
struct net_device *hinic_get_netdev_by_pcidev(struct pci_dev *pdev)
{
struct hinic_pcidev *pci_adapter;
struct hinic_nic_dev *nic_dev;
if (!pdev)
return NULL;
pci_adapter = pci_get_drvdata(pdev);
if (!pci_adapter || !hinic_support_nic(pci_adapter->hwdev, NULL))
return NULL;
nic_dev = pci_adapter->uld_dev[SERVICE_T_NIC];
if (!nic_dev) {
sdk_err(&pci_adapter->pcidev->dev,
"There`s no net device attached on the pci device\n");
return NULL;
}
return nic_dev->netdev;
}
EXPORT_SYMBOL(hinic_get_netdev_by_pcidev);
struct hinic_sriov_info *hinic_get_sriov_info_by_pcidev(struct pci_dev *pdev)
{
struct hinic_pcidev *pci_adapter = pci_get_drvdata(pdev);
return &pci_adapter->sriov_info;
}
bool hinic_is_in_host(void)
{
struct card_node *chip_node;
struct hinic_pcidev *dev;
lld_dev_hold();
list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
list_for_each_entry(dev, &chip_node->func_list, node) {
if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
continue;
if (dev->init_state > HINIC_INIT_STATE_PCI_INITED &&
hinic_func_type(dev->hwdev) != TYPE_VF) {
lld_dev_put();
return true;
}
}
}
lld_dev_put();
return false;
}
int hinic_attach_nic(struct hinic_lld_dev *lld_dev)
{
struct hinic_pcidev *dev;
if (!lld_dev)
return -EINVAL;
dev = container_of(lld_dev, struct hinic_pcidev, lld_dev);
return attach_uld(dev, SERVICE_T_NIC, &g_uld_info[SERVICE_T_NIC]);
}
EXPORT_SYMBOL(hinic_attach_nic);
void hinic_detach_nic(struct hinic_lld_dev *lld_dev)
{
struct hinic_pcidev *dev;
if (!lld_dev)
return;
dev = container_of(lld_dev, struct hinic_pcidev, lld_dev);
detach_uld(dev, SERVICE_T_NIC);
}
EXPORT_SYMBOL(hinic_detach_nic);
int hinic_attach_roce(struct hinic_lld_dev *lld_dev)
{
struct hinic_pcidev *dev;
if (!lld_dev)
return -EINVAL;
dev = container_of(lld_dev, struct hinic_pcidev, lld_dev);
return attach_uld(dev, SERVICE_T_ROCE, &g_uld_info[SERVICE_T_ROCE]);
}
EXPORT_SYMBOL(hinic_attach_roce);
void hinic_detach_roce(struct hinic_lld_dev *lld_dev)
{
struct hinic_pcidev *dev;
if (!lld_dev)
return;
dev = container_of(lld_dev, struct hinic_pcidev, lld_dev);
detach_uld(dev, SERVICE_T_ROCE);
}
EXPORT_SYMBOL(hinic_detach_roce);
static int __set_nic_rss_state(struct hinic_pcidev *dev, bool enable)
{
void *nic_uld;
int err = 0;
if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
return 0;
nic_uld = dev->uld_dev[SERVICE_T_NIC];
if (!hinic_support_nic(dev->hwdev, NULL) || !nic_uld)
return 0;
if (hinic_func_type(dev->hwdev) == TYPE_VF)
return 0;
if (enable)
err = hinic_enable_func_rss(nic_uld);
else
err = hinic_disable_func_rss(nic_uld);
if (err) {
sdk_err(&dev->pcidev->dev, "Failed to %s rss\n",
enable ? "enable" : "disable");
}
return err;
}
int hinic_disable_nic_rss(struct hinic_lld_dev *lld_dev)
{
struct hinic_pcidev *adapter;
if (!lld_dev)
return -EINVAL;
adapter = container_of(lld_dev, struct hinic_pcidev, lld_dev);
return __set_nic_rss_state(adapter, false);
}
EXPORT_SYMBOL(hinic_disable_nic_rss);
int hinic_enable_nic_rss(struct hinic_lld_dev *lld_dev)
{
struct hinic_pcidev *adapter;
if (!lld_dev)
return -EINVAL;
adapter = container_of(lld_dev, struct hinic_pcidev, lld_dev);
return __set_nic_rss_state(adapter, true);
}
EXPORT_SYMBOL(hinic_enable_nic_rss);
struct pci_device_id *hinic_get_pci_device_id(struct pci_dev *pdev)
{
struct hinic_pcidev *adapter;
if (!pdev)
return NULL;
adapter = pci_get_drvdata(pdev);
return &adapter->id;
}
EXPORT_SYMBOL(hinic_get_pci_device_id);
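/* Query the nic-enable state of this function from the management CPU
 * and attach or detach the NIC ULD accordingly.
 */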
static int __set_nic_func_state(struct hinic_pcidev *pci_adapter)
{
struct pci_dev *pdev = pci_adapter->pcidev;
u16 func_id;
int err;
bool enable_nic;
err = hinic_global_func_id_get(pci_adapter->hwdev, &func_id);
if (err)
return err;
err = hinic_get_func_nic_enable(pci_adapter->hwdev, func_id,
&enable_nic);
if (err) {
sdk_err(&pdev->dev, "Failed to get nic state\n");
return err;
}
if (enable_nic) {
if (is_multi_bm_slave(pci_adapter->hwdev))
hinic_set_vf_dev_cap(pci_adapter->hwdev);
err = attach_uld(pci_adapter, SERVICE_T_NIC,
&g_uld_info[SERVICE_T_NIC]);
if (err) {
sdk_err(&pdev->dev, "Failed to initialize NIC\n");
return err;
}
if (pci_adapter->init_state < HINIC_INIT_STATE_NIC_INITED)
pci_adapter->init_state = HINIC_INIT_STATE_NIC_INITED;
} else {
detach_uld(pci_adapter, SERVICE_T_NIC);
}
return 0;
}
int hinic_ovs_set_vf_nic_state(struct hinic_lld_dev *lld_dev, u16 vf_func_id,
bool en)
{
struct hinic_pcidev *dev, *des_dev;
struct hinic_nic_dev *uld_dev;
int err = -EFAULT;
if (!lld_dev)
return -EINVAL;
dev = pci_get_drvdata(lld_dev->pdev);
if (!dev)
return -EFAULT;
	/* find the pci_adapter that owns func_idx and enable or disable its nic */
lld_dev_hold();
list_for_each_entry(des_dev, &dev->chip_node->func_list, node) {
		if (test_bit(HINIC_FUNC_IN_REMOVE, &des_dev->flag))
			continue;
if (des_dev->init_state <
HINIC_INIT_STATE_DBGTOOL_INITED &&
!test_bit(HINIC_FUNC_PRB_ERR,
&des_dev->flag))
continue;
if (hinic_global_func_id(des_dev->hwdev) != vf_func_id)
continue;
if (des_dev->init_state <
HINIC_INIT_STATE_DBGTOOL_INITED) {
break;
}
sdk_info(&dev->pcidev->dev, "Receive event: %s vf%d nic\n",
en ? "enable" : "disable", vf_func_id);
err = 0;
if (en) {
if (des_dev->uld_dev[SERVICE_T_NIC]) {
				sdk_err(&des_dev->pcidev->dev,
					"%s driver is already attached to the pcie device, cannot set VF max_queue_num\n",
					s_uld_name[SERVICE_T_NIC]);
} else {
err = hinic_set_vf_dev_cap(des_dev->hwdev);
if (err) {
					sdk_err(&des_dev->pcidev->dev,
						"%s driver failed to set VF max_queue_num, err=%d\n",
						s_uld_name[SERVICE_T_NIC], err);
break;
}
}
err = attach_uld(des_dev, SERVICE_T_NIC,
&g_uld_info[SERVICE_T_NIC]);
if (err) {
sdk_err(&des_dev->pcidev->dev, "Failed to initialize NIC\n");
break;
}
uld_dev = (struct hinic_nic_dev *)
(des_dev->uld_dev[SERVICE_T_NIC]);
uld_dev->in_vm = true;
uld_dev->is_vm_slave =
is_multi_vm_slave(uld_dev->hwdev);
uld_dev->is_bm_slave =
is_multi_bm_slave(uld_dev->hwdev);
if (des_dev->init_state < HINIC_INIT_STATE_NIC_INITED)
des_dev->init_state =
HINIC_INIT_STATE_NIC_INITED;
} else {
detach_uld(des_dev, SERVICE_T_NIC);
}
break;
}
lld_dev_put();
return err;
}
EXPORT_SYMBOL(hinic_ovs_set_vf_nic_state);
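/* Worker context for applying a NIC state change requested through the
 * multi-host management channel.
 */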
static void slave_host_mgmt_work(struct work_struct *work)
{
struct hinic_pcidev *pci_adapter =
container_of(work, struct hinic_pcidev, slave_nic_work);
__set_nic_func_state(pci_adapter);
}
static void __multi_host_mgmt(struct hinic_pcidev *dev,
struct hinic_multi_host_mgmt_event *mhost_mgmt)
{
struct hinic_pcidev *des_dev;
	struct hinic_mhost_nic_func_state *nic_state = NULL;
switch (mhost_mgmt->sub_cmd) {
case HINIC_MHOST_NIC_STATE_CHANGE:
nic_state = mhost_mgmt->data;
nic_state->status = 0;
		/* find the pci_adapter that owns func_idx and enable or disable its nic */
lld_dev_hold();
list_for_each_entry(des_dev, &dev->chip_node->func_list, node) {
if (test_bit(HINIC_FUNC_IN_REMOVE, &des_dev->flag))
continue;
if (des_dev->init_state <
HINIC_INIT_STATE_DBGTOOL_INITED &&
!test_bit(HINIC_FUNC_PRB_ERR,
&des_dev->flag))
continue;
if (hinic_global_func_id_hw(des_dev->hwdev) !=
nic_state->func_idx)
continue;
if (des_dev->init_state <
HINIC_INIT_STATE_DBGTOOL_INITED) {
nic_state->status =
test_bit(HINIC_FUNC_PRB_ERR,
&des_dev->flag) ? 1 : 0;
break;
}
sdk_info(&dev->pcidev->dev, "Receive nic state changed event, state: %d\n",
nic_state->enable);
			/* apply the nic state change in worker context */
schedule_work(&des_dev->slave_nic_work);
break;
}
lld_dev_put();
break;
default:
sdk_warn(&dev->pcidev->dev, "Received unknown multi-host mgmt event %d\n",
mhost_mgmt->sub_cmd);
break;
}
}
static void send_uld_dev_event(struct hinic_pcidev *dev,
struct hinic_event_info *event)
{
enum hinic_service_type type;
for (type = SERVICE_T_NIC; type < SERVICE_T_MAX; type++) {
if (test_and_set_bit(type, &dev->state)) {
sdk_warn(&dev->pcidev->dev, "Event: 0x%x can't handler, %s is in detach\n",
event->type, s_uld_name[type]);
continue;
}
if (g_uld_info[type].event)
g_uld_info[type].event(&dev->lld_dev,
dev->uld_dev[type], event);
clear_bit(type, &dev->state);
}
}
static void send_event_to_all_pf(struct hinic_pcidev *dev,
struct hinic_event_info *event)
{
struct hinic_pcidev *des_dev = NULL;
lld_dev_hold();
list_for_each_entry(des_dev, &dev->chip_node->func_list, node) {
if (test_bit(HINIC_FUNC_IN_REMOVE, &des_dev->flag))
continue;
if (hinic_func_type(des_dev->hwdev) == TYPE_VF)
continue;
send_uld_dev_event(des_dev, event);
}
lld_dev_put();
}
static void send_event_to_dst_pf(struct hinic_pcidev *dev, u16 func_id,
struct hinic_event_info *event)
{
struct hinic_pcidev *des_dev = NULL;
lld_dev_hold();
list_for_each_entry(des_dev, &dev->chip_node->func_list, node) {
if (test_bit(HINIC_FUNC_IN_REMOVE, &des_dev->flag))
continue;
if (hinic_func_type(des_dev->hwdev) == TYPE_VF)
continue;
if (hinic_global_func_id(des_dev->hwdev) == func_id) {
send_uld_dev_event(des_dev, event);
break;
}
}
lld_dev_put();
}
void hinic_event_process(void *adapter, struct hinic_event_info *event)
{
struct hinic_pcidev *dev = adapter;
u16 func_id;
switch (event->type) {
case HINIC_EVENT_FMW_ACT_NTC:
hinic_sync_time_to_fmw(dev);
break;
case HINIC_EVENT_MCTP_GET_HOST_INFO:
__mctp_get_host_info(dev, &event->mctp_info);
break;
case HINIC_EVENT_MULTI_HOST_MGMT:
__multi_host_mgmt(dev, &event->mhost_mgmt);
break;
case HINIC_EVENT_FAULT:
if (event->info.fault_level == FAULT_LEVEL_SERIOUS_FLR &&
event->info.event.chip.func_id < HINIC_MAX_PF_NUM) {
func_id = event->info.event.chip.func_id;
send_event_to_dst_pf(adapter, func_id, event);
} else {
send_uld_dev_event(adapter, event);
}
break;
case HINIC_EVENT_MGMT_WATCHDOG_EVENT:
send_event_to_all_pf(adapter, event);
break;
default:
send_uld_dev_event(adapter, event);
break;
}
}
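/* Map the configuration, interrupt and doorbell BARs; in normal chip
 * mode, additionally map the direct WQE region that follows the
 * doorbell area.
 */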
static int mapping_bar(struct pci_dev *pdev, struct hinic_pcidev *pci_adapter)
{
u32 db_dwqe_size;
u64 dwqe_addr;
pci_adapter->cfg_reg_base =
pci_ioremap_bar(pdev, HINIC_PCI_CFG_REG_BAR);
if (!pci_adapter->cfg_reg_base) {
sdk_err(&pci_adapter->pcidev->dev,
"Failed to map configuration regs\n");
return -ENOMEM;
}
pci_adapter->intr_reg_base = pci_ioremap_bar(pdev,
HINIC_PCI_INTR_REG_BAR);
if (!pci_adapter->intr_reg_base) {
sdk_err(&pci_adapter->pcidev->dev,
"Failed to map interrupt regs\n");
goto map_intr_bar_err;
}
db_dwqe_size = hinic_get_db_size(pci_adapter->cfg_reg_base,
&pci_adapter->chip_mode);
pci_adapter->db_base_phy = pci_resource_start(pdev, HINIC_PCI_DB_BAR);
pci_adapter->db_base = ioremap(pci_adapter->db_base_phy,
db_dwqe_size);
if (!pci_adapter->db_base) {
sdk_err(&pci_adapter->pcidev->dev,
"Failed to map doorbell regs\n");
goto map_db_err;
}
if (pci_adapter->chip_mode != CHIP_MODE_NORMAL)
return 0;
dwqe_addr = pci_adapter->db_base_phy + db_dwqe_size;
#if defined(__aarch64__)
	/* arm64 does not support io_mapping_create_wc() */
pci_adapter->dwqe_mapping = __ioremap(dwqe_addr, db_dwqe_size,
__pgprot(PROT_DEVICE_nGnRnE));
#else
pci_adapter->dwqe_mapping = io_mapping_create_wc(dwqe_addr,
db_dwqe_size);
#endif
if (!pci_adapter->dwqe_mapping) {
sdk_err(&pci_adapter->pcidev->dev, "Failed to io_mapping_create_wc\n");
goto mapping_dwqe_err;
}
return 0;
mapping_dwqe_err:
iounmap(pci_adapter->db_base);
map_db_err:
iounmap(pci_adapter->intr_reg_base);
map_intr_bar_err:
iounmap(pci_adapter->cfg_reg_base);
return -ENOMEM;
}
static void unmapping_bar(struct hinic_pcidev *pci_adapter)
{
if (pci_adapter->chip_mode == CHIP_MODE_NORMAL) {
#if defined(__aarch64__)
iounmap(pci_adapter->dwqe_mapping);
#else
io_mapping_free(pci_adapter->dwqe_mapping);
#endif
}
iounmap(pci_adapter->db_base);
iounmap(pci_adapter->intr_reg_base);
iounmap(pci_adapter->cfg_reg_base);
}
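/* Attach the function to an existing chip node matched by the parent
 * bus number (VFs on the root bus reuse the first registered chip), or
 * allocate a new chip node and a free card id for it.
 */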
static int alloc_chip_node(struct hinic_pcidev *pci_adapter)
{
struct card_node *chip_node;
unsigned char i;
unsigned char parent_bus_number = 0;
int err;
if (!pci_is_root_bus(pci_adapter->pcidev->bus))
parent_bus_number = pci_adapter->pcidev->bus->parent->number;
if (parent_bus_number != 0) {
list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
if (chip_node->dp_bus_num == parent_bus_number) {
pci_adapter->chip_node = chip_node;
return 0;
}
}
} else if (pci_adapter->pcidev->device == HINIC_DEV_ID_1822_VF ||
pci_adapter->pcidev->device == HINIC_DEV_ID_1822_VF_HV) {
list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
if (chip_node) {
pci_adapter->chip_node = chip_node;
return 0;
}
}
}
for (i = 0; i < MAX_CARD_ID; i++) {
if (!FIND_BIT(card_bit_map, i)) {
card_bit_map = (u64)SET_BIT(card_bit_map, i);
break;
}
}
if (i == MAX_CARD_ID) {
sdk_err(&pci_adapter->pcidev->dev,
"Failed to alloc card id\n");
return -EFAULT;
}
chip_node = kzalloc(sizeof(*chip_node), GFP_KERNEL);
if (!chip_node) {
sdk_err(&pci_adapter->pcidev->dev,
"Failed to alloc chip node\n");
goto alloc_chip_err;
}
chip_node->dbgtool_attr_file.name = kzalloc(IFNAMSIZ, GFP_KERNEL);
if (!(chip_node->dbgtool_attr_file.name)) {
sdk_err(&pci_adapter->pcidev->dev,
"Failed to alloc dbgtool attr file name\n");
goto alloc_dbgtool_attr_file_err;
}
/* parent bus number */
chip_node->dp_bus_num = parent_bus_number;
err = snprintf(chip_node->chip_name, IFNAMSIZ, "%s%d",
HINIC_CHIP_NAME, i);
if (err <= 0 || err >= IFNAMSIZ) {
sdk_err(&pci_adapter->pcidev->dev,
"Failed to snprintf chip_name, function return(%d) and dest_len(%d)\n",
err, IFNAMSIZ);
goto alloc_dbgtool_attr_file_err;
}
err = snprintf((char *)chip_node->dbgtool_attr_file.name,
IFNAMSIZ, "%s%d", HINIC_CHIP_NAME, i);
if (err <= 0 || err >= IFNAMSIZ) {
sdk_err(&pci_adapter->pcidev->dev,
"Failed to snprintf dbgtool_attr_file_name, function return(%d) and dest_len(%d)\n",
err, IFNAMSIZ);
goto alloc_dbgtool_attr_file_err;
}
	sdk_info(&pci_adapter->pcidev->dev,
		 "Added new chip %s to the global list\n",
		 chip_node->chip_name);
list_add_tail(&chip_node->node, &g_hinic_chip_list);
INIT_LIST_HEAD(&chip_node->func_list);
pci_adapter->chip_node = chip_node;
mutex_init(&chip_node->sfp_mutex);
return 0;
alloc_dbgtool_attr_file_err:
kfree(chip_node);
alloc_chip_err:
card_bit_map = CLEAR_BIT(card_bit_map, i);
return -ENOMEM;
}
static void free_chip_node(struct hinic_pcidev *pci_adapter)
{
struct card_node *chip_node = pci_adapter->chip_node;
u32 id;
int err;
if (list_empty(&chip_node->func_list)) {
list_del(&chip_node->node);
		sdk_info(&pci_adapter->pcidev->dev,
			 "Deleted chip %s from the global list\n",
			 chip_node->chip_name);
err = sscanf(chip_node->chip_name, HINIC_CHIP_NAME "%u", &id);
if (err <= 0)
sdk_err(&pci_adapter->pcidev->dev, "Failed to get hinic id\n");
card_bit_map = CLEAR_BIT(card_bit_map, id);
kfree(chip_node->dbgtool_attr_file.name);
kfree(chip_node);
}
}
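/* Decide whether probing of a VF should be skipped: look up the
 * disable_vf_load flag recorded by its parent PF, falling back to the
 * module parameter when the VF is passed through into a VM.
 */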
static bool hinic_get_vf_load_state(struct pci_dev *pdev)
{
unsigned char parent_bus_number;
struct card_node *chip_node;
u8 id;
if (!pdev->is_virtfn)
return false;
/* vf used in vm */
if (pci_is_root_bus(pdev->bus))
return disable_vf_load;
parent_bus_number = pdev->bus->parent->number;
lld_dev_hold();
list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
if (chip_node->dp_bus_num == parent_bus_number) {
for (id = 0; id < HINIC_MAX_PF_NUM; id++) {
if (chip_node->pf_bus_num[id] ==
pdev->bus->number) {
lld_dev_put();
return chip_node->disable_vf_load[id];
}
}
}
}
lld_dev_put();
return disable_vf_load;
}
static void hinic_set_vf_load_state(struct hinic_pcidev *pci_adapter,
bool vf_load_state)
{
struct card_node *chip_node;
u16 func_id;
if (hinic_func_type(pci_adapter->hwdev) == TYPE_VF)
return;
/* The VF on the BM slave side must be probed */
if (is_multi_bm_slave(pci_adapter->hwdev))
vf_load_state = false;
func_id = hinic_global_func_id_hw(pci_adapter->hwdev);
chip_node = pci_adapter->chip_node;
chip_node->disable_vf_load[func_id] = vf_load_state;
chip_node->pf_bus_num[func_id] = pci_adapter->pcidev->bus->number;
sdk_info(&pci_adapter->pcidev->dev, "Current function support %s, %s vf load in host\n",
(hinic_support_ovs(pci_adapter->hwdev, NULL) ? "ovs" : "nic"),
(vf_load_state ? "disable" : "enable"));
}
int hinic_ovs_set_vf_load_state(struct pci_dev *pdev)
{
struct hinic_pcidev *pci_adapter;
if (!pdev) {
pr_err("pdev is null\n");
return -EINVAL;
}
pci_adapter = pci_get_drvdata(pdev);
if (!pci_adapter) {
pr_err("pci_adapter is null\n");
return -EFAULT;
}
hinic_set_vf_load_state(pci_adapter, disable_vf_load);
return 0;
}
EXPORT_SYMBOL(hinic_ovs_set_vf_load_state);
static int hinic_config_deft_mrss(struct pci_dev *pdev)
{
return 0;
}
static int hinic_config_pci_cto(struct pci_dev *pdev)
{
return 0;
}
static int hinic_pci_init(struct pci_dev *pdev)
{
struct hinic_pcidev *pci_adapter = NULL;
int err;
err = hinic_config_deft_mrss(pdev);
if (err) {
sdk_err(&pdev->dev, "Failed to configure Max Read Request Size\n");
return err;
}
err = hinic_config_pci_cto(pdev);
if (err) {
sdk_err(&pdev->dev, "Failed to configure Completion timeout\n");
return err;
}
pci_adapter = kzalloc(sizeof(*pci_adapter), GFP_KERNEL);
if (!pci_adapter) {
sdk_err(&pdev->dev,
"Failed to alloc pci device adapter\n");
return -ENOMEM;
}
pci_adapter->pcidev = pdev;
mutex_init(&pci_adapter->pdev_mutex);
pci_set_drvdata(pdev, pci_adapter);
#ifdef CONFIG_PCI_IOV
if (pdev->is_virtfn && hinic_get_vf_load_state(pdev)) {
sdk_info(&pdev->dev, "VFs are not binded to hinic\n");
return 0;
}
#endif
err = pci_enable_device(pdev);
if (err) {
sdk_err(&pdev->dev, "Failed to enable PCI device\n");
goto pci_enable_err;
}
err = pci_request_regions(pdev, HINIC_DRV_NAME);
if (err) {
sdk_err(&pdev->dev, "Failed to request regions\n");
goto pci_regions_err;
}
pci_enable_pcie_error_reporting(pdev);
pci_set_master(pdev);
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
sdk_warn(&pdev->dev, "Couldn't set 64-bit DMA mask\n");
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
sdk_err(&pdev->dev, "Failed to set DMA mask\n");
goto dma_mask_err;
}
}
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
sdk_warn(&pdev->dev,
"Couldn't set 64-bit coherent DMA mask\n");
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
sdk_err(&pdev->dev,
"Failed to set coherent DMA mask\n");
			goto dma_consistent_mask_err;
}
}
return 0;
dma_consistent_mask_err:
dma_mask_err:
pci_clear_master(pdev);
pci_disable_pcie_error_reporting(pdev);
pci_release_regions(pdev);
pci_regions_err:
pci_disable_device(pdev);
pci_enable_err:
pci_set_drvdata(pdev, NULL);
kfree(pci_adapter);
return err;
}
static void hinic_pci_deinit(struct pci_dev *pdev)
{
struct hinic_pcidev *pci_adapter = pci_get_drvdata(pdev);
pci_clear_master(pdev);
pci_release_regions(pdev);
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
kfree(pci_adapter);
}
static void hinic_notify_ppf_unreg(struct hinic_pcidev *pci_adapter)
{
struct card_node *chip_node = pci_adapter->chip_node;
struct hinic_pcidev *dev;
if (hinic_func_type(pci_adapter->hwdev) != TYPE_PPF)
return;
lld_lock_chip_node();
list_for_each_entry(dev, &chip_node->func_list, node) {
hinic_ppf_hwdev_unreg(dev->hwdev);
}
lld_unlock_chip_node();
}
static void hinic_notify_ppf_reg(struct hinic_pcidev *pci_adapter)
{
struct card_node *chip_node = pci_adapter->chip_node;
struct hinic_pcidev *dev;
if (hinic_func_type(pci_adapter->hwdev) != TYPE_PPF)
return;
lld_lock_chip_node();
list_for_each_entry(dev, &chip_node->func_list, node) {
hinic_ppf_hwdev_reg(dev->hwdev, pci_adapter->hwdev);
}
lld_unlock_chip_node();
}
#ifdef CONFIG_X86
/**
 * cfg_order_reg - when the CPU is a Haswell or Broadwell model, the DMA
 * order register must be configured to zero
 * @pci_adapter: pci adapter
 */
/*lint -save -e40 */
void cfg_order_reg(struct hinic_pcidev *pci_adapter)
{
u8 cpu_model[] = {0x3c, 0x3f, 0x45, 0x46, 0x3d, 0x47, 0x4f, 0x56};
struct cpuinfo_x86 *cpuinfo;
u32 i;
if (HINIC_FUNC_IS_VF(pci_adapter->hwdev))
return;
cpuinfo = &cpu_data(0);
for (i = 0; i < sizeof(cpu_model); i++) {
if (cpu_model[i] == cpuinfo->x86_model)
hinic_set_pcie_order_cfg(pci_adapter->hwdev);
}
}
/*lint -restore*/
#endif
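/* Bring up one PCI function: initialize the hwdev, start the syncfw
 * timer and dbgtool, probe the NIC ULD first and then the remaining
 * ULDs. On a slave host the hardware may not be ready yet, in which
 * case initialization is retried from a delayed work.
 */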
static int hinic_func_init(struct pci_dev *pdev,
struct hinic_pcidev *pci_adapter)
{
struct hinic_init_para init_para;
bool vf_load_state;
int err;
init_para.adapter_hdl = pci_adapter;
init_para.pcidev_hdl = pdev;
init_para.dev_hdl = &pdev->dev;
init_para.cfg_reg_base = pci_adapter->cfg_reg_base;
init_para.intr_reg_base = pci_adapter->intr_reg_base;
init_para.db_base = pci_adapter->db_base;
init_para.db_base_phy = pci_adapter->db_base_phy;
init_para.dwqe_mapping = pci_adapter->dwqe_mapping;
init_para.hwdev = &pci_adapter->hwdev;
init_para.chip_node = pci_adapter->chip_node;
init_para.ppf_hwdev = hinic_get_ppf_hwdev_by_pdev(pdev);
err = hinic_init_hwdev(&init_para);
if (err < 0) {
pci_adapter->hwdev = NULL;
sdk_err(&pdev->dev, "Failed to initialize hardware device\n");
return -EFAULT;
} else if (err > 0) {
if (err == (1 << HINIC_HWDEV_ALL_INITED) &&
pci_adapter->init_state < HINIC_INIT_STATE_HW_IF_INITED) {
pci_adapter->init_state = HINIC_INIT_STATE_HW_IF_INITED;
sdk_info(&pdev->dev,
"Initialize hardware device later\n");
queue_delayed_work(pci_adapter->slave_nic_init_workq,
&pci_adapter->slave_nic_init_dwork,
HINIC_SLAVE_NIC_DELAY_TIME);
set_bit(HINIC_FUNC_PRB_DELAY, &pci_adapter->flag);
} else if (err != (1 << HINIC_HWDEV_ALL_INITED)) {
			sdk_err(&pdev->dev,
				"Initializing hardware device partially failed\n");
hinic_detect_version_compatible(pci_adapter);
hinic_notify_ppf_reg(pci_adapter);
pci_adapter->init_state =
HINIC_INIT_STATE_HW_PART_INITED;
}
return -EFAULT;
}
hinic_notify_ppf_reg(pci_adapter);
pci_adapter->init_state = HINIC_INIT_STATE_HWDEV_INITED;
vf_load_state = hinic_support_ovs(pci_adapter->hwdev, NULL) ?
true : disable_vf_load;
hinic_set_vf_load_state(pci_adapter, vf_load_state);
hinic_qps_num_set(pci_adapter->hwdev, 0);
pci_adapter->lld_dev.pdev = pdev;
pci_adapter->lld_dev.hwdev = pci_adapter->hwdev;
pci_adapter->sriov_info.pdev = pdev;
pci_adapter->sriov_info.hwdev = pci_adapter->hwdev;
hinic_event_register(pci_adapter->hwdev, pci_adapter,
hinic_event_process);
if (!HINIC_FUNC_IS_VF(pci_adapter->hwdev))
hinic_sync_time_to_fmw(pci_adapter);
hinic_init_syncfw_timer(pci_adapter);
/* dbgtool init */
lld_lock_chip_node();
err = dbgtool_knl_init(pci_adapter->hwdev, pci_adapter->chip_node);
if (err) {
lld_unlock_chip_node();
sdk_err(&pdev->dev, "Failed to initialize dbgtool\n");
hinic_destroy_syncfw_timer(pci_adapter);
hinic_event_unregister(pci_adapter->hwdev);
return err;
}
lld_unlock_chip_node();
pci_adapter->init_state = HINIC_INIT_STATE_DBGTOOL_INITED;
err = hinic_detect_version_compatible(pci_adapter);
if (err)
return err;
if (!HINIC_FUNC_IS_VF(pci_adapter->hwdev) &&
FUNC_ENABLE_SRIOV_IN_DEFAULT(pci_adapter->hwdev)) {
hinic_pci_sriov_enable(pdev,
hinic_func_max_vf(pci_adapter->hwdev));
}
/* NIC is base driver, probe firstly */
err = __set_nic_func_state(pci_adapter);
if (err)
return err;
attach_ulds(pci_adapter);
#ifdef CONFIG_X86
cfg_order_reg(pci_adapter);
#endif
sdk_info(&pdev->dev, "Pcie device probed\n");
pci_adapter->init_state = HINIC_INIT_STATE_ALL_INITED;
return 0;
}
static void hinic_func_deinit(struct pci_dev *pdev)
{
struct hinic_pcidev *pci_adapter = pci_get_drvdata(pdev);
	/* On function deinit, first disable mgmt-initiated report events,
	 * then flush the mgmt workqueue.
	 */
hinic_disable_mgmt_msg_report(pci_adapter->hwdev);
if (pci_adapter->init_state >= HINIC_INIT_STATE_HW_PART_INITED)
hinic_flush_mgmt_workq(pci_adapter->hwdev);
hinic_set_func_deinit_flag(pci_adapter->hwdev);
if (pci_adapter->init_state >= HINIC_INIT_STATE_NIC_INITED) {
detach_ulds(pci_adapter);
detach_uld(pci_adapter, SERVICE_T_NIC);
}
if (pci_adapter->init_state >= HINIC_INIT_STATE_DBGTOOL_INITED) {
lld_lock_chip_node();
dbgtool_knl_deinit(pci_adapter->hwdev, pci_adapter->chip_node);
lld_unlock_chip_node();
hinic_destroy_syncfw_timer(pci_adapter);
hinic_event_unregister(pci_adapter->hwdev);
}
hinic_notify_ppf_unreg(pci_adapter);
if (pci_adapter->init_state >= HINIC_INIT_STATE_HW_IF_INITED) {
/* Remove the current node from node-list first,
* then it's safe to free hwdev
*/
lld_lock_chip_node();
list_del(&pci_adapter->node);
lld_unlock_chip_node();
hinic_free_hwdev(pci_adapter->hwdev);
}
}
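/* Poll roughly every 10ms until user-space tools have released the
 * device or the retry limit is reached.
 */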
static void wait_tool_unused(void)
{
u32 loop_cnt = 0;
while (loop_cnt < HINIC_WAIT_TOOL_CNT_TIMEOUT) {
if (!atomic_read(&tool_used_cnt))
return;
usleep_range(9900, 10000);
loop_cnt++;
}
}
static inline void wait_sriov_cfg_complete(struct hinic_pcidev *pci_adapter)
{
struct hinic_sriov_info *sriov_info;
u32 loop_cnt = 0;
sriov_info = &pci_adapter->sriov_info;
set_bit(HINIC_FUNC_REMOVE, &sriov_info->state);
usleep_range(9900, 10000);
while (loop_cnt < HINIC_WAIT_SRIOV_CFG_TIMEOUT) {
if (!test_bit(HINIC_SRIOV_ENABLE, &sriov_info->state) &&
!test_bit(HINIC_SRIOV_DISABLE, &sriov_info->state))
return;
usleep_range(9900, 10000);
loop_cnt++;
}
}
static void hinic_remove(struct pci_dev *pdev)
{
struct hinic_pcidev *pci_adapter = pci_get_drvdata(pdev);
if (!pci_adapter)
return;
sdk_info(&pdev->dev, "Pcie device remove begin\n");
#ifdef CONFIG_PCI_IOV
if (pdev->is_virtfn && hinic_get_vf_load_state(pdev)) {
pci_set_drvdata(pdev, NULL);
kfree(pci_adapter);
return;
}
#endif
cancel_delayed_work_sync(&pci_adapter->slave_nic_init_dwork);
flush_workqueue(pci_adapter->slave_nic_init_workq);
destroy_workqueue(pci_adapter->slave_nic_init_workq);
if (pci_adapter->init_state >= HINIC_INIT_STATE_HW_IF_INITED)
hinic_detect_hw_present(pci_adapter->hwdev);
switch (pci_adapter->init_state) {
case HINIC_INIT_STATE_ALL_INITED:
case HINIC_INIT_STATE_NIC_INITED:
		/* Hotplug is not supported while SR-IOV is enabled, so
		 * disable the SR-IOV capability as usual.
		 */
if (!HINIC_FUNC_IS_VF(pci_adapter->hwdev)) {
wait_sriov_cfg_complete(pci_adapter);
hinic_pci_sriov_disable(pdev);
}
fallthrough;
case HINIC_INIT_STATE_DBGTOOL_INITED:
case HINIC_INIT_STATE_HWDEV_INITED:
case HINIC_INIT_STATE_HW_PART_INITED:
case HINIC_INIT_STATE_HW_IF_INITED:
case HINIC_INIT_STATE_PCI_INITED:
set_bit(HINIC_FUNC_IN_REMOVE, &pci_adapter->flag);
lld_lock_chip_node();
cancel_work_sync(&pci_adapter->slave_nic_work);
lld_unlock_chip_node();
wait_tool_unused();
if (pci_adapter->init_state >= HINIC_INIT_STATE_HW_IF_INITED)
hinic_func_deinit(pdev);
lld_lock_chip_node();
if (pci_adapter->init_state < HINIC_INIT_STATE_HW_IF_INITED)
list_del(&pci_adapter->node);
nictool_k_uninit();
free_chip_node(pci_adapter);
lld_unlock_chip_node();
unmapping_bar(pci_adapter);
hinic_pci_deinit(pdev);
break;
default:
break;
}
sdk_info(&pdev->dev, "Pcie device removed\n");
}
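/* Delayed probe path for slave-host functions: wait until the master
 * host mailbox is usable and, for non-PPF functions, until the PPF of
 * the same chip is fully initialized, then run hinic_func_init().
 */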
static void slave_host_init_delay_work(struct work_struct *work)
{
struct delayed_work *delay = to_delayed_work(work);
struct hinic_pcidev *pci_adapter = container_of(delay,
struct hinic_pcidev, slave_nic_init_dwork);
struct pci_dev *pdev = pci_adapter->pcidev;
struct card_node *chip_node = pci_adapter->chip_node;
int found = 0;
struct hinic_pcidev *ppf_pcidev = NULL;
int err;
if (!hinic_get_master_host_mbox_enable(pci_adapter->hwdev)) {
queue_delayed_work(pci_adapter->slave_nic_init_workq,
&pci_adapter->slave_nic_init_dwork,
HINIC_SLAVE_NIC_DELAY_TIME);
return;
}
if (hinic_func_type(pci_adapter->hwdev) == TYPE_PPF) {
err = hinic_func_init(pdev, pci_adapter);
clear_bit(HINIC_FUNC_PRB_DELAY, &pci_adapter->flag);
if (err)
set_bit(HINIC_FUNC_PRB_ERR, &pci_adapter->flag);
return;
}
	/* Make sure the PPF is initialized first */
lld_dev_hold();
list_for_each_entry(ppf_pcidev, &chip_node->func_list, node) {
if (ppf_pcidev &&
hinic_func_type(ppf_pcidev->hwdev) == TYPE_PPF) {
found = 1;
break;
}
}
lld_dev_put();
if (found && ppf_pcidev->init_state == HINIC_INIT_STATE_ALL_INITED) {
err = hinic_func_init(pdev, pci_adapter);
clear_bit(HINIC_FUNC_PRB_DELAY, &pci_adapter->flag);
if (err)
set_bit(HINIC_FUNC_PRB_ERR, &pci_adapter->flag);
} else {
queue_delayed_work(pci_adapter->slave_nic_init_workq,
&pci_adapter->slave_nic_init_dwork,
HINIC_SLAVE_NIC_DELAY_TIME);
}
}
static int hinic_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct hinic_pcidev *pci_adapter;
int err;
sdk_info(&pdev->dev, "Pcie device probe begin\n");
err = hinic_pci_init(pdev);
if (err)
return err;
#ifdef CONFIG_PCI_IOV
if (pdev->is_virtfn && hinic_get_vf_load_state(pdev))
return 0;
#endif
pci_adapter = pci_get_drvdata(pdev);
clear_bit(HINIC_FUNC_PRB_ERR, &pci_adapter->flag);
clear_bit(HINIC_FUNC_PRB_DELAY, &pci_adapter->flag);
err = mapping_bar(pdev, pci_adapter);
if (err) {
sdk_err(&pdev->dev, "Failed to map bar\n");
goto map_bar_failed;
}
pci_adapter->id = *id;
INIT_WORK(&pci_adapter->slave_nic_work, slave_host_mgmt_work);
pci_adapter->slave_nic_init_workq =
create_singlethread_workqueue(HINIC_SLAVE_NIC_DELAY);
if (!pci_adapter->slave_nic_init_workq) {
sdk_err(&pdev->dev,
"Failed to create work queue: %s\n",
HINIC_SLAVE_NIC_DELAY);
		goto create_nic_delay_work_fail;
}
INIT_DELAYED_WORK(&pci_adapter->slave_nic_init_dwork,
slave_host_init_delay_work);
	/* if chip information for this pcie function already exists,
	 * add the function to that chip
	 */
lld_lock_chip_node();
err = alloc_chip_node(pci_adapter);
if (err) {
sdk_err(&pdev->dev,
"Failed to add new chip node to global list\n");
goto alloc_chip_node_fail;
}
err = nictool_k_init();
if (err) {
sdk_warn(&pdev->dev, "Failed to init nictool");
goto init_nictool_err;
}
list_add_tail(&pci_adapter->node, &pci_adapter->chip_node->func_list);
lld_unlock_chip_node();
pci_adapter->init_state = HINIC_INIT_STATE_PCI_INITED;
err = hinic_func_init(pdev, pci_adapter);
if (err)
goto func_init_err;
return 0;
func_init_err:
if (!test_bit(HINIC_FUNC_PRB_DELAY, &pci_adapter->flag))
set_bit(HINIC_FUNC_PRB_ERR, &pci_adapter->flag);
return 0;
init_nictool_err:
free_chip_node(pci_adapter);
alloc_chip_node_fail:
lld_unlock_chip_node();
create_nic_delay_work_fail:
unmapping_bar(pci_adapter);
map_bar_failed:
hinic_pci_deinit(pdev);
sdk_err(&pdev->dev, "Pcie device probe failed\n");
return err;
}
/*lint -save -e133 -e10*/
static const struct pci_device_id hinic_pci_table[] = {
{PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_PF), HINIC_BOARD_25GE},
{PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_VF), 0},
{PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_VF_HV), 0},
{PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_SMTIO), HINIC_BOARD_PG_SM_25GE},
{PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_PANGEA_100GE),
HINIC_BOARD_PG_100GE},
{PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_PANGEA_TP_10GE),
HINIC_BOARD_PG_TP_10GE},
{PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_KR_40GE), HINIC_BOARD_40GE},
{PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_KR_100GE), HINIC_BOARD_100GE},
{PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_KR_25GE), HINIC_BOARD_25GE},
{PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_MULTI_HOST), HINIC_BOARD_25GE},
{PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_100GE), HINIC_BOARD_100GE},
{PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_DUAL_25GE), HINIC_BOARD_25GE},
{0, 0}
};
/*lint -restore*/
MODULE_DEVICE_TABLE(pci, hinic_pci_table);
/**
* hinic_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
* @state: The current pci connection state
*
* This function is called after a PCI bus error affecting
* this device has been detected.
*
 * Since we only need error detection, not error handling, we always
 * return PCI_ERS_RESULT_CAN_RECOVER to tell the AER core that no
 * reset (error handling) is needed.
*/
static pci_ers_result_t hinic_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct hinic_pcidev *pci_adapter;
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
sdk_err(&pdev->dev,
"Uncorrectable error detected, log and cleanup error status: 0x%08x\n",
state);
pci_adapter = pci_get_drvdata(pdev);
if (pci_adapter)
hinic_record_pcie_error(pci_adapter->hwdev);
return PCI_ERS_RESULT_CAN_RECOVER;
}
static void hinic_shutdown(struct pci_dev *pdev)
{
struct hinic_pcidev *pci_adapter = pci_get_drvdata(pdev);
sdk_info(&pdev->dev, "Shutdown device\n");
if (pci_adapter)
hinic_shutdown_hwdev(pci_adapter->hwdev);
pci_disable_device(pdev);
if (pci_adapter)
hinic_set_api_stop(pci_adapter->hwdev);
}
/* Because we only need error detection, not error handling, the
 * error_detected callback alone is enough.
 */
static struct pci_error_handlers hinic_err_handler = {
.error_detected = hinic_io_error_detected,
};
static struct pci_driver hinic_driver = {
.name = HINIC_DRV_NAME,
.id_table = hinic_pci_table,
.probe = hinic_probe,
.remove = hinic_remove,
.shutdown = hinic_shutdown,
.sriov_configure = hinic_pci_sriov_configure,
.err_handler = &hinic_err_handler
};
static int __init hinic_lld_init(void)
{
pr_info("%s - version %s\n", HINIC_DRV_DESC, HINIC_DRV_VERSION);
memset(g_uld_info, 0, sizeof(g_uld_info));
atomic_set(&tool_used_cnt, 0);
hinic_lld_lock_init();
	/* register the nic driver information first; the net device is
	 * added in nic_probe, which is called from hinic_probe.
	 */
hinic_register_uld(SERVICE_T_NIC, &nic_uld_info);
return pci_register_driver(&hinic_driver);
}
static void __exit hinic_lld_exit(void)
{
pci_unregister_driver(&hinic_driver);
hinic_unregister_uld(SERVICE_T_NIC);
}
module_init(hinic_lld_init);
module_exit(hinic_lld_exit);
int hinic_register_micro_log(struct hinic_micro_log_info *micro_log_info)
{
struct card_node *chip_node;
struct hinic_pcidev *dev;
if (!micro_log_info || !micro_log_info->init ||
!micro_log_info->deinit) {
pr_err("Invalid information of micro log info to register\n");
return -EINVAL;
}
lld_dev_hold();
list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
list_for_each_entry(dev, &chip_node->func_list, node) {
if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
continue;
if (hinic_func_type(dev->hwdev) == TYPE_PPF) {
if (micro_log_info->init(dev->hwdev)) {
sdk_err(&dev->pcidev->dev,
"micro log init failed\n");
continue;
}
}
}
}
lld_dev_put();
pr_info("Register micro log succeed\n");
return 0;
}
EXPORT_SYMBOL(hinic_register_micro_log);
void hinic_unregister_micro_log(struct hinic_micro_log_info *micro_log_info)
{
struct card_node *chip_node;
struct hinic_pcidev *dev;
if (!micro_log_info)
return;
lld_dev_hold();
list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
list_for_each_entry(dev, &chip_node->func_list, node) {
if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
continue;
if (hinic_func_type(dev->hwdev) == TYPE_PPF)
micro_log_info->deinit(dev->hwdev);
}
}
lld_dev_put();
pr_info("Unregister micro log succeed\n");
}
EXPORT_SYMBOL(hinic_unregister_micro_log);
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei HiNIC PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
*/
#ifndef HINIC_LLD_H_
#define HINIC_LLD_H_
#define HINIC_SLAVE_NIC_DELAY "hinic_slave_nic_delay"
#define HINIC_SLAVE_NIC_DELAY_TIME (5 * HZ)
struct hinic_lld_dev {
struct pci_dev *pdev;
void *hwdev;
};
enum hinic_init_state {
HINIC_INIT_STATE_NONE,
HINIC_INIT_STATE_PCI_INITED,
HINIC_INIT_STATE_HW_IF_INITED,
HINIC_INIT_STATE_HW_PART_INITED,
HINIC_INIT_STATE_HWDEV_INITED,
HINIC_INIT_STATE_DBGTOOL_INITED,
HINIC_INIT_STATE_NIC_INITED,
HINIC_INIT_STATE_ALL_INITED,
};
struct hinic_uld_info {
	/* uld_dev: must not return NULL even when the function capability
	 * does not support the upper layer driver.
	 * uld_dev_name: the NIC driver should copy the net device name and
	 * the FC driver may copy the fc device name;
	 * other upper layer drivers don't need to copy anything.
	 */
int (*probe)(struct hinic_lld_dev *lld_dev,
void **uld_dev, char *uld_dev_name);
void (*remove)(struct hinic_lld_dev *lld_dev, void *uld_dev);
int (*suspend)(struct hinic_lld_dev *lld_dev,
void *uld_dev, pm_message_t state);
int (*resume)(struct hinic_lld_dev *lld_dev, void *uld_dev);
void (*event)(struct hinic_lld_dev *lld_dev, void *uld_dev,
struct hinic_event_info *event);
int (*ioctl)(void *uld_dev, u32 cmd, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size);
};
/* Used for the ULD HiNIC PCIe driver registration interface,
* the original interface is service_register_interface
*/
int hinic_register_uld(enum hinic_service_type uld_type,
struct hinic_uld_info *uld_info);
/* Used for the ULD HiNIC PCIe driver unregistration interface,
* the original interface is service_unregister_interface
*/
void hinic_unregister_uld(enum hinic_service_type uld_type);
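/* Illustrative registration sketch for an upper layer driver; the
 * my_probe/my_remove callbacks below are hypothetical placeholders,
 * not part of this driver:
 *
 *	static struct hinic_uld_info my_uld_info = {
 *		.probe	= my_probe,
 *		.remove	= my_remove,
 *	};
 *
 *	hinic_register_uld(SERVICE_T_ROCE, &my_uld_info);
 *	...
 *	hinic_unregister_uld(SERVICE_T_ROCE);
 */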
void *hinic_get_ppf_uld_by_pdev(struct pci_dev *pdev,
enum hinic_service_type type);
/* used for TOE/IWARP */
struct net_device *hinic_get_netdev_by_lld(struct hinic_lld_dev *lld_dev);
/* used for TOE/IWARP */
void *hinic_get_hwdev_by_netdev(struct net_device *netdev);
struct net_device *hinic_get_netdev_by_pcidev(struct pci_dev *pdev);
void *hinic_get_hwdev_by_ifname(char *ifname);
int hinic_get_chip_name_by_hwdev(void *hwdev, char *ifname);
void *hinic_get_uld_dev_by_ifname(char *ifname, enum hinic_service_type type);
void *hinic_get_uld_by_chip_name(char *ifname, enum hinic_service_type type);
int hinic_get_pf_uld_array(struct pci_dev *pdev, u32 *dev_cnt, void *array[]);
int hinic_set_chip_cos_up_map(struct pci_dev *pdev, u8 *cos_up);
int hinic_get_chip_cos_up_map(struct pci_dev *pdev, bool *is_setted,
u8 *cos_up);
void hinic_get_all_chip_id(void *card_id);
void hinic_get_card_info(void *hwdev, void *bufin);
int hinic_get_device_id(void *hwdev, u16 *dev_id);
void get_fc_devname(char *devname);
int hinic_get_pf_id(void *hwdev, u32 port_id, u32 *pf_id, u32 *isvalid);
void hinic_tool_cnt_inc(void);
void hinic_tool_cnt_dec(void);
struct hinic_sriov_info;
struct hinic_sriov_info *hinic_get_sriov_info_by_pcidev(struct pci_dev *pdev);
/* for dpdk */
void *hinic_get_pci_dev(u16 bdf);
void hinic_dpdk_pcie_remove(void *pdev);
int hinic_dpdk_pcie_probe(void *pdev);
int hinic_attach_nic(struct hinic_lld_dev *lld_dev);
void hinic_detach_nic(struct hinic_lld_dev *lld_dev);
int hinic_attach_roce(struct hinic_lld_dev *lld_dev);
void hinic_detach_roce(struct hinic_lld_dev *lld_dev);
int hinic_disable_nic_rss(struct hinic_lld_dev *lld_dev);
int hinic_enable_nic_rss(struct hinic_lld_dev *lld_dev);
int hinic_ovs_set_vf_nic_state(struct hinic_lld_dev *lld_dev,
u16 vf_func_id, bool en);
int hinic_ovs_set_vf_load_state(struct pci_dev *pdev);
int hinic_get_self_test_result(char *ifname, u32 *result);
enum hinic_init_state hinic_get_init_state_by_ifname(char *ifname);
enum hinic_init_state hinic_get_init_state(struct pci_dev *pdev);
extern struct hinic_uld_info g_uld_info[SERVICE_T_MAX];
struct pci_device_id *hinic_get_pci_device_id(struct pci_dev *pdev);
bool hinic_is_in_host(void);
bool hinic_is_valid_bar_addr(u64 offset);
#endif
// SPDX-License-Identifier: GPL-2.0
/* Huawei HiNIC PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/ethtool.h>
#include <linux/dcbnl.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/debugfs.h>
#include "ossl_knl.h"
#include "hinic_hw_mgmt.h"
#include "hinic_hw.h"
#include "hinic_dbg.h"
#include "hinic_nic_cfg.h"
#include "hinic_nic_dev.h"
#include "hinic_tx.h"
#include "hinic_rx.h"
#include "hinic_qp.h"
#include "hinic_dcb.h"
#include "hinic_lld.h"
#include "hinic_sriov.h"
#include "hinic_pci_id_tbl.h"
static u16 num_qps;
module_param(num_qps, ushort, 0444);
MODULE_PARM_DESC(num_qps, "Number of Queue Pairs (default unset)");
static u16 ovs_num_qps = 16;
module_param(ovs_num_qps, ushort, 0444);
MODULE_PARM_DESC(ovs_num_qps, "Number of Queue Pairs in ovs mode (default=16)");
#define DEFAULT_POLL_WEIGHT 64
static unsigned int poll_weight = DEFAULT_POLL_WEIGHT;
module_param(poll_weight, uint, 0444);
MODULE_PARM_DESC(poll_weight, "Number of packets for NAPI budget (default=64)");
#define HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT 2
#define HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG 32
#define HINIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG 7
/* suited for SDI 3.0 VM mode; tune these defines when testing for best performance */
#define SDI_VM_PENDING_LIMT 2
#define SDI_VM_COALESCE_TIMER_CFG 16
#define SDI_VM_RX_PKT_RATE_HIGH 1000000
#define SDI_VM_RX_PKT_RATE_LOW 30000
#define SDI_VM_RX_USECS_HIGH 56
#define SDI_VM_RX_PENDING_LIMT_HIGH 20
#define SDI_VM_RX_USECS_LOW 16
#define SDI_VM_RX_PENDING_LIMT_LOW 2
/* if qp_coalesc_use_drv_params_switch != 0, use the user-provided params */
static unsigned char qp_coalesc_use_drv_params_switch;
module_param(qp_coalesc_use_drv_params_switch, byte, 0444);
MODULE_PARM_DESC(qp_coalesc_use_drv_params_switch, "QP MSI-X Interrupt coalescing parameter switch (default=0, do not use drv parameters)");
static unsigned char qp_pending_limit = HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT;
module_param(qp_pending_limit, byte, 0444);
MODULE_PARM_DESC(qp_pending_limit, "QP MSI-X Interrupt coalescing parameter pending_limit (default=2)");
static unsigned char qp_coalesc_timer_cfg =
HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG;
module_param(qp_coalesc_timer_cfg, byte, 0444);
MODULE_PARM_DESC(qp_coalesc_timer_cfg, "QP MSI-X Interrupt coalescing parameter coalesc_timer_cfg (default=32)");
/* For arm64 servers, the best known configuration of the LRO max wqe
 * number is 4 (8K); for x86_64 servers it is 8 (16K). These values can
 * also be configured via hinicadm.
 */
static unsigned char set_max_wqe_num;
module_param(set_max_wqe_num, byte, 0444);
MODULE_PARM_DESC(set_max_wqe_num, "Set lro max wqe number, valid range is 1 - 32, default is 4(arm) / 8(x86)");
#define DEFAULT_RX_BUFF_LEN 2
u16 rx_buff = DEFAULT_RX_BUFF_LEN;
module_param(rx_buff, ushort, 0444);
MODULE_PARM_DESC(rx_buff, "Set rx_buff size in KB; buffer len must be a power of 2 in range 2 - 16 (default=2KB)");
static u32 set_lro_timer;
module_param(set_lro_timer, uint, 0444);
MODULE_PARM_DESC(set_lro_timer, "Set lro timer in microseconds, valid range is 1 - 1024, default is 16");
static unsigned char set_link_status_follow = HINIC_LINK_FOLLOW_STATUS_MAX;
module_param(set_link_status_follow, byte, 0444);
MODULE_PARM_DESC(set_link_status_follow, "Set link status follow port status. 0 - default, 1 - follow, 2 - separate, other - unset. (default unset)");
static unsigned int lro_replenish_thld = 256;
module_param(lro_replenish_thld, uint, 0444);
MODULE_PARM_DESC(lro_replenish_thld, "Number of wqes for lro buffer replenishment (default=256)");
static bool l2nic_interrupt_switch = true;
module_param(l2nic_interrupt_switch, bool, 0644);
MODULE_PARM_DESC(l2nic_interrupt_switch, "Control whether to toggle the l2nic io interrupt state (default=true)");
static unsigned char lro_en_status = HINIC_LRO_STATUS_UNSET;
module_param(lro_en_status, byte, 0444);
MODULE_PARM_DESC(lro_en_status, "lro enable status. 0 - disable, 1 - enable, other - unset. (default unset)");
static unsigned char qp_pending_limit_low = HINIC_RX_PENDING_LIMIT_LOW;
module_param(qp_pending_limit_low, byte, 0444);
MODULE_PARM_DESC(qp_pending_limit_low, "MSI-X adaptive low coalesce pending limit, range is 0 - 255");
static unsigned char qp_coalesc_timer_low = HINIC_RX_COAL_TIME_LOW;
module_param(qp_coalesc_timer_low, byte, 0444);
MODULE_PARM_DESC(qp_coalesc_timer_low, "MSI-X adaptive low coalesce time, range is 0 - 255");
static unsigned char qp_pending_limit_high = HINIC_RX_PENDING_LIMIT_HIGH;
module_param(qp_pending_limit_high, byte, 0444);
MODULE_PARM_DESC(qp_pending_limit_high, "MSI-X adaptive high coalesce pending limit, range is 0 - 255");
static unsigned char qp_coalesc_timer_high = HINIC_RX_COAL_TIME_HIGH;
module_param(qp_coalesc_timer_high, byte, 0444);
MODULE_PARM_DESC(qp_coalesc_timer_high, "MSI-X adaptive high coalesce time, range is 0 - 255");
#define HINIC_NIC_DEV_WQ_NAME "hinic_nic_dev_wq"
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_LINK | \
NETIF_MSG_RX_ERR)
#define QID_MASKED(q_id, nic_dev) ((q_id) & ((nic_dev)->num_qps - 1))
#define VLAN_BITMAP_BYTE_SIZE(nic_dev) (sizeof(*(nic_dev)->vlan_bitmap))
#define VLAN_BITMAP_BITS_SIZE(nic_dev) (VLAN_BITMAP_BYTE_SIZE(nic_dev) * 8)
#define VLAN_NUM_BITMAPS(nic_dev) (VLAN_N_VID / \
VLAN_BITMAP_BITS_SIZE(nic_dev))
#define VLAN_BITMAP_SIZE(nic_dev) (VLAN_N_VID / \
VLAN_BITMAP_BYTE_SIZE(nic_dev))
#define VID_LINE(nic_dev, vid) ((vid) / VLAN_BITMAP_BITS_SIZE(nic_dev))
#define VID_COL(nic_dev, vid) ((vid) & (VLAN_BITMAP_BITS_SIZE(nic_dev) - 1))
enum hinic_rx_mod {
HINIC_RX_MODE_UC = 1 << 0,
HINIC_RX_MODE_MC = 1 << 1,
HINIC_RX_MODE_BC = 1 << 2,
HINIC_RX_MODE_MC_ALL = 1 << 3,
HINIC_RX_MODE_PROMISC = 1 << 4,
};
enum hinic_rx_buff_len {
RX_BUFF_VALID_2KB = 2,
RX_BUFF_VALID_4KB = 4,
RX_BUFF_VALID_8KB = 8,
RX_BUFF_VALID_16KB = 16,
};
#define HINIC_AVG_PKT_SMALL 256U
#define HINIC_MODERATONE_DELAY HZ
#define CONVERT_UNIT 1024
#define HINIC_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT 2
#define HINIC_VLAN_CLEAR_OFFLOAD (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \
NETIF_F_SCTP_CRC | NETIF_F_RXCSUM | \
NETIF_F_ALL_TSO)
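/* Netdev notifier: strip hw offloads from stacked VLAN devices. The
 * second VLAN layer loses vlan_features; deeper layers lose the
 * offload features entirely.
 */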
int hinic_netdev_event(struct notifier_block *notifier,
unsigned long event, void *ptr)
{
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
struct net_device *real_dev, *ret;
struct hinic_nic_dev *nic_dev;
u16 vlan_depth;
if (!is_vlan_dev(ndev))
return NOTIFY_DONE;
dev_hold(ndev);
switch (event) {
case NETDEV_REGISTER:
real_dev = vlan_dev_real_dev(ndev);
nic_dev = hinic_get_uld_dev_by_ifname(real_dev->name,
SERVICE_T_NIC);
if (!nic_dev)
goto out;
vlan_depth = 1;
ret = vlan_dev_priv(ndev)->real_dev;
while (is_vlan_dev(ret)) {
ret = vlan_dev_priv(ret)->real_dev;
vlan_depth++;
}
if (vlan_depth == HINIC_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT) {
ndev->vlan_features &= (~HINIC_VLAN_CLEAR_OFFLOAD);
} else if (vlan_depth > HINIC_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT) {
ndev->hw_features &= (~HINIC_VLAN_CLEAR_OFFLOAD);
ndev->features &= (~HINIC_VLAN_CLEAR_OFFLOAD);
}
break;
default:
break;
	}
out:
dev_put(ndev);
return NOTIFY_DONE;
}
/* used for netdev notifier register/unregister */
DEFINE_MUTEX(g_hinic_netdev_notifiers_mutex);
static int hinic_netdev_notifiers_ref_cnt;
static struct notifier_block hinic_netdev_notifier = {
.notifier_call = hinic_netdev_event,
};
static void hinic_register_notifier(struct hinic_nic_dev *nic_dev)
{
int err;
mutex_lock(&g_hinic_netdev_notifiers_mutex);
hinic_netdev_notifiers_ref_cnt++;
if (hinic_netdev_notifiers_ref_cnt == 1) {
err = register_netdevice_notifier(&hinic_netdev_notifier);
if (err) {
hinic_info(nic_dev, drv, "Register netdevice notifier failed, err: %d\n",
err);
hinic_netdev_notifiers_ref_cnt--;
}
}
mutex_unlock(&g_hinic_netdev_notifiers_mutex);
}
static void hinic_unregister_notifier(struct hinic_nic_dev *nic_dev)
{
mutex_lock(&g_hinic_netdev_notifiers_mutex);
if (hinic_netdev_notifiers_ref_cnt == 1)
unregister_netdevice_notifier(&hinic_netdev_notifier);
if (hinic_netdev_notifiers_ref_cnt)
hinic_netdev_notifiers_ref_cnt--;
mutex_unlock(&g_hinic_netdev_notifiers_mutex);
}
void hinic_link_status_change(struct hinic_nic_dev *nic_dev, bool status)
{
struct net_device *netdev = nic_dev->netdev;
if (!test_bit(HINIC_INTF_UP, &nic_dev->flags) ||
test_bit(HINIC_LP_TEST, &nic_dev->flags))
return;
if (status) {
if (netif_carrier_ok(netdev))
return;
nic_dev->link_status = status;
netif_carrier_on(netdev);
nicif_info(nic_dev, link, netdev, "Link is up\n");
} else {
if (!netif_carrier_ok(netdev))
return;
nic_dev->link_status = status;
netif_carrier_off(netdev);
nicif_info(nic_dev, link, netdev, "Link is down\n");
}
}
static void hinic_heart_lost(struct hinic_nic_dev *nic_dev)
{
nic_dev->heart_status = false;
}
static int hinic_setup_qps_resources(struct hinic_nic_dev *nic_dev)
{
struct net_device *netdev = nic_dev->netdev;
int err;
err = hinic_setup_all_tx_resources(netdev);
if (err) {
nicif_err(nic_dev, drv, netdev,
"Failed to create Tx queues\n");
return err;
}
err = hinic_setup_all_rx_resources(netdev, nic_dev->qps_irq_info);
if (err) {
nicif_err(nic_dev, drv, netdev,
"Failed to create Rx queues\n");
goto create_rxqs_err;
}
return 0;
create_rxqs_err:
hinic_free_all_tx_resources(netdev);
return err;
}
static int hinic_configure(struct hinic_nic_dev *nic_dev)
{
struct net_device *netdev = nic_dev->netdev;
int err;
/* rx rss init */
err = hinic_rx_configure(netdev);
if (err) {
nicif_err(nic_dev, drv, netdev, "Failed to configure rx\n");
return err;
}
return 0;
}
static void hinic_remove_configure(struct hinic_nic_dev *nic_dev)
{
hinic_rx_remove_configure(nic_dev->netdev);
}
static void hinic_setup_dcb_qps(struct hinic_nic_dev *nic_dev, u16 max_qps)
{
struct net_device *netdev = nic_dev->netdev;
u16 num_rss;
u8 num_tcs;
u8 i;
if (!test_bit(HINIC_DCB_ENABLE, &nic_dev->flags) ||
!test_bit(HINIC_RSS_ENABLE, &nic_dev->flags))
return;
num_tcs = (u8)netdev_get_num_tc(netdev);
	/* For now, changing num_tcs is not supported */
if (num_tcs != nic_dev->max_cos || max_qps < num_tcs) {
nicif_err(nic_dev, drv, netdev, "Invalid num_tcs: %d or num_qps: %d, disable DCB\n",
num_tcs, max_qps);
netdev_reset_tc(netdev);
clear_bit(HINIC_DCB_ENABLE, &nic_dev->flags);
		/* if we can't enable rss or get enough num_qps, we need to
		 * sync the default configuration to hw
		 */
hinic_configure_dcb(netdev);
} else {
		/* We bind the sq to a cos, not to a tc */
num_rss = (u16)(max_qps / nic_dev->max_cos);
num_rss = min_t(u16, num_rss, nic_dev->rss_limit);
for (i = 0; i < nic_dev->max_cos; i++)
netdev_set_tc_queue(netdev, i, num_rss,
(u16)(num_rss * i));
nic_dev->num_rss = num_rss;
nic_dev->num_qps = (u16)(num_tcs * num_rss);
}
}
/* determine num_qps from rss_tmpl_id/irq_num/dcb_en */
static int hinic_setup_num_qps(struct hinic_nic_dev *nic_dev)
{
struct net_device *netdev = nic_dev->netdev;
u32 irq_size;
u16 resp_irq_num, i;
int err;
if (test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) {
nic_dev->num_rss = nic_dev->rss_limit;
nic_dev->num_qps = nic_dev->rss_limit;
} else {
nic_dev->num_rss = 0;
nic_dev->num_qps = 1;
}
hinic_setup_dcb_qps(nic_dev, nic_dev->max_qps);
irq_size = sizeof(*nic_dev->qps_irq_info) * nic_dev->num_qps;
if (!irq_size) {
nicif_err(nic_dev, drv, netdev, "Cannot allocate zero size entries\n");
return -EINVAL;
}
nic_dev->qps_irq_info = kzalloc(irq_size, GFP_KERNEL);
if (!nic_dev->qps_irq_info) {
nicif_err(nic_dev, drv, netdev, "Failed to alloc qps_irq_info\n");
return -ENOMEM;
}
err = hinic_alloc_irqs(nic_dev->hwdev, SERVICE_T_NIC, nic_dev->num_qps,
nic_dev->qps_irq_info, &resp_irq_num);
if (err) {
nicif_err(nic_dev, drv, netdev, "Failed to alloc irqs\n");
kfree(nic_dev->qps_irq_info);
return err;
}
	/* fewer irqs available than requested queues; shrink the queue count */
if (resp_irq_num < nic_dev->num_qps) {
nic_dev->num_qps = resp_irq_num;
nic_dev->num_rss = nic_dev->num_qps;
hinic_setup_dcb_qps(nic_dev, nic_dev->num_qps);
		nicif_warn(nic_dev, drv, netdev,
			   "Cannot get enough irqs, adjusting num_qps to %d\n",
			   nic_dev->num_qps);
		/* after adjusting num_qps, free the remaining irqs */
for (i = nic_dev->num_qps; i < resp_irq_num; i++)
hinic_free_irq(nic_dev->hwdev, SERVICE_T_NIC,
nic_dev->qps_irq_info[i].irq_id);
}
nicif_info(nic_dev, drv, netdev, "Finally num_qps: %d, num_rss: %d\n",
nic_dev->num_qps, nic_dev->num_rss);
return 0;
}
static void hinic_destroy_num_qps(struct hinic_nic_dev *nic_dev)
{
u16 i;
for (i = 0; i < nic_dev->num_qps; i++)
hinic_free_irq(nic_dev->hwdev, SERVICE_T_NIC,
nic_dev->qps_irq_info[i].irq_id);
kfree(nic_dev->qps_irq_info);
}
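/* NAPI poll: drain rx and tx with the given budget, re-poll once with
 * the leftover budget to close the race with the hw resend bit, then
 * complete NAPI and re-enable the queue interrupt.
 */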
static int hinic_poll(struct napi_struct *napi, int budget)
{
int tx_pkts, rx_pkts;
struct hinic_irq *irq_cfg = container_of(napi, struct hinic_irq, napi);
struct hinic_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev);
rx_pkts = hinic_rx_poll(irq_cfg->rxq, budget);
tx_pkts = hinic_tx_poll(irq_cfg->txq, budget);
if (tx_pkts >= budget || rx_pkts >= budget)
return budget;
set_bit(HINIC_RESEND_ON, &irq_cfg->intr_flag);
rx_pkts += hinic_rx_poll(irq_cfg->rxq, budget - rx_pkts);
tx_pkts += hinic_tx_poll(irq_cfg->txq, budget - tx_pkts);
if (rx_pkts >= budget || tx_pkts >= budget) {
clear_bit(HINIC_RESEND_ON, &irq_cfg->intr_flag);
return budget;
}
napi_complete(napi);
if (!test_and_set_bit(HINIC_INTR_ON, &irq_cfg->intr_flag)) {
if (!HINIC_FUNC_IS_VF(nic_dev->hwdev))
hinic_set_msix_state(nic_dev->hwdev,
irq_cfg->msix_entry_idx,
HINIC_MSIX_ENABLE);
else if (!nic_dev->in_vm &&
(hinic_get_func_mode(nic_dev->hwdev) ==
FUNC_MOD_NORMAL_HOST))
enable_irq(irq_cfg->irq_id);
}
return max(tx_pkts, rx_pkts);
}
static void qp_add_napi(struct hinic_irq *irq_cfg)
{
struct hinic_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev);
netif_napi_add(nic_dev->netdev, &irq_cfg->napi,
hinic_poll, nic_dev->poll_weight);
napi_enable(&irq_cfg->napi);
}
static void qp_del_napi(struct hinic_irq *irq_cfg)
{
napi_disable(&irq_cfg->napi);
netif_napi_del(&irq_cfg->napi);
}
static irqreturn_t qp_irq(int irq, void *data)
{
struct hinic_irq *irq_cfg = (struct hinic_irq *)data;
struct hinic_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev);
u16 msix_entry_idx = irq_cfg->msix_entry_idx;
if (napi_schedule_prep(&irq_cfg->napi)) {
if (l2nic_interrupt_switch) {
			/* Disable the interrupt until NAPI completes */
if (!HINIC_FUNC_IS_VF(nic_dev->hwdev)) {
hinic_set_msix_state(nic_dev->hwdev,
msix_entry_idx,
HINIC_MSIX_DISABLE);
} else if (!nic_dev->in_vm &&
(hinic_get_func_mode(nic_dev->hwdev) ==
FUNC_MOD_NORMAL_HOST)) {
disable_irq_nosync(irq_cfg->irq_id);
}
clear_bit(HINIC_INTR_ON, &irq_cfg->intr_flag);
}
hinic_misx_intr_clear_resend_bit(nic_dev->hwdev,
msix_entry_idx, 1);
clear_bit(HINIC_RESEND_ON, &irq_cfg->intr_flag);
__napi_schedule(&irq_cfg->napi);
} else if (!test_bit(HINIC_RESEND_ON, &irq_cfg->intr_flag)) {
hinic_misx_intr_clear_resend_bit(nic_dev->hwdev, msix_entry_idx,
1);
}
return IRQ_HANDLED;
}
static int hinic_request_irq(struct hinic_irq *irq_cfg, u16 q_id)
{
struct hinic_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev);
struct nic_interrupt_info info = {0};
int err;
qp_add_napi(irq_cfg);
info.msix_index = irq_cfg->msix_entry_idx;
info.lli_set = 0;
info.interrupt_coalesc_set = 1;
info.pending_limt = nic_dev->intr_coalesce[q_id].pending_limt;
info.coalesc_timer_cfg =
nic_dev->intr_coalesce[q_id].coalesce_timer_cfg;
info.resend_timer_cfg = nic_dev->intr_coalesce[q_id].resend_timer_cfg;
nic_dev->rxqs[q_id].last_coalesc_timer_cfg =
nic_dev->intr_coalesce[q_id].coalesce_timer_cfg;
nic_dev->rxqs[q_id].last_pending_limt =
nic_dev->intr_coalesce[q_id].pending_limt;
err = hinic_set_interrupt_cfg(nic_dev->hwdev, info);
if (err) {
nicif_err(nic_dev, drv, irq_cfg->netdev,
"Failed to set RX interrupt coalescing attribute\n");
qp_del_napi(irq_cfg);
return err;
}
err = request_irq(irq_cfg->irq_id, &qp_irq, 0,
irq_cfg->irq_name, irq_cfg);
if (err) {
nicif_err(nic_dev, drv, irq_cfg->netdev, "Failed to request Rx irq\n");
qp_del_napi(irq_cfg);
return err;
}
	/* assign the affinity mask for this irq */
irq_set_affinity_hint(irq_cfg->irq_id, &irq_cfg->affinity_mask);
return 0;
}
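/* Program new coalescing parameters for one rx queue; the command is
 * skipped when nothing changed or the queue is not active.
 */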
static int set_interrupt_moder(struct hinic_nic_dev *nic_dev, u16 q_id,
u8 coalesc_timer_cfg, u8 pending_limt)
{
struct nic_interrupt_info interrupt_info = {0};
int err;
if (coalesc_timer_cfg == nic_dev->rxqs[q_id].last_coalesc_timer_cfg &&
pending_limt == nic_dev->rxqs[q_id].last_pending_limt)
return 0;
	/* netdev is not running or the qp is not in use;
	 * no need to program coalescing into hw
	 */
if (!test_bit(HINIC_INTF_UP, &nic_dev->flags) ||
q_id >= nic_dev->num_qps)
return 0;
interrupt_info.lli_set = 0;
interrupt_info.interrupt_coalesc_set = 1;
interrupt_info.coalesc_timer_cfg = coalesc_timer_cfg;
interrupt_info.pending_limt = pending_limt;
interrupt_info.msix_index = nic_dev->irq_cfg[q_id].msix_entry_idx;
interrupt_info.resend_timer_cfg =
nic_dev->intr_coalesce[q_id].resend_timer_cfg;
err = hinic_set_interrupt_cfg(nic_dev->hwdev, interrupt_info);
if (err) {
		nicif_err(nic_dev, drv, nic_dev->netdev,
			  "Failed to modify moderation for queue: %d\n", q_id);
} else {
nic_dev->rxqs[q_id].last_coalesc_timer_cfg = coalesc_timer_cfg;
nic_dev->rxqs[q_id].last_pending_limt = pending_limt;
}
return err;
}
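/* Linearly interpolate the coalescing timer (and, inside a VM, the
 * pending limit) between the configured low and high profiles
 * according to the measured packet rate.
 */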
static void __calc_coal_para(struct hinic_nic_dev *nic_dev,
struct hinic_intr_coal_info *q_coal, u64 rate,
u8 *coalesc_timer_cfg, u8 *pending_limt)
{
if (rate < q_coal->pkt_rate_low) {
*coalesc_timer_cfg = q_coal->rx_usecs_low;
*pending_limt = q_coal->rx_pending_limt_low;
} else if (rate > q_coal->pkt_rate_high) {
*coalesc_timer_cfg = q_coal->rx_usecs_high;
*pending_limt = q_coal->rx_pending_limt_high;
} else {
*coalesc_timer_cfg =
(u8)((rate - q_coal->pkt_rate_low) *
(q_coal->rx_usecs_high -
q_coal->rx_usecs_low) /
(q_coal->pkt_rate_high -
q_coal->pkt_rate_low) +
q_coal->rx_usecs_low);
if (nic_dev->in_vm)
*pending_limt = (u8)((rate - q_coal->pkt_rate_low) *
(q_coal->rx_pending_limt_high -
q_coal->rx_pending_limt_low) /
(q_coal->pkt_rate_high -
q_coal->pkt_rate_low) +
q_coal->rx_pending_limt_low);
else
*pending_limt = q_coal->rx_pending_limt_low;
}
}
static void update_queue_coal(struct hinic_nic_dev *nic_dev, u16 qid,
u64 rate, u64 avg_pkt_size, u64 tx_rate)
{
struct hinic_intr_coal_info *q_coal;
u8 coalesc_timer_cfg, pending_limt;
q_coal = &nic_dev->intr_coalesce[qid];
if ((rate > HINIC_RX_RATE_THRESH &&
avg_pkt_size > HINIC_AVG_PKT_SMALL) ||
(nic_dev->in_vm && rate > HINIC_RX_RATE_THRESH)) {
__calc_coal_para(nic_dev, q_coal, rate,
&coalesc_timer_cfg, &pending_limt);
} else {
coalesc_timer_cfg = HINIC_LOWEST_LATENCY;
pending_limt = q_coal->rx_pending_limt_low;
}
set_interrupt_moder(nic_dev, qid, coalesc_timer_cfg,
pending_limt);
}
#define SDI_VM_PPS_3W 30000
#define SDI_VM_PPS_5W 50000
#define SDI_VM_BPS_100MB 12500000
#define SDI_VM_BPS_1GB 125000000
static void update_queue_coal_sdi_vm(struct hinic_nic_dev *nic_dev,
u16 qid, u64 rx_pps, u64 rx_bps,
u64 tx_pps, u64 tx_bps)
{
struct hinic_intr_coal_info *q_coal = NULL;
u8 coalesc_timer_cfg, pending_limt;
q_coal = &nic_dev->intr_coalesce[qid];
if (qp_coalesc_use_drv_params_switch == 0) {
if (rx_pps < SDI_VM_PPS_3W &&
tx_pps < SDI_VM_PPS_3W &&
rx_bps < SDI_VM_BPS_100MB &&
tx_bps < SDI_VM_BPS_100MB) {
set_interrupt_moder(nic_dev, qid, 0, 0);
} else if (tx_pps > SDI_VM_PPS_3W &&
tx_pps < SDI_VM_PPS_5W &&
tx_bps > SDI_VM_BPS_1GB) {
set_interrupt_moder(nic_dev, qid, 7, 7);
} else {
__calc_coal_para(nic_dev, q_coal, rx_pps,
&coalesc_timer_cfg,
&pending_limt);
set_interrupt_moder(nic_dev, qid,
coalesc_timer_cfg,
pending_limt);
}
} else {
__calc_coal_para(nic_dev, q_coal, rx_pps,
&coalesc_timer_cfg,
&pending_limt);
set_interrupt_moder(nic_dev, qid, coalesc_timer_cfg,
pending_limt);
}
}
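/* Periodic work that samples per-queue packet and byte rates and
 * adapts the interrupt moderation settings accordingly.
 */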
static void hinic_auto_moderation_work(struct work_struct *work)
{
struct delayed_work *delay = to_delayed_work(work);
struct hinic_nic_dev *nic_dev = container_of(delay,
struct hinic_nic_dev,
moderation_task);
unsigned long period = (unsigned long)(jiffies -
nic_dev->last_moder_jiffies);
u64 rx_packets, rx_bytes, rx_pkt_diff, rate, avg_pkt_size;
u64 tx_packets, tx_bytes, tx_pkt_diff, tx_rate, rx_bps, tx_bps;
u16 qid;
if (!test_bit(HINIC_INTF_UP, &nic_dev->flags))
return;
queue_delayed_work(nic_dev->workq, &nic_dev->moderation_task,
HINIC_MODERATONE_DELAY);
if (!nic_dev->adaptive_rx_coal || !period)
return;
for (qid = 0; qid < nic_dev->num_qps; qid++) {
rx_packets = nic_dev->rxqs[qid].rxq_stats.packets;
rx_bytes = nic_dev->rxqs[qid].rxq_stats.bytes;
tx_packets = nic_dev->txqs[qid].txq_stats.packets;
tx_bytes = nic_dev->txqs[qid].txq_stats.bytes;
rx_pkt_diff =
rx_packets - nic_dev->rxqs[qid].last_moder_packets;
avg_pkt_size = rx_pkt_diff ?
((unsigned long)(rx_bytes -
nic_dev->rxqs[qid].last_moder_bytes)) /
rx_pkt_diff : 0;
rate = rx_pkt_diff * HZ / period;
tx_pkt_diff =
tx_packets - nic_dev->txqs[qid].last_moder_packets;
tx_rate = tx_pkt_diff * HZ / period;
rx_bps = (unsigned long)(rx_bytes -
nic_dev->rxqs[qid].last_moder_bytes)
* HZ / period;
tx_bps = (unsigned long)(tx_bytes -
nic_dev->txqs[qid].last_moder_bytes)
* HZ / period;
if ((nic_dev->is_vm_slave && nic_dev->in_vm) ||
nic_dev->is_bm_slave) {
update_queue_coal_sdi_vm(nic_dev, qid, rate, rx_bps,
tx_rate, tx_bps);
} else {
update_queue_coal(nic_dev, qid, rate, avg_pkt_size,
tx_rate);
}
nic_dev->rxqs[qid].last_moder_packets = rx_packets;
nic_dev->rxqs[qid].last_moder_bytes = rx_bytes;
nic_dev->txqs[qid].last_moder_packets = tx_packets;
nic_dev->txqs[qid].last_moder_bytes = tx_bytes;
}
nic_dev->last_moder_jiffies = jiffies;
}
static void hinic_release_irq(struct hinic_irq *irq_cfg)
{
irq_set_affinity_hint(irq_cfg->irq_id, NULL);
synchronize_irq(irq_cfg->irq_id);
free_irq(irq_cfg->irq_id, irq_cfg);
qp_del_napi(irq_cfg);
}
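/* Request one irq per queue pair, bind it to a CPU local to the
 * device's NUMA node (unless affinity is forced), and enable its
 * MSI-X entry; on failure, roll back all queues initialized so far.
 */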
static int hinic_qps_irq_init(struct hinic_nic_dev *nic_dev)
{
struct pci_dev *pdev = nic_dev->pdev;
struct irq_info *qp_irq_info;
struct hinic_irq *irq_cfg;
u16 q_id, i;
u32 local_cpu;
int err;
nic_dev->irq_cfg = kcalloc(nic_dev->num_qps, sizeof(*nic_dev->irq_cfg),
GFP_KERNEL);
if (!nic_dev->irq_cfg) {
nic_err(&pdev->dev, "Failed to alloc irq cfg\n");
return -ENOMEM;
}
for (q_id = 0; q_id < nic_dev->num_qps; q_id++) {
qp_irq_info = &nic_dev->qps_irq_info[q_id];
irq_cfg = &nic_dev->irq_cfg[q_id];
irq_cfg->irq_id = qp_irq_info->irq_id;
irq_cfg->msix_entry_idx = qp_irq_info->msix_entry_idx;
irq_cfg->netdev = nic_dev->netdev;
irq_cfg->txq = &nic_dev->txqs[q_id];
irq_cfg->rxq = &nic_dev->rxqs[q_id];
nic_dev->rxqs[q_id].irq_cfg = irq_cfg;
if (nic_dev->force_affinity) {
irq_cfg->affinity_mask = nic_dev->affinity_mask;
} else {
local_cpu =
cpumask_local_spread(q_id,
dev_to_node(&pdev->dev));
cpumask_set_cpu(local_cpu, &irq_cfg->affinity_mask);
}
err = snprintf(irq_cfg->irq_name, sizeof(irq_cfg->irq_name),
"%s_qp%d", nic_dev->netdev->name, q_id);
if (err <= 0 || err >= (int)sizeof(irq_cfg->irq_name)) {
nic_err(&pdev->dev,
"Failed snprintf irq_name, function return(%d) and dest_len(%d)\n",
err, (int)sizeof(irq_cfg->irq_name));
err = -EINVAL;
goto req_tx_irq_err;
}
set_bit(HINIC_INTR_ON, &irq_cfg->intr_flag);
err = hinic_request_irq(irq_cfg, q_id);
if (err) {
nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to request Rx irq\n");
goto req_tx_irq_err;
}
hinic_set_msix_state(nic_dev->hwdev,
irq_cfg->msix_entry_idx,
HINIC_MSIX_ENABLE);
}
INIT_DELAYED_WORK(&nic_dev->moderation_task,
hinic_auto_moderation_work);
return 0;
req_tx_irq_err:
for (i = 0; i < q_id; i++) {
hinic_set_msix_state(nic_dev->hwdev,
nic_dev->irq_cfg[i].msix_entry_idx,
HINIC_MSIX_DISABLE);
hinic_release_irq(&nic_dev->irq_cfg[i]);
}
kfree(nic_dev->irq_cfg);
return err;
}
static void hinic_qps_irq_deinit(struct hinic_nic_dev *nic_dev)
{
u16 q_id;
for (q_id = 0; q_id < nic_dev->num_qps; q_id++) {
hinic_set_msix_state(nic_dev->hwdev,
nic_dev->irq_cfg[q_id].msix_entry_idx,
HINIC_MSIX_DISABLE);
hinic_release_irq(&nic_dev->irq_cfg[q_id]);
}
kfree(nic_dev->irq_cfg);
}
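/* Disable the port and latch force_port_disable so that
 * hinic_maybe_set_port_state() will refuse to re-enable it.
 */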
int hinic_force_port_disable(struct hinic_nic_dev *nic_dev)
{
int err;
down(&nic_dev->port_state_sem);
err = hinic_set_port_enable(nic_dev->hwdev, false);
if (!err)
nic_dev->force_port_disable = true;
up(&nic_dev->port_state_sem);
return err;
}
int hinic_force_set_port_state(struct hinic_nic_dev *nic_dev, bool enable)
{
int err = 0;
down(&nic_dev->port_state_sem);
nic_dev->force_port_disable = false;
err = hinic_set_port_enable(nic_dev->hwdev, enable);
up(&nic_dev->port_state_sem);
return err;
}
int hinic_maybe_set_port_state(struct hinic_nic_dev *nic_dev, bool enable)
{
int err;
down(&nic_dev->port_state_sem);
/* Do nothing when the port is forcibly disabled:
 * the port goes down via hinic_force_port_disable(),
 * and must not be re-enabled while in force mode
 */
if (nic_dev->force_port_disable) {
up(&nic_dev->port_state_sem);
return 0;
}
err = hinic_set_port_enable(nic_dev->hwdev, enable);
up(&nic_dev->port_state_sem);
return err;
}
static void hinic_print_link_message(struct hinic_nic_dev *nic_dev,
u8 link_status)
{
if (nic_dev->link_status == link_status)
return;
nic_dev->link_status = link_status;
nicif_info(nic_dev, link, nic_dev->netdev, "Link is %s\n",
(link_status ? "up" : "down"));
}
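/* ndo_open: bring-up order is num_qps -> queue pairs -> qp contexts ->
 * mtu -> txrx config -> qp irqs -> vport -> port, with a symmetric
 * teardown on any failure.
 */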
int hinic_open(struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
u8 link_status = 0;
int err;
if (test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
nicif_info(nic_dev, drv, netdev, "Netdev already open, do nothing\n");
return 0;
}
err = hinic_setup_num_qps(nic_dev);
if (err) {
nicif_err(nic_dev, drv, netdev, "Failed to setup num_qps\n");
return err;
}
err = hinic_create_qps(nic_dev->hwdev, nic_dev->num_qps,
nic_dev->sq_depth, nic_dev->rq_depth,
nic_dev->qps_irq_info, HINIC_MAX_SQ_BUFDESCS);
if (err) {
nicif_err(nic_dev, drv, netdev, "Failed to create queue pairs\n");
goto create_qps_err;
}
err = hinic_setup_qps_resources(nic_dev);
if (err)
goto setup_qps_resources_err;
err = hinic_init_qp_ctxts(nic_dev->hwdev);
if (err) {
nicif_err(nic_dev, drv, netdev, "Failed to init qp ctxts\n");
goto init_qp_ctxts_err;
}
err = hinic_set_port_mtu(nic_dev->hwdev, netdev->mtu);
if (err) {
nicif_err(nic_dev, drv, netdev, "Failed to set mtu\n");
goto mtu_err;
}
err = hinic_configure(nic_dev);
if (err) {
nicif_err(nic_dev, drv, netdev, "Failed to configure txrx\n");
goto cfg_err;
}
err = hinic_qps_irq_init(nic_dev);
if (err) {
nicif_err(nic_dev, drv, netdev, "Failed to qps irq init\n");
goto qps_irqs_init_err;
}
err = hinic_set_vport_enable(nic_dev->hwdev, true);
if (err) {
nicif_err(nic_dev, drv, netdev, "Failed to enable vport\n");
goto vport_enable_err;
}
err = hinic_maybe_set_port_state(nic_dev, true);
if (err) {
nicif_err(nic_dev, drv, netdev, "Failed to enable port\n");
goto port_enable_err;
}
set_bit(HINIC_INTF_UP, &nic_dev->flags);
netif_set_real_num_tx_queues(netdev, nic_dev->num_qps);
netif_set_real_num_rx_queues(netdev, nic_dev->num_qps);
netif_tx_wake_all_queues(netdev);
queue_delayed_work(nic_dev->workq, &nic_dev->moderation_task,
HINIC_MODERATONE_DELAY);
err = hinic_get_link_state(nic_dev->hwdev, &link_status);
if (!err && link_status) {
hinic_update_pf_bw(nic_dev->hwdev);
netif_carrier_on(netdev);
}
hinic_print_link_message(nic_dev, link_status);
if (!HINIC_FUNC_IS_VF(nic_dev->hwdev))
hinic_notify_all_vfs_link_changed(nic_dev->hwdev, link_status);
nicif_info(nic_dev, drv, nic_dev->netdev, "Netdev is up\n");
return 0;
port_enable_err:
hinic_set_vport_enable(nic_dev->hwdev, false);
vport_enable_err:
hinic_flush_sq_res(nic_dev->hwdev);
/* Wait 100ms after vport disable; after that no packets will be sent to the host */
msleep(100);
hinic_qps_irq_deinit(nic_dev);
qps_irqs_init_err:
hinic_remove_configure(nic_dev);
cfg_err:
mtu_err:
hinic_free_qp_ctxts(nic_dev->hwdev);
init_qp_ctxts_err:
hinic_free_all_rx_resources(netdev);
hinic_free_all_tx_resources(netdev);
setup_qps_resources_err:
hinic_free_qps(nic_dev->hwdev);
create_qps_err:
hinic_destroy_num_qps(nic_dev);
return err;
}
int hinic_close(struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
if (!test_and_clear_bit(HINIC_INTF_UP, &nic_dev->flags)) {
nicif_info(nic_dev, drv, netdev, "Netdev already close, do nothing\n");
return 0;
}
netif_carrier_off(netdev);
netif_tx_disable(netdev);
cancel_delayed_work_sync(&nic_dev->moderation_task);
if (hinic_get_chip_present_flag(nic_dev->hwdev)) {
if (!HINIC_FUNC_IS_VF(nic_dev->hwdev))
hinic_notify_all_vfs_link_changed(nic_dev->hwdev, 0);
hinic_maybe_set_port_state(nic_dev, false);
hinic_set_vport_enable(nic_dev->hwdev, false);
hinic_flush_txqs(netdev);
hinic_flush_sq_res(nic_dev->hwdev);
/* Wait 100ms after vport disable; after that
 * no packets will be sent to the host
 */
msleep(100);
}
hinic_qps_irq_deinit(nic_dev);
hinic_remove_configure(nic_dev);
if (hinic_get_chip_present_flag(nic_dev->hwdev))
hinic_free_qp_ctxts(nic_dev->hwdev);
mutex_lock(&nic_dev->nic_mutex);
hinic_free_all_rx_resources(netdev);
hinic_free_all_tx_resources(netdev);
hinic_free_qps(nic_dev->hwdev);
hinic_destroy_num_qps(nic_dev);
mutex_unlock(&nic_dev->nic_mutex);
nicif_info(nic_dev, drv, nic_dev->netdev, "Netdev is down\n");
return 0;
}
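/* Software Toeplitz hash over (sip, dip, sport|dport): for each set
 * input bit i, XOR in the 32-bit window of the RSS key starting at
 * bit i. Used to pick the tx queue that matches the hw RSS rx hash
 * when HINIC_SAME_RXTX is set.
 */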
static inline u32 calc_toeplitz_rss(u32 sip, u32 dip, u32 sport, u32 dport,
const u32 *rss_key)
{
u32 i, port, rss = 0;
port = (sport << 16) | dport;
/* The key - SIP, DIP, SPORT, DPORT */
for (i = 0; i < 32; i++)
if (sip & ((u32)1 << (u32)(31 - i)))
rss ^= (rss_key[0] << i) |
(u32)((u64)rss_key[1] >> (32 - i));
for (i = 0; i < 32; i++)
if (dip & ((u32)1 << (u32)(31 - i)))
rss ^= (rss_key[1] << i) |
(u32)((u64)rss_key[2] >> (32 - i));
for (i = 0; i < 32; i++)
if (port & ((u32)1 << (u32)(31 - i)))
rss ^= (rss_key[2] << i) |
(u32)((u64)rss_key[3] >> (32 - i));
return rss;
}
static u16 select_queue_by_toeplitz(struct net_device *dev,
struct sk_buff *skb,
unsigned int num_tx_queues)
{
struct hinic_nic_dev *nic_dev = netdev_priv(dev);
struct tcphdr *tcphdr;
struct iphdr *iphdr;
u32 hash = 0;
if (skb_rx_queue_recorded(skb)) {
hash = skb_get_rx_queue(skb);
while (unlikely(hash >= num_tx_queues))
hash -= num_tx_queues;
return (u16)hash;
}
/*lint -save -e778*/
if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
iphdr = ip_hdr(skb);
if (iphdr->protocol == IPPROTO_UDP ||
iphdr->protocol == IPPROTO_TCP) {
tcphdr = tcp_hdr(skb);
hash = calc_toeplitz_rss(ntohl(iphdr->daddr),
ntohl(iphdr->saddr),
ntohs(tcphdr->dest),
ntohs(tcphdr->source),
nic_dev->rss_hkey_user_be);
}
}
/*lint -restore*/
return (u16)nic_dev->rss_indir_user[hash & 0xFF];
}
static u16 hinic_select_queue(struct net_device *netdev, struct sk_buff *skb,
struct net_device *sb_dev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
skb->priority = skb->vlan_tci >> VLAN_PRIO_SHIFT;
if (netdev_get_num_tc(netdev) || !nic_dev->rss_hkey_user_be)
goto fallback;
if (nic_dev->rss_hash_engine == HINIC_RSS_HASH_ENGINE_TYPE_TOEP &&
test_bit(HINIC_SAME_RXTX, &nic_dev->flags))
return select_queue_by_toeplitz(netdev, skb,
netdev->real_num_tx_queues);
fallback:
return netdev_pick_tx(netdev, skb, NULL);
}
static void hinic_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
struct hinic_txq_stats *txq_stats;
struct hinic_rxq_stats *rxq_stats;
struct hinic_txq *txq;
struct hinic_rxq *rxq;
u64 bytes, packets, dropped, errors;
unsigned int start;
int i;
bytes = 0;
packets = 0;
dropped = 0;
for (i = 0; i < nic_dev->max_qps; i++) {
if (!nic_dev->txqs)
break;
txq = &nic_dev->txqs[i];
txq_stats = &txq->txq_stats;
do {
start = u64_stats_fetch_begin(&txq_stats->syncp);
bytes += txq_stats->bytes;
packets += txq_stats->packets;
dropped += txq_stats->dropped;
} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
}
stats->tx_packets = packets;
stats->tx_bytes = bytes;
stats->tx_dropped = dropped;
bytes = 0;
packets = 0;
errors = 0;
dropped = 0;
for (i = 0; i < nic_dev->max_qps; i++) {
if (!nic_dev->rxqs)
break;
rxq = &nic_dev->rxqs[i];
rxq_stats = &rxq->rxq_stats;
do {
start = u64_stats_fetch_begin(&rxq_stats->syncp);
bytes += rxq_stats->bytes;
packets += rxq_stats->packets;
errors += rxq_stats->csum_errors +
rxq_stats->other_errors;
dropped += rxq_stats->dropped;
} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
}
stats->rx_packets = packets;
stats->rx_bytes = bytes;
stats->rx_errors = errors;
stats->rx_dropped = dropped;
}
static void hinic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
u16 msix_idx;
u8 q_id;
HINIC_NIC_STATS_INC(nic_dev, netdev_tx_timeout);
nicif_err(nic_dev, drv, netdev, "Tx timeout\n");
for (q_id = 0; q_id < nic_dev->num_qps; q_id++) {
if (!netif_xmit_stopped(netdev_get_tx_queue(netdev, q_id)))
continue;
msix_idx = nic_dev->irq_cfg[q_id].msix_entry_idx;
nicif_info(nic_dev, drv, netdev,
"txq%d: sw_pi: %d, hw_ci: %d, sw_ci: %d, napi->state: 0x%lx, msix mask: %d, intr_flag: 0x%lx\n",
q_id, hinic_dbg_get_sq_pi(nic_dev->hwdev, q_id),
hinic_get_sq_hw_ci(nic_dev->hwdev, q_id),
hinic_get_sq_local_ci(nic_dev->hwdev, q_id),
nic_dev->irq_cfg[q_id].napi.state,
hinic_get_msix_state(nic_dev->hwdev, msix_idx),
nic_dev->irq_cfg[q_id].intr_flag);
}
}
static int hinic_change_mtu(struct net_device *netdev, int new_mtu)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
u32 mtu = (u32)new_mtu;
int err = 0;
err = hinic_set_port_mtu(nic_dev->hwdev, mtu);
if (err) {
nicif_err(nic_dev, drv, netdev, "Failed to change port mtu to %d\n",
new_mtu);
} else {
nicif_info(nic_dev, drv, nic_dev->netdev, "Change mtu from %d to %d\n",
netdev->mtu, new_mtu);
netdev->mtu = mtu;
}
return err;
}
static int hinic_set_mac_addr(struct net_device *netdev, void *addr)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
struct sockaddr *saddr = addr;
u16 func_id;
int err;
if (!FUNC_SUPPORT_CHANGE_MAC(nic_dev->hwdev)) {
nicif_warn(nic_dev, drv, netdev,
"Current function doesn't support setting the mac address\n");
return -EOPNOTSUPP;
}
if (!is_valid_ether_addr(saddr->sa_data))
return -EADDRNOTAVAIL;
if (ether_addr_equal(netdev->dev_addr, saddr->sa_data)) {
nicif_info(nic_dev, drv, netdev,
"Already using mac address %pM\n",
saddr->sa_data);
return 0;
}
err = hinic_global_func_id_get(nic_dev->hwdev, &func_id);
if (err)
return err;
err = hinic_update_mac(nic_dev->hwdev, netdev->dev_addr, saddr->sa_data,
0, func_id);
if (err) {
nicif_err(nic_dev, drv, netdev, "Failed to update mac, err: %d\n",
err);
return err == HINIC_PF_SET_VF_ALREADY ? -EPERM : err;
}
memcpy(netdev->dev_addr, saddr->sa_data, ETH_ALEN);
nicif_info(nic_dev, drv, netdev, "Set new mac address %pM\n",
saddr->sa_data);
return 0;
}
static int
hinic_vlan_rx_add_vid(struct net_device *netdev,
__always_unused __be16 proto,
u16 vid)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
unsigned long *vlan_bitmap = nic_dev->vlan_bitmap;
u16 func_id;
u32 col, line;
int err;
col = VID_COL(nic_dev, vid);
line = VID_LINE(nic_dev, vid);
err = hinic_global_func_id_get(nic_dev->hwdev, &func_id);
if (err)
goto end;
err = hinic_add_vlan(nic_dev->hwdev, vid, func_id);
if (err) {
nicif_err(nic_dev, drv, netdev, "Failed to add vlan%d\n", vid);
goto end;
}
set_bit(col, &vlan_bitmap[line]);
nicif_info(nic_dev, drv, netdev, "Add vlan %d\n", vid);
end:
return err;
}
static int
hinic_vlan_rx_kill_vid(struct net_device *netdev,
__always_unused __be16 proto,
u16 vid)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
unsigned long *vlan_bitmap = nic_dev->vlan_bitmap;
u16 func_id;
int err, col, line;
col = VID_COL(nic_dev, vid);
line = VID_LINE(nic_dev, vid);
err = hinic_global_func_id_get(nic_dev->hwdev, &func_id);
if (err)
goto end;
err = hinic_del_vlan(nic_dev->hwdev, vid, func_id);
if (err) {
nicif_err(nic_dev, drv, netdev, "Failed to delete vlan\n");
goto end;
}
clear_bit(col, &vlan_bitmap[line]);
nicif_info(nic_dev, drv, netdev, "Remove vlan %d\n", vid);
end:
return err;
}
#define FEATURES_OP_STR(op) ((op) ? "Enable" : "Disable")
static int set_feature_tso(struct hinic_nic_dev *nic_dev,
netdev_features_t wanted_features,
netdev_features_t features,
netdev_features_t *failed_features)
{
netdev_features_t changed = wanted_features ^ features;
bool en = !!(wanted_features & NETIF_F_TSO);
int err;
if (!(changed & NETIF_F_TSO))
return 0;
err = hinic_set_tx_tso(nic_dev->hwdev, en);
if (err) {
hinic_err(nic_dev, drv, "%s tso failed\n", FEATURES_OP_STR(en));
*failed_features |= NETIF_F_TSO;
} else {
hinic_info(nic_dev, drv, "%s tso success\n",
FEATURES_OP_STR(en));
}
return err;
}
static int set_feature_cvlan(struct hinic_nic_dev *nic_dev,
netdev_features_t wanted_features,
netdev_features_t features,
netdev_features_t *failed_features)
{
netdev_features_t changed = wanted_features ^ features;
netdev_features_t vlan_feature = NETIF_F_HW_VLAN_CTAG_RX;
bool en = !!(wanted_features & vlan_feature);
int err;
if (!(changed & vlan_feature))
return 0;
err = hinic_set_rx_vlan_offload(nic_dev->hwdev, en);
if (err) {
hinic_err(nic_dev, drv, "%s rxvlan failed\n",
FEATURES_OP_STR(en));
*failed_features |= vlan_feature;
} else {
hinic_info(nic_dev, drv, "%s rxvlan success\n",
FEATURES_OP_STR(en));
}
return err;
}
static int set_feature_rxcsum(struct hinic_nic_dev *nic_dev,
netdev_features_t wanted_features,
netdev_features_t features,
netdev_features_t *failed_features)
{
netdev_features_t changed = wanted_features ^ features;
bool en = !!(wanted_features & NETIF_F_RXCSUM);
int err;
if (!(changed & NETIF_F_RXCSUM))
return 0;
/* hw must always keep rx csum enabled, so enable it regardless */
err = hinic_set_rx_csum_offload(nic_dev->hwdev,
HINIC_RX_CSUM_OFFLOAD_EN);
if (err) {
hinic_err(nic_dev, drv, "%s rx csum failed\n",
FEATURES_OP_STR(en));
*failed_features |= NETIF_F_RXCSUM;
} else {
hinic_info(nic_dev, drv, "%s rx csum success\n",
FEATURES_OP_STR(en));
}
return err;
}
static int set_feature_lro(struct hinic_nic_dev *nic_dev,
netdev_features_t wanted_features,
netdev_features_t features,
netdev_features_t *failed_features)
{
netdev_features_t changed = wanted_features ^ features;
bool en = !!(wanted_features & NETIF_F_LRO);
u32 lro_timer, lro_buf_size;
int err;
if (!(changed & NETIF_F_LRO))
return 0;
lro_timer = nic_dev->adaptive_cfg.lro.timer;
lro_buf_size = nic_dev->adaptive_cfg.lro.buffer_size;
err = hinic_set_rx_lro_state(nic_dev->hwdev, en, lro_timer,
lro_buf_size / nic_dev->rx_buff_len);
if (err) {
hinic_err(nic_dev, drv, "%s lro failed\n", FEATURES_OP_STR(en));
*failed_features |= NETIF_F_LRO;
} else {
hinic_info(nic_dev, drv, "%s lro success\n",
FEATURES_OP_STR(en));
}
return err;
}
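/* Apply the requested feature changes one by one; any feature the hw
 * rejects is flipped back in netdev->features and -EIO is returned.
 */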
static int set_features(struct hinic_nic_dev *nic_dev,
netdev_features_t pre_features,
netdev_features_t features)
{
netdev_features_t failed_features = 0;
u32 err;
err = (u32)set_feature_tso(nic_dev, features, pre_features,
&failed_features);
err |= (u32)set_feature_cvlan(nic_dev, features, pre_features,
&failed_features);
err |= (u32)set_feature_rxcsum(nic_dev, features, pre_features,
&failed_features);
err |= (u32)set_feature_lro(nic_dev, features, pre_features,
&failed_features);
if (err) {
nic_dev->netdev->features = features ^ failed_features;
return -EIO;
}
return 0;
}
static int hinic_set_features(struct net_device *netdev,
netdev_features_t features)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
return set_features(nic_dev, nic_dev->netdev->features,
features);
}
static netdev_features_t hinic_fix_features(struct net_device *netdev,
netdev_features_t features)
{
/* If Rx checksum is disabled, then LRO should also be disabled */
if (!(features & NETIF_F_RXCSUM))
features &= ~NETIF_F_LRO;
return features;
}
static int hinic_set_default_hw_feature(struct hinic_nic_dev *nic_dev)
{
int err;
if (!HINIC_FUNC_IS_VF(nic_dev->hwdev)) {
if (FUNC_SUPPORT_DCB(nic_dev->hwdev)) {
err = hinic_dcb_reset_hw_config(nic_dev);
if (err) {
nic_err(&nic_dev->pdev->dev, "Failed to reset hw dcb configuration\n");
return -EFAULT;
}
}
if (FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) {
err = hinic_reset_port_link_cfg(nic_dev->hwdev);
if (err)
return -EFAULT;
}
hinic_set_anti_attack(nic_dev->hwdev, true);
if (set_link_status_follow < HINIC_LINK_FOLLOW_STATUS_MAX &&
FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) {
err = hinic_set_link_status_follow(nic_dev->hwdev,
set_link_status_follow);
if (err == HINIC_MGMT_CMD_UNSUPPORTED)
nic_warn(&nic_dev->pdev->dev,
"Current version of firmware don't support to set link status follow port status\n");
}
}
/* enable all hw features in netdev->features */
return set_features(nic_dev, ~nic_dev->netdev->features,
nic_dev->netdev->features);
}
static int hinic_setup_tc_mqprio(struct net_device *dev,
struct tc_mqprio_qopt *mqprio)
{
mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
return hinic_setup_tc(dev, mqprio->num_tc);
}
static int __hinic_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
switch (type) {
case TC_SETUP_QDISC_MQPRIO:
return hinic_setup_tc_mqprio(dev, type_data);
default:
return -EOPNOTSUPP;
}
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void hinic_netpoll(struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
u16 i;
for (i = 0; i < nic_dev->num_qps; i++)
napi_schedule(&nic_dev->irq_cfg[i].napi);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
static int hinic_uc_sync(struct net_device *netdev, u8 *addr)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
u16 func_id;
int err;
err = hinic_global_func_id_get(nic_dev->hwdev, &func_id);
if (err)
return err;
err = hinic_set_mac(nic_dev->hwdev, addr, 0, func_id);
return err;
}
static int hinic_uc_unsync(struct net_device *netdev, u8 *addr)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
u16 func_id;
int err;
/* The address is still in use as the netdev MAC */
if (ether_addr_equal(addr, netdev->dev_addr))
return 0;
err = hinic_global_func_id_get(nic_dev->hwdev, &func_id);
if (err)
return err;
err = hinic_del_mac(nic_dev->hwdev, addr, 0, func_id);
return err;
}
static void hinic_clean_mac_list_filter(struct hinic_nic_dev *nic_dev)
{
struct net_device *netdev = nic_dev->netdev;
struct hinic_mac_filter *f, *ftmp;
list_for_each_entry_safe(f, ftmp, &nic_dev->uc_filter_list, list) {
if (f->state == HINIC_MAC_HW_SYNCED)
hinic_uc_unsync(netdev, f->addr);
list_del(&f->list);
kfree(f);
}
list_for_each_entry_safe(f, ftmp, &nic_dev->mc_filter_list, list) {
if (f->state == HINIC_MAC_HW_SYNCED)
hinic_uc_unsync(netdev, f->addr);
list_del(&f->list);
kfree(f);
}
}
static struct hinic_mac_filter *hinic_find_mac(struct list_head *filter_list,
u8 *addr)
{
struct hinic_mac_filter *f;
list_for_each_entry(f, filter_list, list) {
if (ether_addr_equal(addr, f->addr))
return f;
}
return NULL;
}
static struct hinic_mac_filter
*hinic_add_filter(struct hinic_nic_dev *nic_dev,
struct list_head *mac_filter_list, u8 *addr)
{
struct hinic_mac_filter *f;
f = kzalloc(sizeof(*f), GFP_ATOMIC);
if (!f)
goto out;
memcpy(f->addr, addr, ETH_ALEN);
INIT_LIST_HEAD(&f->list);
list_add_tail(&f->list, mac_filter_list);
f->state = HINIC_MAC_WAIT_HW_SYNC;
set_bit(HINIC_MAC_FILTER_CHANGED, &nic_dev->flags);
out:
return f;
}
static void hinic_del_filter(struct hinic_nic_dev *nic_dev,
struct hinic_mac_filter *f)
{
set_bit(HINIC_MAC_FILTER_CHANGED, &nic_dev->flags);
if (f->state == HINIC_MAC_WAIT_HW_SYNC) {
/* not added to hw yet, delete it directly */
list_del(&f->list);
kfree(f);
return;
}
f->state = HINIC_MAC_WAIT_HW_UNSYNC;
}
static struct hinic_mac_filter
*hinic_mac_filter_entry_clone(struct hinic_mac_filter *src)
{
struct hinic_mac_filter *f;
f = kzalloc(sizeof(*f), GFP_ATOMIC);
if (!f)
return NULL;
*f = *src;
INIT_LIST_HEAD(&f->list);
return f;
}
static void hinic_undo_del_filter_entries(struct list_head *filter_list,
struct list_head *from)
{
struct hinic_mac_filter *f, *ftmp;
list_for_each_entry_safe(f, ftmp, from, list) {
if (hinic_find_mac(filter_list, f->addr))
continue;
if (f->state == HINIC_MAC_HW_SYNCED)
f->state = HINIC_MAC_WAIT_HW_UNSYNC;
list_move_tail(&f->list, filter_list);
}
}
static void hinic_undo_add_filter_entries(struct list_head *filter_list,
struct list_head *from)
{
struct hinic_mac_filter *f, *ftmp, *tmp;
list_for_each_entry_safe(f, ftmp, from, list) {
tmp = hinic_find_mac(filter_list, f->addr);
if (tmp && tmp->state == HINIC_MAC_HW_SYNCED)
tmp->state = HINIC_MAC_WAIT_HW_SYNC;
}
}
static void hinic_cleanup_filter_list(struct list_head *head)
{
struct hinic_mac_filter *f, *ftmp;
list_for_each_entry_safe(f, ftmp, head, list) {
list_del(&f->list);
kfree(f);
}
}
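/* Flush the temporary del/add lists to hw. Delete failures are only
 * logged; an add failure returns a negative errno. On success the
 * number of MACs added is returned.
 */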
static int hinic_mac_filter_sync_hw(struct hinic_nic_dev *nic_dev,
struct list_head *del_list,
struct list_head *add_list)
{
struct net_device *netdev = nic_dev->netdev;
struct hinic_mac_filter *f, *ftmp;
int err = 0, add_count = 0;
if (!list_empty(del_list)) {
list_for_each_entry_safe(f, ftmp, del_list, list) {
err = hinic_uc_unsync(netdev, f->addr);
if (err) { /* ignore errors when deleting a mac */
nic_err(&nic_dev->pdev->dev, "Failed to delete mac\n");
}
list_del(&f->list);
kfree(f);
}
}
if (!list_empty(add_list)) {
list_for_each_entry_safe(f, ftmp, add_list, list) {
err = hinic_uc_sync(netdev, f->addr);
if (err) {
nic_err(&nic_dev->pdev->dev, "Failed to add mac\n");
return err;
}
add_count++;
list_del(&f->list);
kfree(f);
}
}
return add_count;
}
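/* Two-phase MAC filter sync: move WAIT_HW_UNSYNC entries onto a del
 * list, clone WAIT_HW_SYNC entries onto an add list, then flush both
 * to hw. If an add fails, roll everything back and return -ENOMEM so
 * the caller can force promisc/allmulti mode instead.
 */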
static int hinic_mac_filter_sync(struct hinic_nic_dev *nic_dev,
struct list_head *mac_filter_list, bool uc)
{
struct net_device *netdev = nic_dev->netdev;
struct list_head tmp_del_list, tmp_add_list;
struct hinic_mac_filter *f, *ftmp, *fclone;
int err = 0, add_count = 0;
INIT_LIST_HEAD(&tmp_del_list);
INIT_LIST_HEAD(&tmp_add_list);
list_for_each_entry_safe(f, ftmp, mac_filter_list, list) {
if (f->state != HINIC_MAC_WAIT_HW_UNSYNC)
continue;
f->state = HINIC_MAC_HW_UNSYNCED;
list_move_tail(&f->list, &tmp_del_list);
}
list_for_each_entry_safe(f, ftmp, mac_filter_list, list) {
if (f->state != HINIC_MAC_WAIT_HW_SYNC)
continue;
fclone = hinic_mac_filter_entry_clone(f);
if (!fclone) {
err = -ENOMEM;
break;
}
f->state = HINIC_MAC_HW_SYNCED;
list_add_tail(&fclone->list, &tmp_add_list);
}
if (err) {
hinic_undo_del_filter_entries(mac_filter_list, &tmp_del_list);
hinic_undo_add_filter_entries(mac_filter_list, &tmp_add_list);
nicif_err(nic_dev, drv, netdev, "Failed to clone mac_filter_entry\n");
hinic_cleanup_filter_list(&tmp_del_list);
hinic_cleanup_filter_list(&tmp_add_list);
return -ENOMEM;
}
add_count =
hinic_mac_filter_sync_hw(nic_dev, &tmp_del_list, &tmp_add_list);
if (list_empty(&tmp_add_list))
return add_count;
/* errors occurred while adding macs to hw, delete all macs in hw */
hinic_undo_add_filter_entries(mac_filter_list, &tmp_add_list);
/* VFs don't support entering promisc mode,
 * so we can't delete any other uc mac
 */
if (!HINIC_FUNC_IS_VF(nic_dev->hwdev) || !uc) {
list_for_each_entry_safe(f, ftmp, mac_filter_list, list) {
if (f->state != HINIC_MAC_HW_SYNCED)
continue;
fclone = hinic_mac_filter_entry_clone(f);
if (!fclone)
break;
f->state = HINIC_MAC_WAIT_HW_SYNC;
list_add_tail(&fclone->list, &tmp_del_list);
}
}
hinic_cleanup_filter_list(&tmp_add_list);
hinic_mac_filter_sync_hw(nic_dev, &tmp_del_list, &tmp_add_list);
/* need to enter promisc/allmulti mode */
return -ENOMEM;
}
static void hinic_mac_filter_sync_all(struct hinic_nic_dev *nic_dev)
{
struct net_device *netdev = nic_dev->netdev;
int add_count;
if (test_bit(HINIC_MAC_FILTER_CHANGED, &nic_dev->flags)) {
clear_bit(HINIC_MAC_FILTER_CHANGED, &nic_dev->flags);
add_count = hinic_mac_filter_sync(nic_dev,
&nic_dev->uc_filter_list,
true);
if (add_count < 0 && !HINIC_FUNC_IS_VF(nic_dev->hwdev)) {
set_bit(HINIC_PROMISC_FORCE_ON, &nic_dev->rx_mod_state);
nicif_info(nic_dev, drv, netdev, "Promisc mode forced on\n");
} else if (add_count) {
clear_bit(HINIC_PROMISC_FORCE_ON,
&nic_dev->rx_mod_state);
}
add_count = hinic_mac_filter_sync(nic_dev,
&nic_dev->mc_filter_list,
false);
if (add_count < 0) {
set_bit(HINIC_ALLMULTI_FORCE_ON,
&nic_dev->rx_mod_state);
nicif_info(nic_dev, drv, netdev, "All multicast mode forced on\n");
} else if (add_count) {
clear_bit(HINIC_ALLMULTI_FORCE_ON,
&nic_dev->rx_mod_state);
}
}
}
#define HINIC_DEFAULT_RX_MODE (HINIC_RX_MODE_UC | HINIC_RX_MODE_MC | \
HINIC_RX_MODE_BC)
static void hinic_update_mac_filter(struct hinic_nic_dev *nic_dev,
struct netdev_hw_addr_list *src_list,
struct list_head *filter_list)
{
struct netdev_hw_addr *ha;
struct hinic_mac_filter *f, *ftmp, *filter;
/* add addr if not already in the filter list */
netif_addr_lock_bh(nic_dev->netdev);
netdev_hw_addr_list_for_each(ha, src_list) {
filter = hinic_find_mac(filter_list, ha->addr);
if (!filter)
hinic_add_filter(nic_dev, filter_list, ha->addr);
else if (filter->state == HINIC_MAC_WAIT_HW_UNSYNC)
filter->state = HINIC_MAC_HW_SYNCED;
}
netif_addr_unlock_bh(nic_dev->netdev);
/* delete addr if not in netdev list */
list_for_each_entry_safe(f, ftmp, filter_list, list) {
bool found = false;
netif_addr_lock_bh(nic_dev->netdev);
netdev_hw_addr_list_for_each(ha, src_list)
if (ether_addr_equal(ha->addr, f->addr)) {
found = true;
break;
}
netif_addr_unlock_bh(nic_dev->netdev);
if (found)
continue;
hinic_del_filter(nic_dev, f);
}
}
static void __update_mac_filter(struct hinic_nic_dev *nic_dev)
{
struct net_device *netdev = nic_dev->netdev;
if (test_and_clear_bit(HINIC_UPDATE_MAC_FILTER, &nic_dev->flags)) {
hinic_update_mac_filter(nic_dev, &netdev->uc,
&nic_dev->uc_filter_list);
hinic_update_mac_filter(nic_dev, &netdev->mc,
&nic_dev->mc_filter_list);
}
}
static void hinic_set_rx_mode_work(struct work_struct *work)
{
struct hinic_nic_dev *nic_dev =
container_of(work, struct hinic_nic_dev, rx_mode_work);
struct net_device *netdev = nic_dev->netdev;
int promisc_en = 0, allmulti_en = 0;
int err = 0;
__update_mac_filter(nic_dev);
hinic_mac_filter_sync_all(nic_dev);
/* VFs don't support entering promisc mode */
if (!HINIC_FUNC_IS_VF(nic_dev->hwdev)) {
promisc_en = !!(netdev->flags & IFF_PROMISC) ||
test_bit(HINIC_PROMISC_FORCE_ON,
&nic_dev->rx_mod_state);
}
allmulti_en = !!(netdev->flags & IFF_ALLMULTI) ||
test_bit(HINIC_ALLMULTI_FORCE_ON, &nic_dev->rx_mod_state);
if (promisc_en !=
test_bit(HINIC_HW_PROMISC_ON, &nic_dev->rx_mod_state) ||
allmulti_en !=
test_bit(HINIC_HW_ALLMULTI_ON, &nic_dev->rx_mod_state)) {
enum hinic_rx_mod rx_mod = HINIC_DEFAULT_RX_MODE;
rx_mod |= (promisc_en ? HINIC_RX_MODE_PROMISC : 0);
rx_mod |= (allmulti_en ? HINIC_RX_MODE_MC_ALL : 0);
/* FOR DEBUG */
if (promisc_en !=
test_bit(HINIC_HW_PROMISC_ON, &nic_dev->rx_mod_state))
nicif_info(nic_dev, drv, netdev,
"%s promisc mode\n",
promisc_en ? "Enter" : "Left");
if (allmulti_en !=
test_bit(HINIC_HW_ALLMULTI_ON, &nic_dev->rx_mod_state))
nicif_info(nic_dev, drv, netdev,
"%s all_multi mode\n",
allmulti_en ? "Enter" : "Left");
err = hinic_set_rx_mode(nic_dev->hwdev, rx_mod);
if (!err) {
promisc_en ?
set_bit(HINIC_HW_PROMISC_ON, &nic_dev->rx_mod_state) :
clear_bit(HINIC_HW_PROMISC_ON, &nic_dev->rx_mod_state);
allmulti_en ?
set_bit(HINIC_HW_ALLMULTI_ON, &nic_dev->rx_mod_state) :
clear_bit(HINIC_HW_ALLMULTI_ON, &nic_dev->rx_mod_state);
} else {
nicif_err(nic_dev, drv, netdev, "Failed to set rx_mode\n");
}
}
}
static void hinic_nic_set_rx_mode(struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
if (netdev_uc_count(netdev) != nic_dev->netdev_uc_cnt ||
netdev_mc_count(netdev) != nic_dev->netdev_mc_cnt) {
set_bit(HINIC_UPDATE_MAC_FILTER, &nic_dev->flags);
nic_dev->netdev_uc_cnt = netdev_uc_count(netdev);
nic_dev->netdev_mc_cnt = netdev_mc_count(netdev);
}
if (FUNC_SUPPORT_RX_MODE(nic_dev->hwdev))
queue_work(nic_dev->workq, &nic_dev->rx_mode_work);
}
static const struct net_device_ops hinic_netdev_ops = {
.ndo_open = hinic_open,
.ndo_stop = hinic_close,
.ndo_start_xmit = hinic_xmit_frame,
.ndo_get_stats64 = hinic_get_stats64,
.ndo_tx_timeout = hinic_tx_timeout,
.ndo_select_queue = hinic_select_queue,
.ndo_change_mtu = hinic_change_mtu,
.ndo_set_mac_address = hinic_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_vlan_rx_add_vid = hinic_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = hinic_vlan_rx_kill_vid,
.ndo_set_vf_mac = hinic_ndo_set_vf_mac,
.ndo_set_vf_vlan = hinic_ndo_set_vf_vlan,
.ndo_set_vf_rate = hinic_ndo_set_vf_bw,
.ndo_set_vf_spoofchk = hinic_ndo_set_vf_spoofchk,
.ndo_set_vf_trust = hinic_ndo_set_vf_trust,
.ndo_get_vf_config = hinic_ndo_get_vf_config,
.ndo_setup_tc = __hinic_setup_tc,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = hinic_netpoll,
#endif /* CONFIG_NET_POLL_CONTROLLER */
.ndo_set_rx_mode = hinic_nic_set_rx_mode,
.ndo_set_vf_link_state = hinic_ndo_set_vf_link_state,
.ndo_fix_features = hinic_fix_features,
.ndo_set_features = hinic_set_features,
};
static const struct net_device_ops hinicvf_netdev_ops = {
.ndo_open = hinic_open,
.ndo_stop = hinic_close,
.ndo_start_xmit = hinic_xmit_frame,
.ndo_get_stats64 = hinic_get_stats64,
.ndo_tx_timeout = hinic_tx_timeout,
.ndo_select_queue = hinic_select_queue,
.ndo_change_mtu = hinic_change_mtu,
.ndo_set_mac_address = hinic_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_vlan_rx_add_vid = hinic_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = hinic_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = hinic_netpoll,
#endif /* CONFIG_NET_POLL_CONTROLLER */
.ndo_set_rx_mode = hinic_nic_set_rx_mode,
.ndo_fix_features = hinic_fix_features,
.ndo_set_features = hinic_set_features,
};
static void netdev_feature_init(struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
netdev_features_t hw_features;
netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO |
NETIF_F_TSO6 | NETIF_F_RXCSUM;
if (FUNC_SUPPORT_SCTP_CRC(nic_dev->hwdev))
netdev->features |= NETIF_F_SCTP_CRC;
netdev->vlan_features = netdev->features;
if (FUNC_SUPPORT_ENCAP_TSO_CSUM(nic_dev->hwdev))
netdev->features |= NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM;
if (FUNC_SUPPORT_HW_VLAN(nic_dev->hwdev)) {
netdev->features |= NETIF_F_HW_VLAN_CTAG_TX;
netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
}
/* copy netdev features into list of user selectable features */
hw_features = netdev->hw_features;
hw_features |= netdev->features;
if (FUNC_SUPPORT_LRO(nic_dev->hwdev)) {
/* LRO is disabled by default, so only set it in hw_features */
hw_features |= NETIF_F_LRO;
/* Enable LRO */
if (nic_dev->adaptive_cfg.lro.enable &&
!HINIC_FUNC_IS_VF(nic_dev->hwdev))
netdev->features |= NETIF_F_LRO;
}
netdev->hw_features = hw_features;
/* Set after the hw_features copy because this feature must not
 * be part of hw_features
 */
netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->priv_flags |= IFF_UNICAST_FLT;
if (FUNC_SUPPORT_ENCAP_TSO_CSUM(nic_dev->hwdev)) {
netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
| NETIF_F_SCTP_CRC | NETIF_F_SG;
netdev->hw_enc_features |= NETIF_F_TSO | NETIF_F_TSO6
| NETIF_F_TSO_ECN
| NETIF_F_GSO_UDP_TUNNEL_CSUM
| NETIF_F_GSO_UDP_TUNNEL;
}
}
#define MOD_PARA_VALIDATE_NUM_QPS(nic_dev, num_qps, out_qps) { \
if ((num_qps) > (nic_dev)->max_qps) \
nic_warn(&(nic_dev)->pdev->dev, \
"Module Parameter %s value %d is out of range, "\
"Maximum value for the device: %d, using %d\n",\
#num_qps, num_qps, (nic_dev)->max_qps, \
(nic_dev)->max_qps); \
if (!(num_qps) || (num_qps) > (nic_dev)->max_qps) \
out_qps = (nic_dev)->max_qps; \
else \
out_qps = num_qps; \
}
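/* Enable RSS when more than one queue pair is available: allocate a
 * hw RSS template, then cap num_qps by the module parameters and by
 * the number of CPUs local to the device's NUMA node.
 */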
static void hinic_try_to_enable_rss(struct hinic_nic_dev *nic_dev)
{
u8 prio_tc[HINIC_DCB_UP_MAX] = {0};
int i, node, err = 0;
u16 num_cpus = 0;
enum hinic_service_mode service_mode =
hinic_get_service_mode(nic_dev->hwdev);
nic_dev->max_qps = hinic_func_max_nic_qnum(nic_dev->hwdev);
if (nic_dev->max_qps <= 1) {
clear_bit(HINIC_RSS_ENABLE, &nic_dev->flags);
nic_dev->rss_limit = nic_dev->max_qps;
nic_dev->num_qps = nic_dev->max_qps;
nic_dev->num_rss = nic_dev->max_qps;
return;
}
err = hinic_rss_template_alloc(nic_dev->hwdev, &nic_dev->rss_tmpl_idx);
if (err) {
if (err == -ENOSPC)
nic_warn(&nic_dev->pdev->dev,
"Failed to alloc tmpl_idx for rss, table is full\n");
else
nic_err(&nic_dev->pdev->dev,
"Failed to alloc tmpl_idx for rss, can't enable rss for this function\n");
clear_bit(HINIC_RSS_ENABLE, &nic_dev->flags);
nic_dev->max_qps = 1;
nic_dev->rss_limit = nic_dev->max_qps;
nic_dev->num_qps = nic_dev->max_qps;
nic_dev->num_rss = nic_dev->max_qps;
return;
}
set_bit(HINIC_RSS_ENABLE, &nic_dev->flags);
nic_dev->max_qps = hinic_func_max_nic_qnum(nic_dev->hwdev);
MOD_PARA_VALIDATE_NUM_QPS(nic_dev, num_qps, nic_dev->num_qps);
/* Reduce the memory footprint in ovs mode.
 * A VF can't get board info correctly with an early pf driver.
 */
if ((hinic_get_func_mode(nic_dev->hwdev) == FUNC_MOD_NORMAL_HOST) &&
service_mode == HINIC_WORK_MODE_OVS &&
hinic_func_type(nic_dev->hwdev) != TYPE_VF)
MOD_PARA_VALIDATE_NUM_QPS(nic_dev, ovs_num_qps,
nic_dev->num_qps);
for (i = 0; i < (int)num_online_cpus(); i++) {
node = (int)cpu_to_node(i);
if (node == dev_to_node(&nic_dev->pdev->dev))
num_cpus++;
}
if (!num_cpus)
num_cpus = (u16)num_online_cpus();
nic_dev->num_qps = min_t(u16, nic_dev->num_qps, num_cpus);
nic_dev->rss_limit = nic_dev->num_qps;
nic_dev->num_rss = nic_dev->num_qps;
hinic_init_rss_parameters(nic_dev->netdev);
hinic_set_hw_rss_parameters(nic_dev->netdev, 0, 0, prio_tc);
}
static int hinic_sw_init(struct hinic_nic_dev *adapter)
{
struct net_device *netdev = adapter->netdev;
u16 func_id;
int err = 0;
sema_init(&adapter->port_state_sem, 1);
err = hinic_dcb_init(adapter);
if (err) {
nic_err(&adapter->pdev->dev, "Failed to init dcb\n");
return -EFAULT;
}
if (HINIC_FUNC_IS_VF(adapter->hwdev)) {
err = hinic_sq_cos_mapping(netdev);
if (err) {
nic_err(&adapter->pdev->dev, "Failed to set sq_cos_mapping\n");
return -EFAULT;
}
}
adapter->sq_depth = HINIC_SQ_DEPTH;
adapter->rq_depth = HINIC_RQ_DEPTH;
hinic_try_to_enable_rss(adapter);
err = hinic_get_default_mac(adapter->hwdev, netdev->dev_addr);
if (err) {
nic_err(&adapter->pdev->dev, "Failed to get MAC address\n");
goto get_mac_err;
}
if (!is_valid_ether_addr(netdev->dev_addr)) {
if (!HINIC_FUNC_IS_VF(adapter->hwdev)) {
nic_err(&adapter->pdev->dev, "Invalid MAC address\n");
err = -EIO;
goto err_mac;
}
nic_info(&adapter->pdev->dev, "Invalid MAC address %pM, using random\n",
netdev->dev_addr);
eth_hw_addr_random(netdev);
}
err = hinic_global_func_id_get(adapter->hwdev, &func_id);
if (err)
goto func_id_err;
err = hinic_set_mac(adapter->hwdev, netdev->dev_addr, 0, func_id);
/* For the VF driver, the PF may have already set the VF's MAC,
 * and that condition must not be treated as an error during the
 * driver probe procedure.
 */
if (err && err != HINIC_PF_SET_VF_ALREADY) {
nic_err(&adapter->pdev->dev, "Failed to set default MAC\n");
goto set_mac_err;
}
/* MTU range: 256 - 9600 */
netdev->min_mtu = HINIC_MIN_MTU_SIZE;
netdev->max_mtu = HINIC_MAX_JUMBO_FRAME_SIZE;
return 0;
set_mac_err:
func_id_err:
err_mac:
get_mac_err:
if (test_bit(HINIC_RSS_ENABLE, &adapter->flags))
hinic_rss_template_free(adapter->hwdev, adapter->rss_tmpl_idx);
return err;
}
static void hinic_assign_netdev_ops(struct hinic_nic_dev *adapter)
{
if (!HINIC_FUNC_IS_VF(adapter->hwdev)) {
adapter->netdev->netdev_ops = &hinic_netdev_ops;
if (FUNC_SUPPORT_DCB(adapter->hwdev))
adapter->netdev->dcbnl_ops = &hinic_dcbnl_ops;
hinic_set_ethtool_ops(adapter->netdev);
} else {
adapter->netdev->netdev_ops = &hinicvf_netdev_ops;
hinicvf_set_ethtool_ops(adapter->netdev);
}
adapter->netdev->watchdog_timeo = 5 * HZ;
}
#define HINIC_DFT_PG_10GE_TXRX_MSIX_PENDING_LIMIT 1
#define HINIC_DFT_PG_10GE_TXRX_MSIX_COALESC_TIMER 1
#define HINIC_DFT_PG_25GE_TXRX_MSIX_PENDING_LIMIT 2
#define HINIC_DFT_PG_25GE_TXRX_MSIX_COALESC_TIMER 2
#define HINIC_DFT_PG_ARM_25GE_TXRX_MSIX_COALESC_TIMER 3
#define HINIC_DFT_PG_100GE_TXRX_MSIX_PENDING_LIMIT 2
#define HINIC_DFT_PG_100GE_TXRX_MSIX_COALESC_TIMER 2
#define HINIC_DFT_PG_ARM_100GE_TXRX_MSIX_COALESC_TIMER 3
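/* Per-queue coalescing defaults: board-specific presets unless the
 * user set the module parameters (intr_coal_set_flag), with separate
 * values for VM mode and for SDI slave (vm/bm) modes.
 */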
static void update_queue_coal_param(struct hinic_nic_dev *nic_dev,
struct pci_device_id *id, u16 qid)
{
struct hinic_intr_coal_info *info = NULL;
info = &nic_dev->intr_coalesce[qid];
if (!nic_dev->intr_coal_set_flag) {
switch (id->driver_data) {
case HINIC_BOARD_PG_TP_10GE:
info->pending_limt =
HINIC_DFT_PG_10GE_TXRX_MSIX_PENDING_LIMIT;
info->coalesce_timer_cfg =
HINIC_DFT_PG_10GE_TXRX_MSIX_COALESC_TIMER;
break;
case HINIC_BOARD_PG_SM_25GE:
info->pending_limt =
HINIC_DFT_PG_25GE_TXRX_MSIX_PENDING_LIMIT;
info->coalesce_timer_cfg =
HINIC_DFT_PG_ARM_25GE_TXRX_MSIX_COALESC_TIMER;
break;
case HINIC_BOARD_PG_100GE:
info->pending_limt =
HINIC_DFT_PG_100GE_TXRX_MSIX_PENDING_LIMIT;
info->coalesce_timer_cfg =
HINIC_DFT_PG_ARM_100GE_TXRX_MSIX_COALESC_TIMER;
break;
default:
info->pending_limt = qp_pending_limit;
info->coalesce_timer_cfg = qp_coalesc_timer_cfg;
break;
}
}
info->resend_timer_cfg = HINIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG;
info->pkt_rate_high = HINIC_RX_RATE_HIGH;
info->rx_usecs_high = qp_coalesc_timer_high;
info->rx_pending_limt_high = qp_pending_limit_high;
info->pkt_rate_low = HINIC_RX_RATE_LOW;
info->rx_usecs_low = qp_coalesc_timer_low;
info->rx_pending_limt_low = qp_pending_limit_low;
if (nic_dev->in_vm) {
if (qp_pending_limit_high == HINIC_RX_PENDING_LIMIT_HIGH)
qp_pending_limit_high = HINIC_RX_PENDING_LIMIT_HIGH_VM;
info->pkt_rate_low = HINIC_RX_RATE_LOW_VM;
info->rx_pending_limt_high = qp_pending_limit_high;
}
/* Suits sdi3.0: vf driver in vm mode, or pf/vf driver in bm mode */
if ((nic_dev->is_vm_slave && nic_dev->in_vm) ||
nic_dev->is_bm_slave) {
info->pkt_rate_high = SDI_VM_RX_PKT_RATE_HIGH;
info->pkt_rate_low = SDI_VM_RX_PKT_RATE_LOW;
if (qp_coalesc_use_drv_params_switch == 0) {
/* on arm servers this value may need to be
 * tuned again
 */
info->pending_limt = SDI_VM_PENDING_LIMT;
info->coalesce_timer_cfg = SDI_VM_COALESCE_TIMER_CFG;
info->rx_usecs_high = SDI_VM_RX_USECS_HIGH;
info->rx_pending_limt_high =
SDI_VM_RX_PENDING_LIMT_HIGH;
info->rx_usecs_low = SDI_VM_RX_USECS_LOW;
info->rx_pending_limt_low = SDI_VM_RX_PENDING_LIMT_LOW;
} else {
info->rx_usecs_high = qp_coalesc_timer_high;
info->rx_pending_limt_high = qp_pending_limit_high;
info->rx_usecs_low = qp_coalesc_timer_low;
info->rx_pending_limt_low = qp_pending_limit_low;
}
}
}
static void init_intr_coal_param(struct hinic_nic_dev *nic_dev)
{
struct pci_device_id *id;
u16 i;
id = hinic_get_pci_device_id(nic_dev->pdev);
switch (id->driver_data) {
case HINIC_BOARD_10GE:
case HINIC_BOARD_PG_TP_10GE:
nic_dev->his_link_speed = SPEED_10000;
break;
case HINIC_BOARD_25GE:
case HINIC_BOARD_PG_SM_25GE:
nic_dev->his_link_speed = SPEED_25000;
break;
case HINIC_BOARD_40GE:
nic_dev->his_link_speed = SPEED_40000;
break;
case HINIC_BOARD_100GE:
case HINIC_BOARD_PG_100GE:
nic_dev->his_link_speed = SPEED_100000;
break;
default:
break;
}
for (i = 0; i < nic_dev->max_qps; i++)
update_queue_coal_param(nic_dev, id, i);
}
static int hinic_init_intr_coalesce(struct hinic_nic_dev *nic_dev)
{
u64 size;
if (qp_pending_limit != HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT ||
qp_coalesc_timer_cfg != HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG)
nic_dev->intr_coal_set_flag = 1;
else
nic_dev->intr_coal_set_flag = 0;
size = sizeof(*nic_dev->intr_coalesce) * nic_dev->max_qps;
if (!size) {
nic_err(&nic_dev->pdev->dev, "Cannot allocate zero size intr coalesce\n");
return -EINVAL;
}
nic_dev->intr_coalesce = kzalloc(size, GFP_KERNEL);
if (!nic_dev->intr_coalesce) {
nic_err(&nic_dev->pdev->dev, "Failed to alloc intr coalesce\n");
return -ENOMEM;
}
init_intr_coal_param(nic_dev);
if (test_bit(HINIC_INTR_ADAPT, &nic_dev->flags))
nic_dev->adaptive_rx_coal = 1;
else
nic_dev->adaptive_rx_coal = 0;
return 0;
}
static void hinic_free_intr_coalesce(struct hinic_nic_dev *nic_dev)
{
kfree(nic_dev->intr_coalesce);
}
static int hinic_alloc_qps(struct hinic_nic_dev *nic_dev)
{
struct net_device *netdev = nic_dev->netdev;
int err;
err = hinic_alloc_txqs(netdev);
if (err) {
nic_err(&nic_dev->pdev->dev, "Failed to alloc txqs\n");
return err;
}
err = hinic_alloc_rxqs(netdev);
if (err) {
nic_err(&nic_dev->pdev->dev, "Failed to alloc rxqs\n");
goto alloc_rxqs_err;
}
err = hinic_init_intr_coalesce(nic_dev);
if (err) {
nic_err(&nic_dev->pdev->dev, "Failed to init_intr_coalesce\n");
goto init_intr_err;
}
return 0;
init_intr_err:
hinic_free_rxqs(netdev);
alloc_rxqs_err:
hinic_free_txqs(netdev);
return err;
}
static void hinic_destroy_qps(struct hinic_nic_dev *nic_dev)
{
hinic_free_intr_coalesce(nic_dev);
hinic_free_rxqs(nic_dev->netdev);
hinic_free_txqs(nic_dev->netdev);
}
static int hinic_validate_parameters(struct hinic_lld_dev *lld_dev)
{
struct pci_dev *pdev = lld_dev->pdev;
/* Check the poll_weight value; the default is 64.
 * poll_weight must not exceed the max queue depth,
 * so the valid range is 1~4096.
 */
if (!poll_weight) {
nic_warn(&pdev->dev, "Module Parameter poll_weight can not be 0, resetting to %d\n",
DEFAULT_POLL_WEIGHT);
poll_weight = DEFAULT_POLL_WEIGHT;
}
if (poll_weight > HINIC_MAX_QUEUE_DEPTH) {
nic_warn(&pdev->dev, "Module Parameter poll_weight value %u is out of 1~%d, resetting to max value %d\n",
poll_weight, HINIC_MAX_QUEUE_DEPTH,
HINIC_MAX_QUEUE_DEPTH);
poll_weight = HINIC_MAX_QUEUE_DEPTH;
}
/* Check the rx_buff value; the default is 2KB.
 * Valid rx_buff values are 2KB/4KB/8KB/16KB.
 */
if (rx_buff != RX_BUFF_VALID_2KB && rx_buff != RX_BUFF_VALID_4KB &&
rx_buff != RX_BUFF_VALID_8KB && rx_buff != RX_BUFF_VALID_16KB) {
nic_warn(&pdev->dev, "Module Parameter rx_buff value %d is out of range, must be 2^n. Valid range is 2 - 16, resetting to %dKB",
rx_buff, DEFAULT_RX_BUFF_LEN);
rx_buff = DEFAULT_RX_BUFF_LEN;
}
if (qp_coalesc_timer_high <= qp_coalesc_timer_low) {
nic_warn(&pdev->dev, "Module Parameter qp_coalesc_timer_high: %d, qp_coalesc_timer_low: %d is invalid, resetting to default\n",
qp_coalesc_timer_high, qp_coalesc_timer_low);
qp_coalesc_timer_high = HINIC_RX_COAL_TIME_HIGH;
qp_coalesc_timer_low = HINIC_RX_COAL_TIME_LOW;
}
if (qp_pending_limit_high <= qp_pending_limit_low) {
nic_warn(&pdev->dev, "Module Parameter qp_pending_limit_high: %d, qp_pending_limit_low: %d is invalid, resetting to default\n",
qp_pending_limit_high, qp_pending_limit_low);
qp_pending_limit_high = HINIC_RX_PENDING_LIMIT_HIGH;
qp_pending_limit_low = HINIC_RX_PENDING_LIMIT_LOW;
}
return 0;
}
static void check_lro_module_param(struct hinic_nic_dev *nic_dev)
{
struct hinic_lro_cfg *lro = &nic_dev->adaptive_cfg.lro;
/* Use module parameters first. */
if (set_lro_timer != 0 &&
set_lro_timer >= HINIC_LRO_RX_TIMER_LOWER &&
set_lro_timer <= HINIC_LRO_RX_TIMER_UPPER)
lro->timer = set_lro_timer;
/* Use module parameters first. */
if (set_max_wqe_num != 0 &&
set_max_wqe_num <= HINIC_LRO_MAX_WQE_NUM_UPPER &&
set_max_wqe_num >= HINIC_LRO_MAX_WQE_NUM_LOWER)
lro->buffer_size = set_max_wqe_num * nic_dev->rx_buff_len;
}
static void decide_rss_cfg(struct hinic_nic_dev *nic_dev)
{
struct hinic_environment_info *info = &nic_dev->env_info;
switch (info->cpu) {
case HINIC_CPU_ARM_GENERIC:
set_bit(HINIC_SAME_RXTX, &nic_dev->flags);
break;
case HINIC_CPU_X86_GENERIC:
clear_bit(HINIC_SAME_RXTX, &nic_dev->flags);
break;
default:
clear_bit(HINIC_SAME_RXTX, &nic_dev->flags);
break;
}
}
static void decide_lro_cfg(struct hinic_nic_dev *nic_dev)
{
struct hinic_environment_info *info = &nic_dev->env_info;
struct hinic_lro_cfg *lro = &nic_dev->adaptive_cfg.lro;
if (lro_en_status < HINIC_LRO_STATUS_UNSET) {
lro->enable = lro_en_status;
} else {
/* LRO is enabled by default on all Huawei OS variants */
switch (info->os) {
case HINIC_OS_HUAWEI:
lro->enable = 1;
break;
case HINIC_OS_NON_HUAWEI:
lro->enable = 0;
break;
default:
lro->enable = 0;
break;
}
}
switch (info->board) {
case HINIC_BOARD_25GE:
lro->timer = HINIC_LRO_RX_TIMER_DEFAULT_25GE;
break;
case HINIC_BOARD_100GE:
lro->timer = HINIC_LRO_RX_TIMER_DEFAULT_100GE;
break;
case HINIC_BOARD_PG_TP_10GE:
lro->timer = HINIC_LRO_RX_TIMER_DEFAULT_PG_10GE;
break;
case HINIC_BOARD_PG_SM_25GE:
lro->timer = HINIC_LRO_RX_TIMER_DEFAULT;
break;
case HINIC_BOARD_PG_100GE:
lro->timer = HINIC_LRO_RX_TIMER_DEFAULT_PG_100GE;
break;
default:
lro->timer = HINIC_LRO_RX_TIMER_DEFAULT;
break;
}
/* Use module parameters first. */
switch (info->cpu) {
case HINIC_CPU_ARM_GENERIC:
lro->buffer_size =
HINIC_LRO_MAX_WQE_NUM_DEFAULT_ARM *
nic_dev->rx_buff_len;
break;
case HINIC_CPU_X86_GENERIC:
lro->buffer_size =
HINIC_LRO_MAX_WQE_NUM_DEFAULT_X86 *
nic_dev->rx_buff_len;
break;
default:
lro->buffer_size =
HINIC_LRO_MAX_WQE_NUM_DEFAULT *
nic_dev->rx_buff_len;
break;
}
/* lro buffer_size needs adjusting according to the board type */
switch (info->board) {
case HINIC_BOARD_PG_TP_10GE:
case HINIC_BOARD_PG_SM_25GE:
case HINIC_BOARD_PG_100GE:
lro->buffer_size =
HINIC_LRO_WQE_NUM_PANGEA_DEFAULT * nic_dev->rx_buff_len;
break;
default:
break;
}
check_lro_module_param(nic_dev);
nic_info(&nic_dev->pdev->dev,
"LRO default configuration: enable %u, timer %u, buffer size %u\n",
lro->enable, lro->timer, lro->buffer_size);
}
static void decide_intr_cfg(struct hinic_nic_dev *nic_dev)
{
struct pci_device_id *id;
id = hinic_get_pci_device_id(nic_dev->pdev);
switch (id->driver_data) {
case HINIC_BOARD_PG_TP_10GE:
case HINIC_BOARD_PG_SM_25GE:
case HINIC_BOARD_PG_100GE:
clear_bit(HINIC_INTR_ADAPT, &nic_dev->flags);
break;
default:
set_bit(HINIC_INTR_ADAPT, &nic_dev->flags);
break;
}
}
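/* Detect the board/OS/CPU environment and use it to pick the LRO,
 * RSS and interrupt-adaptation defaults.
 */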
static void adaptive_configuration_init(struct hinic_nic_dev *nic_dev)
{
struct pci_device_id *id;
id = hinic_get_pci_device_id(nic_dev->pdev);
if (id)
nic_dev->env_info.board = id->driver_data;
else
nic_dev->env_info.board = HINIC_BOARD_UNKNOWN;
nic_dev->env_info.os = HINIC_OS_HUAWEI;
#if defined(__aarch64__)
nic_dev->env_info.cpu = HINIC_CPU_ARM_GENERIC;
#elif defined(__x86_64__)
nic_dev->env_info.cpu = HINIC_CPU_X86_GENERIC;
#else
nic_dev->env_info.cpu = HINIC_CPU_UNKNOWN;
#endif
nic_info(&nic_dev->pdev->dev,
"Board type %u, OS type %u, CPU type %u\n",
nic_dev->env_info.board, nic_dev->env_info.os,
nic_dev->env_info.cpu);
decide_lro_cfg(nic_dev);
decide_rss_cfg(nic_dev);
decide_intr_cfg(nic_dev);
}
static int nic_probe(struct hinic_lld_dev *lld_dev, void **uld_dev,
char *uld_dev_name)
{
struct pci_dev *pdev = lld_dev->pdev;
struct hinic_nic_dev *nic_dev;
struct net_device *netdev;
u16 max_qps;
u32 page_num;
int err;
/* *uld_dev must never be NULL */
*uld_dev = lld_dev;
if (!hinic_support_nic(lld_dev->hwdev, NULL)) {
nic_info(&pdev->dev, "Hw don't support nic\n");
return 0;
}
err = hinic_validate_parameters(lld_dev);
if (err)
return -EINVAL;
max_qps = hinic_func_max_nic_qnum(lld_dev->hwdev);
netdev = alloc_etherdev_mq(sizeof(*nic_dev), max_qps);
if (!netdev) {
nic_err(&pdev->dev, "Failed to allocate ETH device\n");
return -ENOMEM;
}
SET_NETDEV_DEV(netdev, &pdev->dev);
nic_dev = (struct hinic_nic_dev *)netdev_priv(netdev);
nic_dev->hwdev = lld_dev->hwdev;
nic_dev->pdev = pdev;
nic_dev->poll_weight = (int)poll_weight;
nic_dev->msg_enable = DEFAULT_MSG_ENABLE;
nic_dev->heart_status = true;
nic_dev->in_vm = !hinic_is_in_host();
nic_dev->is_vm_slave = is_multi_vm_slave(lld_dev->hwdev);
nic_dev->is_bm_slave = is_multi_bm_slave(lld_dev->hwdev);
nic_dev->lro_replenish_thld = lro_replenish_thld;
nic_dev->rx_buff_len = (u16)(rx_buff * CONVERT_UNIT);
page_num = (RX_BUFF_NUM_PER_PAGE * nic_dev->rx_buff_len) / PAGE_SIZE;
nic_dev->page_order = page_num > 0 ? ilog2(page_num) : 0;
mutex_init(&nic_dev->nic_mutex);
adaptive_configuration_init(nic_dev);
nic_dev->vlan_bitmap = kzalloc(VLAN_BITMAP_SIZE(nic_dev), GFP_KERNEL);
if (!nic_dev->vlan_bitmap) {
nic_err(&pdev->dev, "Failed to allocate vlan bitmap\n");
err = -ENOMEM;
goto vlan_bitmap_err;
}
nic_dev->netdev = netdev;
hinic_assign_netdev_ops(nic_dev);
netdev_feature_init(netdev);
/* get nic cap from hw */
hinic_support_nic(lld_dev->hwdev, &nic_dev->nic_cap);
err = hinic_init_nic_hwdev(nic_dev->hwdev, nic_dev->rx_buff_len);
if (err) {
nic_err(&pdev->dev, "Failed to init nic hwdev\n");
goto init_nic_hwdev_err;
}
err = hinic_set_super_cqe_state(nic_dev->hwdev, true);
if (err) {
nic_err(&pdev->dev, "Failed to set super cqe\n");
goto set_supper_cqe_err;
}
err = hinic_sw_init(nic_dev);
if (err)
goto sw_init_err;
err = hinic_alloc_qps(nic_dev);
if (err) {
nic_err(&pdev->dev, "Failed to alloc qps\n");
goto alloc_qps_err;
}
nic_dev->workq = create_singlethread_workqueue(HINIC_NIC_DEV_WQ_NAME);
if (!nic_dev->workq) {
nic_err(&pdev->dev, "Failed to initialize AEQ workqueue\n");
err = -ENOMEM;
goto create_workq_err;
}
INIT_LIST_HEAD(&nic_dev->uc_filter_list);
INIT_LIST_HEAD(&nic_dev->mc_filter_list);
INIT_WORK(&nic_dev->rx_mode_work, hinic_set_rx_mode_work);
err = hinic_set_default_hw_feature(nic_dev);
if (err)
goto set_features_err;
hinic_register_notifier(nic_dev);
err = register_netdev(netdev);
if (err) {
nic_err(&pdev->dev, "Failed to register netdev\n");
goto netdev_err;
}
netif_carrier_off(netdev);
*uld_dev = nic_dev;
nicif_info(nic_dev, probe, netdev, "Register netdev succeed\n");
return 0;
netdev_err:
hinic_unregister_notifier(nic_dev);
set_features_err:
destroy_workqueue(nic_dev->workq);
create_workq_err:
hinic_destroy_qps(nic_dev);
alloc_qps_err:
hinic_del_mac(nic_dev->hwdev, netdev->dev_addr, 0,
hinic_global_func_id_hw(nic_dev->hwdev));
sw_init_err:
(void)hinic_set_super_cqe_state(nic_dev->hwdev, false);
set_supper_cqe_err:
hinic_free_nic_hwdev(nic_dev->hwdev);
init_nic_hwdev_err:
kfree(nic_dev->vlan_bitmap);
vlan_bitmap_err:
free_netdev(netdev);
return err;
}
static void nic_remove(struct hinic_lld_dev *lld_dev, void *adapter)
{
struct hinic_nic_dev *nic_dev = adapter;
struct net_device *netdev;
if (!nic_dev || !hinic_support_nic(lld_dev->hwdev, NULL))
return;
netdev = nic_dev->netdev;
unregister_netdev(netdev);
hinic_unregister_notifier(nic_dev);
cancel_work_sync(&nic_dev->rx_mode_work);
destroy_workqueue(nic_dev->workq);
hinic_destroy_qps(nic_dev);
hinic_clean_mac_list_filter(nic_dev);
hinic_del_mac(nic_dev->hwdev, netdev->dev_addr, 0,
hinic_global_func_id_hw(nic_dev->hwdev));
if (test_bit(HINIC_RSS_ENABLE, &nic_dev->flags))
hinic_rss_template_free(nic_dev->hwdev, nic_dev->rss_tmpl_idx);
(void)hinic_set_super_cqe_state(nic_dev->hwdev, false);
hinic_free_nic_hwdev(nic_dev->hwdev);
kfree(nic_dev->vlan_bitmap);
free_netdev(netdev);
}
int hinic_disable_func_rss(struct hinic_nic_dev *nic_dev)
{
struct net_device *netdev = nic_dev->netdev;
int err, err_netdev = 0;
nicif_info(nic_dev, drv, netdev, "Start to disable RSS\n");
if (!test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) {
nicif_info(nic_dev, drv, netdev, "RSS not enabled, do nothing\n");
return 0;
}
if (netif_running(netdev)) {
err_netdev = hinic_close(netdev);
if (err_netdev) {
nicif_err(nic_dev, drv, netdev,
"Failed to close netdev\n");
return -EFAULT;
}
}
/* free rss template */
err = hinic_rss_template_free(nic_dev->hwdev, nic_dev->rss_tmpl_idx);
if (err) {
nicif_err(nic_dev, drv, netdev, "Failed to free RSS template\n");
} else {
nicif_info(nic_dev, drv, netdev, "Success to free RSS template\n");
clear_bit(HINIC_RSS_ENABLE, &nic_dev->flags);
}
if (netif_running(netdev)) {
err_netdev = hinic_open(netdev);
if (err_netdev)
nicif_err(nic_dev, drv, netdev,
"Failed to open netdev\n");
}
return err ? err : err_netdev;
}
int hinic_enable_func_rss(struct hinic_nic_dev *nic_dev)
{
struct net_device *netdev = nic_dev->netdev;
int err, err_netdev = 0;
nicif_info(nic_dev, drv, netdev, "Start to enable RSS\n");
if (test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) {
nicif_info(nic_dev, drv, netdev, "RSS already enabled, do nothing\n");
return 0;
}
if (netif_running(netdev)) {
err_netdev = hinic_close(netdev);
if (err_netdev) {
nicif_err(nic_dev, drv, netdev,
"Failed to close netdev\n");
return -EFAULT;
}
}
err = hinic_rss_template_alloc(nic_dev->hwdev, &nic_dev->rss_tmpl_idx);
if (err) {
if (err == -ENOSPC)
nicif_warn(nic_dev, drv, netdev,
"Failed to alloc RSS template, table is full\n");
else
nicif_err(nic_dev, drv, netdev,
"Failed to alloc RSS template\n");
} else {
set_bit(HINIC_RSS_ENABLE, &nic_dev->flags);
nicif_info(nic_dev, drv, netdev, "Success to alloc RSS template\n");
}
if (netif_running(netdev)) {
err_netdev = hinic_open(netdev);
if (err_netdev)
nicif_err(nic_dev, drv, netdev,
"Failed to open netdev\n");
}
return err ? err : err_netdev;
}
static const char *hinic_module_link_err[LINK_ERR_NUM] = {
"Unrecognized module",
};
static void hinic_port_module_event_handler(struct hinic_nic_dev *nic_dev,
struct hinic_event_info *event)
{
enum port_module_event_type type = event->module_event.type;
enum link_err_type err_type = event->module_event.err_type;
switch (type) {
case HINIC_PORT_MODULE_CABLE_PLUGGED:
case HINIC_PORT_MODULE_CABLE_UNPLUGGED:
nicif_info(nic_dev, link, nic_dev->netdev,
"Port module event: Cable %s\n",
type == HINIC_PORT_MODULE_CABLE_PLUGGED ?
"plugged" : "unplugged");
break;
case HINIC_PORT_MODULE_LINK_ERR:
if (err_type >= LINK_ERR_NUM) {
nicif_info(nic_dev, link, nic_dev->netdev,
"Link failed, Unknown error type: 0x%x\n",
err_type);
} else {
nicif_info(nic_dev, link, nic_dev->netdev,
"Link failed, error type: 0x%x: %s\n",
err_type, hinic_module_link_err[err_type]);
}
break;
default:
nicif_err(nic_dev, link, nic_dev->netdev,
"Unknown port module type %d\n", type);
break;
}
}
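/* On a link speed change for PG_SM_25GE boards, reload the default
 * coalescing parameters on every queue the user hasn't tuned manually.
 */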
static void hinic_intr_coalesc_change(struct hinic_nic_dev *nic_dev,
struct hinic_event_info *event)
{
u32 hw_to_os_speed[LINK_SPEED_LEVELS] = {SPEED_10, SPEED_100,
SPEED_1000, SPEED_10000,
SPEED_25000, SPEED_40000,
SPEED_100000};
u8 qid, coalesc_timer_cfg, pending_limt;
struct pci_device_id *id;
u32 speed;
int err;
if (nic_dev->adaptive_rx_coal)
return;
speed = hw_to_os_speed[event->link_info.speed];
if (speed == nic_dev->his_link_speed)
return;
id = hinic_get_pci_device_id(nic_dev->pdev);
switch (id->driver_data) {
case HINIC_BOARD_PG_TP_10GE:
return;
case HINIC_BOARD_PG_SM_25GE:
if (speed == SPEED_10000) {
pending_limt =
HINIC_DFT_PG_10GE_TXRX_MSIX_PENDING_LIMIT;
coalesc_timer_cfg =
HINIC_DFT_PG_10GE_TXRX_MSIX_COALESC_TIMER;
} else if (speed == SPEED_25000) {
pending_limt =
HINIC_DFT_PG_25GE_TXRX_MSIX_PENDING_LIMIT;
coalesc_timer_cfg =
HINIC_DFT_PG_ARM_25GE_TXRX_MSIX_COALESC_TIMER;
} else {
pending_limt =
HINIC_DFT_PG_25GE_TXRX_MSIX_PENDING_LIMIT;
coalesc_timer_cfg =
HINIC_DFT_PG_25GE_TXRX_MSIX_COALESC_TIMER;
}
break;
case HINIC_BOARD_PG_100GE:
return;
default:
return;
}
for (qid = 0; qid < nic_dev->num_qps; qid++) {
if (!nic_dev->intr_coalesce[qid].user_set_intr_coal_flag) {
err = set_interrupt_moder(nic_dev, qid,
coalesc_timer_cfg,
pending_limt);
if (!err) {
nic_dev->intr_coalesce[qid].pending_limt =
pending_limt;
nic_dev->intr_coalesce[qid].coalesce_timer_cfg =
coalesc_timer_cfg;
}
}
}
nic_dev->his_link_speed = speed;
}
void nic_event(struct hinic_lld_dev *lld_dev, void *adapter,
struct hinic_event_info *event)
{
struct hinic_nic_dev *nic_dev = adapter;
struct net_device *netdev;
enum hinic_event_type type;
if (!nic_dev || !event || !hinic_support_nic(lld_dev->hwdev, NULL))
return;
netdev = nic_dev->netdev;
type = event->type;
switch (type) {
case HINIC_EVENT_LINK_DOWN:
hinic_link_status_change(nic_dev, false);
break;
case HINIC_EVENT_LINK_UP:
hinic_link_status_change(nic_dev, true);
hinic_intr_coalesc_change(nic_dev, event);
break;
case HINIC_EVENT_HEART_LOST:
hinic_heart_lost(nic_dev);
hinic_link_status_change(nic_dev, false);
break;
case HINIC_EVENT_FAULT:
if (event->info.fault_level == FAULT_LEVEL_SERIOUS_FLR &&
event->info.event.chip.func_id ==
hinic_global_func_id(lld_dev->hwdev))
hinic_link_status_change(nic_dev, false);
break;
case HINIC_EVENT_DCB_STATE_CHANGE:
if (nic_dev->default_cos_id == event->dcb_state.default_cos)
break;
/* The PF notifies the VF of this event; the PF itself doesn't handle it */
if (!HINIC_FUNC_IS_VF(nic_dev->hwdev))
break;
nicif_info(nic_dev, drv, netdev, "Change default cos %d to %d\n",
nic_dev->default_cos_id,
event->dcb_state.default_cos);
nic_dev->default_cos_id = event->dcb_state.default_cos;
hinic_set_sq_default_cos(netdev, nic_dev->default_cos_id);
break;
case HINIC_EVENT_PORT_MODULE_EVENT:
hinic_port_module_event_handler(nic_dev, event);
break;
case HINIC_EVENT_MGMT_WATCHDOG_EVENT:
hinic_link_status_change(nic_dev, false);
break;
default:
break;
}
}
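/* Upper-level driver (ULD) hooks exported to the lld layer; suspend and
 * resume are not implemented for the NIC service.
 */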
struct hinic_uld_info nic_uld_info = {
.probe = nic_probe,
.remove = nic_remove,
.suspend = NULL,
.resume = NULL,
.event = nic_event,
.ioctl = nic_ioctl,
}; /*lint -e766*/
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei HiNIC PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
*/
#ifndef HINIC_NIC_DEV_H
#define HINIC_NIC_DEV_H
#include <linux/netdevice.h>
#include <linux/semaphore.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include "ossl_knl.h"
#include "hinic_nic_io.h"
#include "hinic_nic_cfg.h"
#include "hinic_tx.h"
#include "hinic_rx.h"
#define HINIC_DRV_NAME "hinic"
#define HINIC_CHIP_NAME "hinic"
#define HINIC_DRV_VERSION "2.3.2.17"
struct vf_data_storage;
#define HINIC_FUNC_IS_VF(hwdev) (hinic_func_type(hwdev) == TYPE_VF)
enum hinic_flags {
HINIC_INTF_UP,
HINIC_MAC_FILTER_CHANGED,
HINIC_LP_TEST,
HINIC_RSS_ENABLE,
HINIC_DCB_ENABLE,
HINIC_SAME_RXTX,
HINIC_INTR_ADAPT,
HINIC_UPDATE_MAC_FILTER,
HINIC_ETS_ENABLE,
};
#define RX_BUFF_NUM_PER_PAGE 2
#define HINIC_MAX_MAC_NUM 3
#define LP_PKT_CNT 64
struct hinic_mac_addr {
u8 addr[ETH_ALEN];
u16 state;
};
enum hinic_rx_mode_state {
HINIC_HW_PROMISC_ON,
HINIC_HW_ALLMULTI_ON,
HINIC_PROMISC_FORCE_ON,
HINIC_ALLMULTI_FORCE_ON,
};
enum mac_filter_state {
HINIC_MAC_WAIT_HW_SYNC,
HINIC_MAC_HW_SYNCED,
HINIC_MAC_WAIT_HW_UNSYNC,
HINIC_MAC_HW_UNSYNCED,
};
struct hinic_mac_filter {
struct list_head list;
u8 addr[ETH_ALEN];
unsigned long state;
};
/* TC bandwidth allocation per direction */
struct hinic_tc_attr {
u8 pg_id; /* Priority Group(PG) ID */
u8 bw_pct; /* % of PG's bandwidth */
u8 up_map; /* User Priority to Traffic Class mapping */
u8 prio_type;
};
/* User priority configuration */
struct hinic_tc_cfg {
struct hinic_tc_attr path[2]; /* One each for Tx/Rx */
bool pfc_en;
};
struct hinic_dcb_config {
u8 pg_tcs;
u8 pfc_tcs;
bool pfc_state;
struct hinic_tc_cfg tc_cfg[HINIC_DCB_TC_MAX];
u8 bw_pct[2][HINIC_DCB_PG_MAX]; /* One each for Tx/Rx */
};
enum hinic_intr_flags {
HINIC_INTR_ON,
HINIC_RESEND_ON,
};
struct hinic_irq {
struct net_device *netdev;
/* MSI-X entry index corresponding to this IRQ */
u16 msix_entry_idx;
u32 irq_id; /* The IRQ number from OS */
char irq_name[IFNAMSIZ + 16];
struct napi_struct napi;
cpumask_t affinity_mask;
struct hinic_txq *txq;
struct hinic_rxq *rxq;
unsigned long intr_flag;
};
struct hinic_intr_coal_info {
u8 pending_limt;
u8 coalesce_timer_cfg;
u8 resend_timer_cfg;
u64 pkt_rate_low;
u8 rx_usecs_low;
u8 rx_pending_limt_low;
u64 pkt_rate_high;
u8 rx_usecs_high;
u8 rx_pending_limt_high;
u8 user_set_intr_coal_flag;
};
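/* Update a 64-bit software counter without a spinlock, using the
 * u64_stats_sync protocol. Illustrative use, matching the fields of
 * struct hinic_nic_stats below:
 * HINIC_NIC_STATS_INC(nic_dev, netdev_tx_timeout);
 */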
#define HINIC_NIC_STATS_INC(nic_dev, field) \
{ \
u64_stats_update_begin(&(nic_dev)->stats.syncp); \
(nic_dev)->stats.field++; \
u64_stats_update_end(&(nic_dev)->stats.syncp); \
}
struct hinic_nic_stats {
u64 netdev_tx_timeout;
/* Subdivision statistics shown in the private tool */
u64 tx_carrier_off_drop;
u64 tx_invalid_qid;
struct u64_stats_sync syncp;
};
struct hinic_nic_dev {
struct pci_dev *pdev;
struct net_device *netdev;
void *hwdev;
int poll_weight;
unsigned long *vlan_bitmap;
u16 num_qps;
u16 max_qps;
u32 msg_enable;
unsigned long flags;
u16 sq_depth;
u16 rq_depth;
/* SQ CoS mapping, indexed by user priority */
u8 sq_cos_mapping[HINIC_DCB_UP_MAX];
u8 default_cos_id;
struct hinic_txq *txqs;
struct hinic_rxq *rxqs;
struct nic_service_cap nic_cap;
struct irq_info *qps_irq_info;
struct hinic_irq *irq_cfg;
struct work_struct rx_mode_work;
struct delayed_work moderation_task;
struct workqueue_struct *workq;
struct list_head uc_filter_list;
struct list_head mc_filter_list;
unsigned long rx_mod_state;
int netdev_uc_cnt;
int netdev_mc_cnt;
int lb_test_rx_idx;
int lb_pkt_len;
u8 *lb_test_rx_buf;
u8 rss_tmpl_idx;
u16 num_rss;
u16 rss_limit;
u8 rss_hash_engine;
struct nic_rss_type rss_type;
u8 *rss_hkey_user;
/* hkey in big endian */
u32 *rss_hkey_user_be;
u32 *rss_indir_user;
u8 dcbx_cap;
u32 dcb_changes;
u8 max_cos;
u8 up_valid_bitmap;
u8 up_cos[HINIC_DCB_UP_MAX];
struct ieee_ets hinic_ieee_ets_default;
struct ieee_ets hinic_ieee_ets;
struct ieee_pfc hinic_ieee_pfc;
struct hinic_dcb_config dcb_cfg;
struct hinic_dcb_config tmp_dcb_cfg;
struct hinic_dcb_config save_dcb_cfg;
unsigned long dcb_flags;
int disable_port_cnt;
/* lock for disable or enable traffic flow */
struct semaphore dcb_sem;
bool heart_status;
struct hinic_intr_coal_info *intr_coalesce;
unsigned long last_moder_jiffies;
u32 adaptive_rx_coal;
u8 intr_coal_set_flag;
u32 his_link_speed;
/* interrupt coalescing must differ when running in a virtual machine */
bool in_vm;
bool is_vm_slave;
int is_bm_slave;
struct hinic_nic_stats stats;
/* lock for nic resource */
struct mutex nic_mutex;
bool force_port_disable;
struct semaphore port_state_sem;
u8 link_status;
struct hinic_environment_info env_info;
struct hinic_adaptive_cfg adaptive_cfg;
/* pangea cpu affinity setting */
bool force_affinity;
cpumask_t affinity_mask;
u32 lro_replenish_thld;
u16 rx_buff_len;
u32 page_order;
};
extern struct hinic_uld_info nic_uld_info;
int hinic_open(struct net_device *netdev);
int hinic_close(struct net_device *netdev);
void hinic_set_ethtool_ops(struct net_device *netdev);
void hinicvf_set_ethtool_ops(struct net_device *netdev);
void hinic_update_num_qps(struct net_device *netdev);
int nic_ioctl(void *uld_dev, u32 cmd, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size);
int hinic_force_port_disable(struct hinic_nic_dev *nic_dev);
int hinic_force_set_port_state(struct hinic_nic_dev *nic_dev, bool enable);
int hinic_maybe_set_port_state(struct hinic_nic_dev *nic_dev, bool enable);
void hinic_link_status_change(struct hinic_nic_dev *nic_dev, bool status);
int hinic_disable_func_rss(struct hinic_nic_dev *nic_dev);
int hinic_enable_func_rss(struct hinic_nic_dev *nic_dev);
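/* Pick the logging backend based on netdev registration state: once the
 * net device is registered, messages go through the netdev-aware nicif_*
 * helpers; before that (e.g. during probe) they fall back to the
 * PCI-device-based nic_* helpers.
 */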
#define hinic_msg(level, nic_dev, msglvl, format, arg...) \
do { \
if ((nic_dev)->netdev && (nic_dev)->netdev->reg_state \
== NETREG_REGISTERED) \
nicif_##level((nic_dev), msglvl, (nic_dev)->netdev, \
format, ## arg); \
else \
nic_##level(&(nic_dev)->pdev->dev, \
format, ## arg); \
} while (0)
#define hinic_info(nic_dev, msglvl, format, arg...) \
hinic_msg(info, nic_dev, msglvl, format, ## arg)
#define hinic_warn(nic_dev, msglvl, format, arg...) \
hinic_msg(warn, nic_dev, msglvl, format, ## arg)
#define hinic_err(nic_dev, msglvl, format, arg...) \
hinic_msg(err, nic_dev, msglvl, format, ## arg)
#endif
// SPDX-License-Identifier: GPL-2.0
/* Huawei HiNIC PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <net/sock.h>
#include "ossl_knl.h"
#include "hinic_hw.h"
#include "hinic_hw_mgmt.h"
#include "hinic_lld.h"
#include "hinic_nic_dev.h"
#include "hinic_dbg.h"
#include "hinic_nictool.h"
#include "hinic_qp.h"
#include "hinic_dcb.h"
#include "hinic_dbgtool_knl.h"
#define HIADM_DEV_PATH "/dev/nictool_dev"
#define HIADM_DEV_CLASS "nictool_class"
#define HIADM_DEV_NAME "nictool_dev"
#define HINIC_CMDQ_BUF_MAX_SIZE 2048U
#define MSG_MAX_IN_SIZE (2048 * 1024)
#define MSG_MAX_OUT_SIZE (2048 * 1024)
static dev_t g_dev_id = {0};
/*lint -save -e104 -e808*/
static struct class *g_nictool_class;
/*lint -restore*/
static struct cdev g_nictool_cdev;
static int g_nictool_init_flag;
static int g_nictool_ref_cnt;
typedef int (*nic_driv_module)(struct hinic_nic_dev *nic_dev, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size);
struct nic_drv_module_handle {
enum driver_cmd_type driv_cmd_name;
nic_driv_module driv_func;
};
typedef int (*hw_driv_module)(void *hwdev, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size);
struct hw_drv_module_handle {
enum driver_cmd_type driv_cmd_name;
hw_driv_module driv_func;
};
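/* Buffer helpers for the nictool ioctl path. Requests routed to the
 * microcode (SEND_TO_UCODE) use command-queue buffers obtained from
 * hinic_alloc_cmd_buf(); all other requests use plain kernel memory, so
 * allocation and free must take the same branch.
 */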
static void free_buff_in(void *hwdev, struct msg_module *nt_msg, void *buf_in)
{
if (!buf_in)
return;
if (nt_msg->module == SEND_TO_UCODE)
hinic_free_cmd_buf(hwdev, buf_in);
else
kfree(buf_in);
}
static int alloc_buff_in(void *hwdev, struct msg_module *nt_msg,
u32 in_size, void **buf_in)
{
void *msg_buf;
if (!in_size)
return 0;
if (nt_msg->module == SEND_TO_UCODE) {
struct hinic_cmd_buf *cmd_buf;
if (in_size > HINIC_CMDQ_BUF_MAX_SIZE) {
pr_err("Cmdq in size(%u) more than 2KB\n", in_size);
return -ENOMEM;
}
cmd_buf = hinic_alloc_cmd_buf(hwdev);
if (!cmd_buf) {
pr_err("Alloc cmdq cmd buffer failed in %s\n",
__func__);
return -ENOMEM;
}
msg_buf = cmd_buf->buf;
*buf_in = (void *)cmd_buf;
cmd_buf->size = (u16)in_size;
} else {
if (in_size > MSG_MAX_IN_SIZE) {
pr_err("In size(%u) more than 2M\n", in_size);
return -ENOMEM;
}
msg_buf = kzalloc(in_size, GFP_KERNEL);
*buf_in = msg_buf;
}
if (!(*buf_in)) {
pr_err("Alloc buffer in failed\n");
return -ENOMEM;
}
if (copy_from_user(msg_buf, nt_msg->in_buff, in_size)) {
pr_err("%s:%d: Copy from user failed\n",
__func__, __LINE__);
free_buff_in(hwdev, nt_msg, *buf_in);
return -EFAULT;
}
return 0;
}
static void free_buff_out(void *hwdev, struct msg_module *nt_msg,
void *buf_out)
{
if (!buf_out)
return;
if (nt_msg->module == SEND_TO_UCODE &&
!nt_msg->ucode_cmd.ucode_db.ucode_imm)
hinic_free_cmd_buf(hwdev, buf_out);
else
kfree(buf_out);
}
static int alloc_buff_out(void *hwdev, struct msg_module *nt_msg,
u32 out_size, void **buf_out)
{
if (!out_size)
return 0;
if (nt_msg->module == SEND_TO_UCODE &&
!nt_msg->ucode_cmd.ucode_db.ucode_imm) {
struct hinic_cmd_buf *cmd_buf;
if (out_size > HINIC_CMDQ_BUF_MAX_SIZE) {
pr_err("Cmdq out size(%u) more than 2KB\n", out_size);
return -ENOMEM;
}
cmd_buf = hinic_alloc_cmd_buf(hwdev);
*buf_out = (void *)cmd_buf;
} else {
if (out_size > MSG_MAX_OUT_SIZE) {
pr_err("out size(%u) more than 2M\n", out_size);
return -ENOMEM;
}
*buf_out = kzalloc(out_size, GFP_KERNEL);
}
if (!(*buf_out)) {
pr_err("Alloc buffer out failed\n");
return -ENOMEM;
}
return 0;
}
static int copy_buf_out_to_user(struct msg_module *nt_msg,
u32 out_size, void *buf_out)
{
int ret = 0;
void *msg_out;
if (nt_msg->module == SEND_TO_UCODE &&
!nt_msg->ucode_cmd.ucode_db.ucode_imm)
msg_out = ((struct hinic_cmd_buf *)buf_out)->buf;
else
msg_out = buf_out;
if (copy_to_user(nt_msg->out_buf, msg_out, out_size))
ret = -EFAULT;
return ret;
}
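/* The per-command debug handlers below follow one convention: validate
 * buf_in, buf_out and the user-supplied *out_size against the exact size
 * of the structure being filled before touching either buffer.
 */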
static int hinic_dbg_get_sq_info(struct hinic_nic_dev *nic_dev, u16 q_id,
struct hinic_dbg_sq_info *sq_info,
u32 *msg_size);
static int hinic_dbg_get_rq_info(struct hinic_nic_dev *nic_dev, u16 q_id,
struct hinic_dbg_rq_info *rq_info,
u32 *msg_size);
static int get_tx_info(struct hinic_nic_dev *nic_dev, void *buf_in, u32 in_size,
void *buf_out, u32 *out_size)
{
u16 q_id;
int err;
if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Netdev is down, can't get tx info\n");
return -EFAULT;
}
if (!buf_in || !buf_out || in_size != sizeof(int))
return -EINVAL;
q_id = *((u16 *)buf_in);
err = hinic_dbg_get_sq_info(nic_dev, q_id, buf_out, out_size);
return err;
}
static int get_q_num(struct hinic_nic_dev *nic_dev, void *buf_in, u32 in_size,
void *buf_out, u32 *out_size)
{
u16 num_qp;
if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Netdev is down, can't get queue number\n");
return -EFAULT;
}
if (!buf_out)
return -EFAULT;
num_qp = hinic_dbg_get_qp_num(nic_dev->hwdev);
if (!num_qp)
return -EFAULT;
if (*out_size != sizeof(u16)) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Unexpect out buf size from user: %d, expect: %lu\n",
*out_size, sizeof(u16));
return -EFAULT;
}
*((u16 *)buf_out) = num_qp;
return 0;
}
static int get_tx_wqe_info(struct hinic_nic_dev *nic_dev,
void *buf_in, u32 in_size,
void *buf_out, u32 *out_size)
{
struct hinic_wqe_info *info = buf_in;
u16 q_id = 0;
u16 idx = 0, wqebb_cnt = 1;
int err;
if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Netdev is down, can't get tx wqe info\n");
return -EFAULT;
}
if (!info || !buf_out || in_size != sizeof(*info))
return -EFAULT;
q_id = (u16)info->q_id;
idx = (u16)info->wqe_id;
err = hinic_dbg_get_sq_wqe_info(nic_dev->hwdev, q_id,
idx, wqebb_cnt,
buf_out, (u16 *)out_size);
return err;
}
static int get_rx_info(struct hinic_nic_dev *nic_dev, void *buf_in, u32 in_size,
void *buf_out, u32 *out_size)
{
u16 q_id;
int err;
if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Netdev is down, can't get rx info\n");
return -EFAULT;
}
if (!buf_in || !buf_out || in_size != sizeof(int))
return -EINVAL;
q_id = *((u16 *)buf_in);
err = hinic_dbg_get_rq_info(nic_dev, q_id, buf_out, out_size);
for (q_id = 0; q_id < nic_dev->num_qps; q_id++) {
nicif_info(nic_dev, drv, nic_dev->netdev,
"qid: %u, coalesc_timer:0x%x, pending_limit: 0x%x\n",
q_id, nic_dev->rxqs[q_id].last_coalesc_timer_cfg,
nic_dev->rxqs[q_id].last_pending_limt);
}
return err;
}
static int get_rx_wqe_info(struct hinic_nic_dev *nic_dev, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size)
{
struct hinic_wqe_info *info = buf_in;
u16 q_id = 0;
u16 idx = 0, wqebb_cnt = 1;
int err;
if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Netdev is down, can't get rx wqe info\n");
return -EFAULT;
}
if (!info || !buf_out || in_size != sizeof(*info))
return -EFAULT;
q_id = (u16)info->q_id;
idx = (u16)info->wqe_id;
err = hinic_dbg_get_rq_wqe_info(nic_dev->hwdev, q_id,
idx, wqebb_cnt,
buf_out, (u16 *)out_size);
return err;
}
static int get_inter_num(struct hinic_nic_dev *nic_dev, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size)
{
u16 intr_num;
intr_num = hinic_intr_num(nic_dev->hwdev);
if (*out_size != sizeof(u16)) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Unexpect out buf size from user: %d, expect: %lu\n",
*out_size, sizeof(u16));
return -EFAULT;
}
*(u16 *)buf_out = intr_num;
return 0;
}
static void clean_nicdev_stats(struct hinic_nic_dev *nic_dev)
{
u64_stats_update_begin(&nic_dev->stats.syncp);
nic_dev->stats.netdev_tx_timeout = 0;
nic_dev->stats.tx_carrier_off_drop = 0;
nic_dev->stats.tx_invalid_qid = 0;
u64_stats_update_end(&nic_dev->stats.syncp);
}
static int clear_func_static(struct hinic_nic_dev *nic_dev, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size)
{
int i;
if (*out_size != 0) {
pr_err("Unexpect out buf size from user: %d, expect: 0\n",
*out_size);
return -EINVAL;
}
clean_nicdev_stats(nic_dev);
for (i = 0; i < nic_dev->max_qps; i++) {
hinic_rxq_clean_stats(&nic_dev->rxqs[i].rxq_stats);
hinic_txq_clean_stats(&nic_dev->txqs[i].txq_stats);
}
return 0;
}
static int get_num_cos(struct hinic_nic_dev *nic_dev, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size)
{
u8 *num_cos = buf_out;
if (!buf_out || !out_size)
return -EINVAL;
if (*out_size != sizeof(*num_cos)) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Unexpect out buf size from user: %d, expect: %lu\n",
*out_size, sizeof(*num_cos));
return -EFAULT;
}
return hinic_get_num_cos(nic_dev, num_cos);
}
static int get_dcb_cos_up_map(struct hinic_nic_dev *nic_dev, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size)
{
struct hinic_cos_up_map *map = buf_out;
if (!buf_out || !out_size)
return -EINVAL;
if (*out_size != sizeof(*map)) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Unexpect out buf size from user: %d, expect: %lu\n",
*out_size, sizeof(*map));
return -EFAULT;
}
return hinic_get_cos_up_map(nic_dev, &map->num_cos, map->cos_up);
}
static int set_dcb_cos_up_map(struct hinic_nic_dev *nic_dev, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size)
{
struct hinic_cos_up_map *map = buf_in;
if (!buf_in || !out_size || in_size != sizeof(*map))
return -EINVAL;
if (*out_size != sizeof(*map)) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Unexpect out buf size from user: %d, expect: %lu\n",
*out_size, sizeof(*map));
return -EINVAL;
}
return hinic_set_cos_up_map(nic_dev, map->cos_up);
}
static int get_rx_cqe_info(struct hinic_nic_dev *nic_dev, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size)
{
struct hinic_wqe_info *info = buf_in;
u16 q_id = 0;
u16 idx = 0;
if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Netdev is down, can't get rx cqe info\n");
return -EFAULT;
}
if (!info || !buf_out || in_size != sizeof(*info))
return -EFAULT;
if (*out_size != sizeof(struct hinic_rq_cqe)) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Unexpect out buf size from user: %d, expect: %lu\n",
*out_size, sizeof(struct hinic_rq_cqe));
return -EFAULT;
}
q_id = (u16)info->q_id;
idx = (u16)info->wqe_id;
if (q_id >= nic_dev->num_qps || idx >= nic_dev->rxqs[q_id].q_depth)
return -EFAULT;
memcpy(buf_out, nic_dev->rxqs[q_id].rx_info[idx].cqe,
sizeof(struct hinic_rq_cqe));
return 0;
}
static int hinic_dbg_get_sq_info(struct hinic_nic_dev *nic_dev, u16 q_id,
struct hinic_dbg_sq_info *sq_info,
u32 *msg_size)
{
int err;
if (!nic_dev)
return -EINVAL;
if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Netdev is down, can't get sq info\n");
return -EFAULT;
}
if (q_id >= nic_dev->num_qps) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Input queue id is larger than the actual queue number\n");
return -EINVAL;
}
if (*msg_size != sizeof(*sq_info)) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Unexpect out buf size from user: %d, expect: %lu\n",
*msg_size, sizeof(*sq_info));
return -EFAULT;
}
sq_info->q_id = q_id;
sq_info->pi = hinic_dbg_get_sq_pi(nic_dev->hwdev, q_id);
sq_info->ci = hinic_get_sq_local_ci(nic_dev->hwdev, q_id);
sq_info->fi = hinic_get_sq_hw_ci(nic_dev->hwdev, q_id);
sq_info->q_depth = nic_dev->txqs[q_id].q_depth;
/* pi_reverse */
sq_info->weqbb_size = HINIC_SQ_WQEBB_SIZE;
/* priority */
sq_info->ci_addr = hinic_dbg_get_sq_ci_addr(nic_dev->hwdev, q_id);
sq_info->cla_addr = hinic_dbg_get_sq_cla_addr(nic_dev->hwdev, q_id);
sq_info->slq_handle = hinic_dbg_get_sq_wq_handle(nic_dev->hwdev, q_id);
/* direct wqe */
err = hinic_dbg_get_sq_db_addr(nic_dev->hwdev,
q_id, &sq_info->db_addr.map_addr,
&sq_info->db_addr.phy_addr,
&sq_info->pg_idx);
sq_info->glb_sq_id = hinic_dbg_get_global_qpn(nic_dev->hwdev) + q_id;
return err;
}
static int hinic_dbg_get_rq_info(struct hinic_nic_dev *nic_dev, u16 q_id,
struct hinic_dbg_rq_info *rq_info,
u32 *msg_size)
{
if (!nic_dev)
return -EINVAL;
if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Netdev is down, can't get rq info\n");
return -EFAULT;
}
if (q_id >= nic_dev->num_qps) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Input queue id is larger than the actual queue number\n");
return -EINVAL;
}
if (*msg_size != sizeof(*rq_info)) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Unexpect out buf size from user: %d, expect: %lu\n",
*msg_size, sizeof(*rq_info));
return -EFAULT;
}
rq_info->q_id = q_id;
rq_info->glb_rq_id = hinic_dbg_get_global_qpn(nic_dev->hwdev) + q_id;
rq_info->hw_pi = hinic_dbg_get_rq_hw_pi(nic_dev->hwdev, q_id);
rq_info->ci = (u16)nic_dev->rxqs[q_id].cons_idx &
nic_dev->rxqs[q_id].q_mask;
rq_info->sw_pi = nic_dev->rxqs[q_id].next_to_update;
rq_info->wqebb_size = HINIC_RQ_WQE_SIZE;
rq_info->q_depth = nic_dev->rxqs[q_id].q_depth;
rq_info->buf_len = nic_dev->rxqs[q_id].buf_len;
rq_info->slq_handle = hinic_dbg_get_rq_wq_handle(nic_dev->hwdev, q_id);
if (!rq_info->slq_handle) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Get rq slq handle null\n");
return -EFAULT;
}
rq_info->ci_wqe_page_addr =
hinic_slq_get_first_pageaddr(rq_info->slq_handle);
rq_info->ci_cla_tbl_addr =
hinic_dbg_get_rq_cla_addr(nic_dev->hwdev, q_id);
rq_info->msix_idx = nic_dev->rxqs[q_id].msix_entry_idx;
rq_info->msix_vector = nic_dev->rxqs[q_id].irq_id;
return 0;
}
static int get_loopback_mode(struct hinic_nic_dev *nic_dev, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size)
{
struct hinic_nic_loop_mode *mode = buf_out;
int err;
if (!out_size || !mode)
return -EFAULT;
if (*out_size != sizeof(*mode)) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Unexpect out buf size from user: %d, expect: %lu\n",
*out_size, sizeof(*mode));
return -EFAULT;
}
err = hinic_get_loopback_mode_ex(nic_dev->hwdev, &mode->loop_mode,
&mode->loop_ctrl);
return err;
}
static int set_loopback_mode(struct hinic_nic_dev *nic_dev, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size)
{
struct hinic_nic_loop_mode *mode = buf_in;
if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Netdev is down, can't set loopback mode\n");
return -EFAULT;
}
if (!mode || !out_size || in_size != sizeof(*mode))
return -EFAULT;
if (*out_size != sizeof(*mode)) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Unexpect out buf size from user: %d, expect: %lu\n",
*out_size, sizeof(*mode));
return -EINVAL;
}
return hinic_set_loopback_mode_ex(nic_dev->hwdev, mode->loop_mode,
mode->loop_ctrl);
}
static int set_link_mode(struct hinic_nic_dev *nic_dev, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size)
{
enum hinic_nic_link_mode *link = buf_in;
u8 link_status;
if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Netdev is down, can't set link mode\n");
return -EFAULT;
}
if (!link || !out_size || in_size != sizeof(*link))
return -EFAULT;
if (*out_size != sizeof(*link)) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Unexpect out buf size from user: %d, expect: %lu\n",
*out_size, sizeof(*link));
return -EINVAL;
}
switch (*link) {
case HINIC_LINK_MODE_AUTO:
if (hinic_get_link_state(nic_dev->hwdev, &link_status))
link_status = false;
hinic_link_status_change(nic_dev, (bool)link_status);
nicif_info(nic_dev, drv, nic_dev->netdev,
"Set link mode: auto succeed, now is link %s\n",
(link_status ? "up" : "down"));
break;
case HINIC_LINK_MODE_UP:
hinic_link_status_change(nic_dev, true);
nicif_info(nic_dev, drv, nic_dev->netdev,
"Set link mode: up succeed\n");
break;
case HINIC_LINK_MODE_DOWN:
hinic_link_status_change(nic_dev, false);
nicif_info(nic_dev, drv, nic_dev->netdev,
"Set link mode: down succeed\n");
break;
default:
nicif_err(nic_dev, drv, nic_dev->netdev,
"Invalid link mode %d to set\n", *link);
return -EINVAL;
}
return 0;
}
static int set_dcb_cfg(struct hinic_nic_dev *nic_dev, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size)
{
union _dcb_ctl dcb_ctl = {.data = 0};
int err;
if (!buf_in || !buf_out || *out_size != sizeof(u32) ||
in_size != sizeof(u32))
return -EINVAL;
dcb_ctl.data = *((u32 *)buf_in);
err = hinic_setup_dcb_tool(nic_dev->netdev,
&dcb_ctl.dcb_data.dcb_en,
!!dcb_ctl.dcb_data.wr_flag);
if (err) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Failed to setup dcb state to %d\n",
!!dcb_ctl.dcb_data.dcb_en);
err = EINVAL;
}
dcb_ctl.dcb_data.err = (u8)err;
*((u32 *)buf_out) = (u32)dcb_ctl.data;
return 0;
}
int get_pfc_info(struct hinic_nic_dev *nic_dev, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size)
{
union _pfc pfc = {.data = 0};
if (!buf_in || !buf_out || *out_size != sizeof(u32) ||
in_size != sizeof(u32))
return -EINVAL;
pfc.data = *((u32 *)buf_in);
hinic_dcbnl_set_pfc_en_tool(nic_dev->netdev,
&pfc.pfc_data.pfc_en, false);
hinic_dcbnl_get_pfc_cfg_tool(nic_dev->netdev,
&pfc.pfc_data.pfc_priority);
hinic_dcbnl_get_tc_num_tool(nic_dev->netdev,
&pfc.pfc_data.num_of_tc);
*((u32 *)buf_out) = (u32)pfc.data;
return 0;
}
int set_pfc_control(struct hinic_nic_dev *nic_dev, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size)
{
u8 pfc_en = 0;
u8 err = 0;
if (!buf_in || !buf_out || *out_size != sizeof(u8) ||
in_size != sizeof(u8))
return -EINVAL;
pfc_en = *((u8 *)buf_in);
if (!(test_bit(HINIC_DCB_ENABLE, &nic_dev->flags))) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Need to enable dcb first\n");
err = 0xff;
goto exit;
}
hinic_dcbnl_set_pfc_en_tool(nic_dev->netdev, &pfc_en, true);
err = hinic_dcbnl_set_pfc_tool(nic_dev->netdev);
if (err) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Failed to set pfc to %s\n",
pfc_en ? "enable" : "disable");
}
exit:
*((u8 *)buf_out) = (u8)err;
return 0;
}
int set_ets(struct hinic_nic_dev *nic_dev, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size)
{
struct _ets ets = {0};
u8 err = 0;
u8 i;
u8 support_tc = nic_dev->max_cos;
if (!buf_in || !buf_out || *out_size != sizeof(u8) ||
in_size != sizeof(struct _ets))
return -EINVAL;
memcpy(&ets, buf_in, sizeof(struct _ets));
if (!(test_bit(HINIC_DCB_ENABLE, &nic_dev->flags))) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Need to enable dcb first\n");
err = 0xff;
goto exit;
}
if (ets.flag_com.ets_flag.flag_ets_enable) {
hinic_dcbnl_set_ets_en_tool(nic_dev->netdev, &ets.ets_en, true);
if (!ets.ets_en)
goto exit;
}
if (!(test_bit(HINIC_ETS_ENABLE, &nic_dev->flags))) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Need to enable ets first\n");
err = 0xff;
goto exit;
}
if (ets.flag_com.ets_flag.flag_ets_cos) {
for (i = 0; i < HINIC_DCB_COS_MAX; i++) {
if (ets.tc[i] >= HINIC_DCB_TC_MAX) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"ETS tc id %d out of range\n",
ets.tc[i]);
err = 0xFF;
goto exit;
}
}
hinic_dcbnl_set_ets_tc_tool(nic_dev->netdev, ets.tc, true);
}
if (ets.flag_com.ets_flag.flag_ets_percent) {
for (i = support_tc; i < HINIC_DCB_TC_MAX; i++) {
if (ets.ets_percent[i]) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"ETS setting out of range\n");
break;
}
}
hinic_dcbnl_set_ets_pecent_tool(nic_dev->netdev,
ets.ets_percent, true);
}
if (ets.flag_com.ets_flag.flag_ets_strict)
hinic_dcbnl_set_ets_strict_tool(nic_dev->netdev,
&ets.strict, true);
err = hinic_dcbnl_set_ets_tool(nic_dev->netdev);
if (err) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Failed to set ets [%d]\n", err);
}
exit:
*((u8 *)buf_out) = err;
return 0;
}
int get_support_up(struct hinic_nic_dev *nic_dev, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size)
{
u8 *up_num = buf_out;
u8 support_up = 0;
u8 i;
u8 up_valid_bitmap = nic_dev->up_valid_bitmap;
if (!buf_in || !buf_out || !out_size)
return -EINVAL;
if (*out_size != sizeof(*up_num)) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Unexpect out buf size from user: %d, expect: %lu\n",
*out_size, sizeof(*up_num));
return -EFAULT;
}
for (i = 0; i < HINIC_DCB_UP_MAX; i++) {
if (up_valid_bitmap & BIT(i))
support_up++;
}
*up_num = support_up;
return 0;
}
int get_support_tc(struct hinic_nic_dev *nic_dev, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size)
{
u8 *tc_num = buf_out;
if (!buf_in || !buf_out || !out_size)
return -EINVAL;
if (*out_size != sizeof(*tc_num)) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Unexpect out buf size from user: %d, expect: %lu\n",
*out_size, sizeof(*tc_num));
return -EFAULT;
}
hinic_dcbnl_get_tc_num_tool(nic_dev->netdev, tc_num);
return 0;
}
int get_ets_info(struct hinic_nic_dev *nic_dev, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size)
{
struct _ets *ets = buf_out;
if (!buf_in || !buf_out || *out_size != sizeof(*ets))
return -EINVAL;
hinic_dcbnl_set_ets_pecent_tool(nic_dev->netdev,
ets->ets_percent, false);
hinic_dcbnl_set_ets_tc_tool(nic_dev->netdev, ets->tc, false);
hinic_dcbnl_set_ets_en_tool(nic_dev->netdev, &ets->ets_en, false);
hinic_dcbnl_set_ets_strict_tool(nic_dev->netdev, &ets->strict, false);
ets->err = 0;
return 0;
}
int set_pfc_priority(struct hinic_nic_dev *nic_dev, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size)
{
u8 pfc_priority = 0;
u8 err = 0;
if (!buf_in || !buf_out || *out_size != sizeof(u8) ||
in_size != sizeof(u8))
return -EINVAL;
pfc_priority = *((u8 *)buf_in);
if (!((test_bit(HINIC_DCB_ENABLE, &nic_dev->flags)) &&
nic_dev->tmp_dcb_cfg.pfc_state)) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Need to enable pfc first\n");
err = 0xff;
goto exit;
}
hinic_dcbnl_set_pfc_cfg_tool(nic_dev->netdev, pfc_priority);
err = hinic_dcbnl_set_pfc_tool(nic_dev->netdev);
if (err) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Failed to set pfc to %x priority\n",
pfc_priority);
}
exit:
*((u8 *)buf_out) = (u8)err;
return 0;
}
static int set_pf_bw_limit(struct hinic_nic_dev *nic_dev, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size)
{
u32 pf_bw_limit = 0;
int err;
if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"To set VF bandwidth rate, please use ip link cmd\n");
return -EINVAL;
}
if (!buf_in || !buf_out || in_size != sizeof(u32) ||
*out_size != sizeof(u8))
return -EINVAL;
pf_bw_limit = *((u32 *)buf_in);
err = hinic_set_pf_bw_limit(nic_dev->hwdev, pf_bw_limit);
if (err) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Failed to set pf bandwidth limit to %d%%\n",
pf_bw_limit);
if (err < 0)
return err;
}
*((u8 *)buf_out) = (u8)err;
return 0;
}
static int get_pf_bw_limit(struct hinic_nic_dev *nic_dev, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size)
{
u32 pf_bw_limit = 0;
int err;
if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"To get VF bandwidth rate, please use ip link cmd\n");
return -EINVAL;
}
if (!buf_out || *out_size != sizeof(u32)) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Unexpect out buf size from user :%d, expect: %lu\n",
*out_size, sizeof(u32));
return -EFAULT;
}
err = hinic_dbg_get_pf_bw_limit(nic_dev->hwdev, &pf_bw_limit);
if (err)
return err;
*((u32 *)buf_out) = pf_bw_limit;
return 0;
}
static int get_poll_weight(struct hinic_nic_dev *nic_dev, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size)
{
struct hinic_nic_poll_weight *weight_info = buf_out;
if (!buf_out || *out_size != sizeof(*weight_info)) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Unexpect out buf size from user :%d, expect: %lu\n",
*out_size, sizeof(*weight_info));
return -EFAULT;
}
weight_info->poll_weight = nic_dev->poll_weight;
return 0;
}
static int set_poll_weight(struct hinic_nic_dev *nic_dev, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size)
{
struct hinic_nic_poll_weight *weight_info = buf_in;
if (!buf_in || in_size != sizeof(*weight_info) ||
*out_size != sizeof(u32)) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Unexpect in buf size: %u or out buf size: %d from user, expect: %lu\n",
in_size, *out_size, sizeof(*weight_info));
return -EFAULT;
}
nic_dev->poll_weight = weight_info->poll_weight;
return 0;
}
static int get_homologue(struct hinic_nic_dev *nic_dev, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size)
{
struct hinic_homologues *homo = buf_out;
if (!buf_out || *out_size != sizeof(*homo)) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Unexpect out buf size from user: %d, expect: %lu\n",
*out_size, sizeof(*homo));
return -EFAULT;
}
if (test_bit(HINIC_SAME_RXTX, &nic_dev->flags))
homo->homo_state = HINIC_HOMOLOGUES_ON;
else
homo->homo_state = HINIC_HOMOLOGUES_OFF;
return 0;
}
static int set_homologue(struct hinic_nic_dev *nic_dev, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size)
{
struct hinic_homologues *homo = buf_in;
if (!buf_in || in_size != sizeof(*homo) ||
*out_size != sizeof(*homo)) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Unexpect in buf size: %d or out buf size: %d from user, expect: %lu\n",
in_size, *out_size, sizeof(*homo));
return -EFAULT;
}
if (homo->homo_state == HINIC_HOMOLOGUES_ON) {
set_bit(HINIC_SAME_RXTX, &nic_dev->flags);
} else if (homo->homo_state == HINIC_HOMOLOGUES_OFF) {
clear_bit(HINIC_SAME_RXTX, &nic_dev->flags);
} else {
pr_err("Invalid parameters\n");
return -EFAULT;
}
return 0;
}
static int get_sset_count(struct hinic_nic_dev *nic_dev, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size)
{
u32 count;
if (!buf_in || !buf_out || in_size != sizeof(u32) ||
*out_size != sizeof(u32)) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Invalid parameters\n");
return -EINVAL;
}
switch (*((u32 *)buf_in)) {
case HINIC_SHOW_SSET_IO_STATS:
count = hinic_get_io_stats_size(nic_dev);
break;
default:
count = 0;
break;
}
*((u32 *)buf_out) = count;
return 0;
}
static int get_sset_stats(struct hinic_nic_dev *nic_dev, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size)
{
struct hinic_show_item *items = buf_out;
u32 sset, count, size;
int err;
if (!buf_in || in_size != sizeof(u32) || !out_size || !buf_out)
return -EINVAL;
size = sizeof(u32);
err = get_sset_count(nic_dev, buf_in, in_size, &count, &size);
if (err)
return -EINVAL;
if (count * sizeof(*items) != *out_size) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Unexpect out buf size from user: %d, expect: %lu\n",
*out_size, count * sizeof(*items));
return -EINVAL;
}
sset = *((u32 *)buf_in);
switch (sset) {
case HINIC_SHOW_SSET_IO_STATS:
hinic_get_io_stats(nic_dev, items);
break;
default:
nicif_err(nic_dev, drv, nic_dev->netdev, "Unknown %d to get stats\n",
sset);
err = -EINVAL;
break;
}
return err;
}
static int get_func_type(void *hwdev, void *buf_in, u32 in_size,
void *buf_out, u32 *out_size)
{
u16 func_typ;
func_typ = hinic_func_type(hwdev);
if (!buf_out || *out_size != sizeof(u16)) {
pr_err("Unexpect out buf size from user: %d, expect: %lu\n",
*out_size, sizeof(u16));
return -EFAULT;
}
*(u16 *)buf_out = func_typ;
return 0;
}
static int get_func_id(void *hwdev, void *buf_in, u32 in_size,
void *buf_out, u32 *out_size)
{
u16 func_id;
if (!buf_out || *out_size != sizeof(u16)) {
pr_err("Unexpect out buf size from user: %d, expect: %lu\n",
*out_size, sizeof(u16));
return -EFAULT;
}
func_id = hinic_global_func_id_hw(hwdev);
*(u16 *)buf_out = func_id;
return 0;
}
static int get_chip_faults_stats(void *hwdev, void *buf_in, u32 in_size,
void *buf_out, u32 *out_size)
{
int offset = 0;
struct chip_fault_stats *fault_info;
if (!buf_in || !buf_out || *out_size != sizeof(*fault_info) ||
in_size != sizeof(*fault_info)) {
pr_err("Unexpect out buf size from user: %d, expect: %lu\n",
*out_size, sizeof(*fault_info));
return -EFAULT;
}
fault_info = (struct chip_fault_stats *)buf_in;
offset = fault_info->offset;
fault_info = (struct chip_fault_stats *)buf_out;
hinic_get_chip_fault_stats(hwdev, fault_info->chip_faults, offset);
return 0;
}
static int get_hw_stats(void *hwdev, void *buf_in, u32 in_size,
void *buf_out, u32 *out_size)
{
return hinic_dbg_get_hw_stats(hwdev, buf_out, (u16 *)out_size);
}
static int clear_hw_stats(void *hwdev, void *buf_in, u32 in_size,
void *buf_out, u32 *out_size)
{
return hinic_dbg_clear_hw_stats(hwdev, out_size);
}
static int get_drv_version(void *hwdev, void *buf_in, u32 in_size,
void *buf_out, u32 *out_size)
{
struct drv_version_info *ver_info;
char ver_str[MAX_VER_INFO_LEN] = {0};
int err;
if (*out_size != sizeof(*ver_info)) {
pr_err("Unexpect out buf size from user: %d, expect: %lu\n",
*out_size, sizeof(*ver_info));
return -EFAULT;
}
err = snprintf(ver_str, sizeof(ver_str),
"%s [compiled with the kernel]", HINIC_DRV_VERSION);
if (err <= 0 || err >= MAX_VER_INFO_LEN) {
pr_err("Failed snprintf driver version, function return(%d) and dest_len(%d)\n",
err, MAX_VER_INFO_LEN);
return -EFAULT;
}
ver_info = (struct drv_version_info *)buf_out;
memcpy(ver_info->ver, ver_str, sizeof(ver_str));
return 0;
}
static int get_self_test(void *hwdev, void *buf_in, u32 in_size,
void *buf_out, u32 *out_size)
{
return 0;
}
static int get_chip_id_test(void *hwdev, void *buf_in, u32 in_size,
void *buf_out, u32 *out_size)
{
return 0;
}
static int get_single_card_info(void *hwdev, void *buf_in, u32 in_size,
void *buf_out, u32 *out_size)
{
if (!buf_in || !buf_out || in_size != sizeof(struct card_info) ||
*out_size != sizeof(struct card_info)) {
pr_err("Unexpect out buf size from user: %d, expect: %lu\n",
*out_size, sizeof(struct card_info));
return -EFAULT;
}
hinic_get_card_info(hwdev, buf_out);
return 0;
}
static int get_device_id(void *hwdev, void *buf_in, u32 in_size,
void *buf_out, u32 *out_size)
{
u16 dev_id;
int err;
if (!buf_out || *out_size != sizeof(u16)) {
pr_err("Unexpect out buf size from user: %d, expect: %lu\n",
*out_size, sizeof(u16));
return -EFAULT;
}
err = hinic_get_device_id(hwdev, &dev_id);
if (err)
return err;
*((u16 *)buf_out) = dev_id; /* out buffer was validated as sizeof(u16) */
return 0;
}
static int is_driver_in_vm(void *hwdev, void *buf_in, u32 in_size,
void *buf_out, u32 *out_size)
{
bool in_host;
if (!buf_out || (*out_size != sizeof(u8)))
return -EINVAL;
in_host = hinic_is_in_host();
if (in_host)
*((u8 *)buf_out) = 0;
else
*((u8 *)buf_out) = 1;
return 0;
}
static int get_pf_id(void *hwdev, void *buf_in, u32 in_size,
void *buf_out, u32 *out_size)
{
struct hinic_pf_info *pf_info;
u32 port_id = 0;
int err;
if (!buf_out || (*out_size != sizeof(*pf_info)) ||
!buf_in || in_size != sizeof(u32))
return -EINVAL;
port_id = *((u32 *)buf_in);
pf_info = (struct hinic_pf_info *)buf_out;
err = hinic_get_pf_id(hwdev, port_id, &pf_info->pf_id,
&pf_info->isvalid);
if (err)
return err;
return 0;
}
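/* Lazily allocate the per-card API-chain region shared with the userspace
 * tool. The pages are marked reserved, presumably so they can be safely
 * mmap()ed via the physical address exported through g_card_phy_addr and
 * usr_api_phy_addr.
 */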
static int __get_card_usr_api_chain_mem(int card_idx)
{
unsigned char *tmp;
int i;
mutex_lock(&g_addr_lock);
card_id = card_idx;
if (!g_card_vir_addr[card_idx]) {
g_card_vir_addr[card_idx] =
(void *)__get_free_pages(GFP_KERNEL,
DBGTOOL_PAGE_ORDER);
if (!g_card_vir_addr[card_idx]) {
pr_err("Alloc api chain memory fail for card %d\n",
card_idx);
mutex_unlock(&g_addr_lock);
return -EFAULT;
}
memset(g_card_vir_addr[card_idx], 0,
PAGE_SIZE * (1 << DBGTOOL_PAGE_ORDER));
g_card_phy_addr[card_idx] =
virt_to_phys(g_card_vir_addr[card_idx]);
if (!g_card_phy_addr[card_idx]) {
pr_err("phy addr for card %d is 0\n", card_idx);
free_pages((unsigned long)g_card_vir_addr[card_idx],
DBGTOOL_PAGE_ORDER);
g_card_vir_addr[card_idx] = NULL;
mutex_unlock(&g_addr_lock);
return -EFAULT;
}
tmp = g_card_vir_addr[card_idx];
for (i = 0; i < (1 << DBGTOOL_PAGE_ORDER); i++) {
SetPageReserved(virt_to_page(tmp));
tmp += PAGE_SIZE;
}
}
mutex_unlock(&g_addr_lock);
return 0;
}
static int get_pf_dev_info(char *dev_name, struct msg_module *nt_msg)
{
struct pf_dev_info dev_info[16] = { {0} };
struct card_node *card_info = NULL;
int i;
int err;
if (nt_msg->len_info.out_buff_len != sizeof(dev_info) ||
nt_msg->len_info.in_buff_len != sizeof(dev_info)) {
pr_err("Invalid out_buf_size %d or Invalid in_buf_size %d, expect %lu\n",
nt_msg->len_info.out_buff_len,
nt_msg->len_info.in_buff_len, (sizeof(dev_info) * 16));
return -EINVAL;
}
for (i = 0; i < MAX_CARD_NUM; i++) {
card_info = (struct card_node *)g_card_node_array[i];
if (!card_info)
continue;
if (!strncmp(dev_name, card_info->chip_name, IFNAMSIZ))
break;
}
if (i == MAX_CARD_NUM || !card_info) {
pr_err("Can't find this card %s\n", dev_name);
return -EFAULT;
}
err = __get_card_usr_api_chain_mem(i);
if (err) {
pr_err("Faile to get api chain memory for userspace %s\n",
dev_name);
return -EFAULT;
}
chipif_get_all_pf_dev_info(dev_info, i,
card_info->func_handle_array);
/* Copy the dev_info to user mode */
if (copy_to_user(nt_msg->out_buf, dev_info, sizeof(dev_info))) {
pr_err("Copy dev_info to user fail\n");
return -EFAULT;
}
return 0;
}
static int knl_free_mem(char *dev_name, struct msg_module *nt_msg)
{
struct card_node *card_info = NULL;
int i;
for (i = 0; i < MAX_CARD_NUM; i++) {
card_info = (struct card_node *)g_card_node_array[i];
if (!card_info)
continue;
if (!strncmp(dev_name, card_info->chip_name, IFNAMSIZ))
break;
}
if (i == MAX_CARD_NUM || !card_info) {
pr_err("Can't find this card %s\n", dev_name);
return -EFAULT;
}
dbgtool_knl_free_mem(i);
return 0;
}
extern void hinic_get_card_func_info_by_card_name(const char *chip_name,
struct hinic_card_func_info
*card_func);
static int get_card_func_info(char *dev_name, struct msg_module *nt_msg)
{
struct hinic_card_func_info card_func_info = {0};
int id, err;
if (nt_msg->len_info.out_buff_len != sizeof(card_func_info) ||
nt_msg->len_info.in_buff_len != sizeof(card_func_info)) {
pr_err("Invalid out_buf_size %d or Invalid in_buf_size %d, expect %lu\n",
nt_msg->len_info.out_buff_len,
nt_msg->len_info.in_buff_len, sizeof(card_func_info));
return -EINVAL;
}
err = memcmp(dev_name, HINIC_CHIP_NAME, strlen(HINIC_CHIP_NAME));
if (err) {
pr_err("Invalid chip name %s\n", dev_name);
return err;
}
err = sscanf(dev_name, HINIC_CHIP_NAME "%d", &id);
if (err <= 0) {
pr_err("Failed to get hinic id\n");
return err;
}
if (id >= MAX_CARD_NUM) {
pr_err("chip id %d exceed limit[0-%d]\n", id, MAX_CARD_NUM - 1);
return -EINVAL;
}
hinic_get_card_func_info_by_card_name(dev_name, &card_func_info);
if (!card_func_info.num_pf) {
pr_err("None function found for %s\n", dev_name);
return -EFAULT;
}
err = __get_card_usr_api_chain_mem(id);
if (err) {
pr_err("Faile to get api chain memory for userspace %s\n",
dev_name);
return -EFAULT;
}
card_func_info.usr_api_phy_addr = g_card_phy_addr[id];
/* Copy the card_func_info to user mode */
if (copy_to_user(nt_msg->out_buf, &card_func_info,
sizeof(card_func_info))) {
pr_err("Copy dev_info to user fail\n");
return -EFAULT;
}
return 0;
}
#define GET_FIRMWARE_ACTIVE_STATUS_TIMEOUT 30
static int get_firmware_active_status(void *hwdev, void *buf_in, u32 in_size,
void *buf_out, u32 *out_size)
{
u32 loop_cnt = 0;
if (*out_size != 0) {
pr_err("Unexpect out buf size from user: %d, expect: 0\n",
*out_size);
return -EINVAL;
}
while (loop_cnt < GET_FIRMWARE_ACTIVE_STATUS_TIMEOUT) {
if (!hinic_get_mgmt_channel_status(hwdev))
return 0;
msleep(1000);
loop_cnt++;
}
if (loop_cnt == GET_FIRMWARE_ACTIVE_STATUS_TIMEOUT)
return -ETIMEDOUT;
return 0;
}
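/* Dispatch tables mapping the tool's driver_cmd_type values to the
 * NIC-level and hardware-level handlers defined above. Commands missing
 * from a table are rejected with -EINVAL by the send_to_* dispatchers.
 */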
struct nic_drv_module_handle nic_driv_module_cmd_handle[] = {
{TX_INFO, get_tx_info},
{Q_NUM, get_q_num},
{TX_WQE_INFO, get_tx_wqe_info},
{RX_INFO, get_rx_info},
{RX_WQE_INFO, get_rx_wqe_info},
{RX_CQE_INFO, get_rx_cqe_info},
{GET_INTER_NUM, get_inter_num},
{CLEAR_FUNC_STASTIC, clear_func_static},
{GET_NUM_COS, get_num_cos},
{GET_COS_UP_MAP, get_dcb_cos_up_map},
{SET_COS_UP_MAP, set_dcb_cos_up_map},
{GET_LOOPBACK_MODE, get_loopback_mode},
{SET_LOOPBACK_MODE, set_loopback_mode},
{SET_LINK_MODE, set_link_mode},
{SET_PF_BW_LIMIT, set_pf_bw_limit},
{GET_PF_BW_LIMIT, get_pf_bw_limit},
{GET_POLL_WEIGHT, get_poll_weight},
{SET_POLL_WEIGHT, set_poll_weight},
{GET_HOMOLOGUE, get_homologue},
{SET_HOMOLOGUE, set_homologue},
{GET_SSET_COUNT, get_sset_count},
{GET_SSET_ITEMS, get_sset_stats},
{SET_PFC_CONTROL, set_pfc_control},
{SET_ETS, set_ets},
{GET_ETS_INFO, get_ets_info},
{SET_PFC_PRIORITY, set_pfc_priority},
{SET_DCB_CFG, set_dcb_cfg},
{GET_PFC_INFO, get_pfc_info},
{GET_SUPPORT_UP, get_support_up},
{GET_SUPPORT_TC, get_support_tc},
};
struct hw_drv_module_handle hw_driv_module_cmd_handle[] = {
{FUNC_TYPE, get_func_type},
{GET_FUNC_IDX, get_func_id},
{GET_DRV_VERSION, get_drv_version},
{GET_HW_STATS, get_hw_stats},
{CLEAR_HW_STATS, clear_hw_stats},
{GET_SELF_TEST_RES, get_self_test},
{GET_CHIP_FAULT_STATS, get_chip_faults_stats},
{GET_CHIP_ID, get_chip_id_test},
{GET_SINGLE_CARD_INFO, get_single_card_info},
{GET_FIRMWARE_ACTIVE_STATUS, get_firmware_active_status},
{GET_DEVICE_ID, get_device_id},
{IS_DRV_IN_VM, is_driver_in_vm},
{GET_PF_ID, get_pf_id},
};
static int send_to_nic_driver(struct hinic_nic_dev *nic_dev,
u32 cmd, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size)
{
int index, num_cmds = sizeof(nic_driv_module_cmd_handle) /
sizeof(nic_driv_module_cmd_handle[0]);
enum driver_cmd_type cmd_type = (enum driver_cmd_type)cmd;
int err = 0;
mutex_lock(&nic_dev->nic_mutex);
for (index = 0; index < num_cmds; index++) {
if (cmd_type ==
nic_driv_module_cmd_handle[index].driv_cmd_name) {
err = nic_driv_module_cmd_handle[index].driv_func
(nic_dev, buf_in,
in_size, buf_out, out_size);
break;
}
}
mutex_unlock(&nic_dev->nic_mutex);
if (index == num_cmds)
return -EINVAL;
return err;
}
static int send_to_hw_driver(void *hwdev, struct msg_module *nt_msg,
void *buf_in, u32 in_size, void *buf_out,
u32 *out_size)
{
int index, num_cmds = sizeof(hw_driv_module_cmd_handle) /
sizeof(hw_driv_module_cmd_handle[0]);
enum driver_cmd_type cmd_type =
(enum driver_cmd_type)(nt_msg->msg_formate);
int err = 0;
for (index = 0; index < num_cmds; index++) {
if (cmd_type ==
hw_driv_module_cmd_handle[index].driv_cmd_name) {
err = hw_driv_module_cmd_handle[index].driv_func
(hwdev, buf_in,
in_size, buf_out, out_size);
break;
}
}
if (index == num_cmds)
return -EINVAL;
return err;
}
static int send_to_ucode(void *hwdev, struct msg_module *nt_msg,
void *buf_in, u32 in_size, void *buf_out,
u32 *out_size)
{
int ret = 0;
if (nt_msg->ucode_cmd.ucode_db.ucode_imm) {
ret = hinic_cmdq_direct_resp
(hwdev, nt_msg->ucode_cmd.ucode_db.cmdq_ack_type,
nt_msg->ucode_cmd.ucode_db.comm_mod_type,
nt_msg->ucode_cmd.ucode_db.ucode_cmd_type,
buf_in, buf_out, 0);
if (ret)
pr_err("Send direct cmdq err: %d\n", ret);
} else {
ret = hinic_cmdq_detail_resp
(hwdev, nt_msg->ucode_cmd.ucode_db.cmdq_ack_type,
nt_msg->ucode_cmd.ucode_db.comm_mod_type,
nt_msg->ucode_cmd.ucode_db.ucode_cmd_type,
buf_in, buf_out, 0);
if (ret)
pr_err("Send detail cmdq err: %d\n", ret);
}
return ret;
}
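/* API-chain CSR accesses come in 4- and 8-byte widths; the width selects
 * between the hinic_api_csr_{rd,wr}32 and {rd,wr}64 primitives below.
 */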
enum api_csr_op_width {
OP_WIDTH_4B,
OP_WIDTH_8B,
OP_WIDTH_UNKNOWN,
};
static int api_csr_read(void *hwdev, struct msg_module *nt_msg,
void *buf_in, u32 in_size, void *buf_out, u32 *out_size,
enum api_csr_op_width width)
{
struct up_log_msg_st *up_log_msg = (struct up_log_msg_st *)buf_in;
u32 op_bytes = (width == OP_WIDTH_4B ? sizeof(u32) : sizeof(u64));
int ret = 0;
u32 rd_len, rd_addr, rd_cnt;
u32 offset = 0;
u8 node_id;
u32 i;
if (!buf_in || !buf_out || in_size != sizeof(*up_log_msg) ||
*out_size != up_log_msg->rd_len || width >= OP_WIDTH_UNKNOWN)
return -EINVAL;
rd_len = up_log_msg->rd_len;
rd_addr = up_log_msg->addr;
node_id = (u8)nt_msg->up_cmd.up_db.comm_mod_type;
rd_cnt = rd_len / op_bytes;
if (rd_len % op_bytes)
rd_cnt++;
for (i = 0; i < rd_cnt; i++) {
if (width == OP_WIDTH_4B)
ret = hinic_api_csr_rd32(hwdev, node_id,
rd_addr + offset,
(u32 *)(((u8 *)buf_out) +
offset));
else
ret = hinic_api_csr_rd64(hwdev, node_id,
rd_addr + offset,
(u64 *)(((u8 *)buf_out) +
offset));
if (ret) {
pr_err("Read csr failed, err: %d, node_id: %d, csr addr: 0x%08x\n",
ret, node_id, rd_addr + offset);
return ret;
}
offset += op_bytes;
}
*out_size = rd_len;
return ret;
}
static int api_csr_write(void *hwdev, struct msg_module *nt_msg, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size,
enum api_csr_op_width width)
{
struct csr_write_st *csr_write_msg = (struct csr_write_st *)buf_in;
u32 op_bytes = (width == OP_WIDTH_4B ? sizeof(u32) : sizeof(u64));
int ret = 0;
u32 rd_len, rd_addr, rd_cnt;
u32 offset = 0;
u8 node_id;
u32 i;
u8 *data = NULL;
if (!buf_in || in_size != sizeof(*csr_write_msg) ||
width >= OP_WIDTH_UNKNOWN)
return -EINVAL;
rd_len = csr_write_msg->rd_len;
rd_addr = csr_write_msg->addr;
node_id = (u8)nt_msg->up_cmd.up_db.comm_mod_type;
if (rd_len % op_bytes) {
pr_err("Csr length must be a multiple of %d\n", op_bytes);
return -EFAULT;
}
rd_cnt = rd_len / op_bytes;
data = kzalloc(rd_len, GFP_KERNEL);
if (!data) {
pr_err("No more memory\n");
return -EFAULT;
}
if (copy_from_user(data, (void *)csr_write_msg->data, rd_len)) {
pr_err("Copy information from user failed\n");
kfree(data);
return -EFAULT;
}
for (i = 0; i < rd_cnt; i++) {
if (width == OP_WIDTH_4B)
ret = hinic_api_csr_wr32(hwdev, node_id,
rd_addr + offset,
*((u32 *)(data + offset)));
else
ret = hinic_api_csr_wr64(hwdev, node_id,
rd_addr + offset,
*((u64 *)(data + offset)));
if (ret) {
pr_err("Write csr failed, ret: %d, node_id: %d, csr addr: 0x%08x\n",
ret, node_id, rd_addr + offset);
kfree(data);
return ret;
}
offset += op_bytes;
}
*out_size = 0;
kfree(data);
return ret;
}
static u32 get_up_timeout_val(enum hinic_mod_type mod, u8 cmd)
{
if (mod == HINIC_MOD_L2NIC && cmd == HINIC_PORT_CMD_UPDATE_FW)
return UP_UPDATEFW_TIME_OUT_VAL;
else
return UP_COMP_TIME_OUT_VAL;
}
static int check_useparam_valid(struct msg_module *nt_msg, void *buf_in)
{
struct csr_write_st *csr_write_msg = (struct csr_write_st *)buf_in;
u32 rd_len = csr_write_msg->rd_len;
if (rd_len > TOOL_COUNTER_MAX_LEN) {
pr_err("Csr read or write len is invalid\n");
return -EINVAL;
}
return 0;
}
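/* Route a request to the management CPU. API_CMD and API_CLP requests go
 * through the mgmt message channel (with a longer timeout for firmware
 * updates, see get_up_timeout_val()); API_CHAIN requests are decoded into
 * the CSR read/write helpers above.
 */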
static int send_to_up(void *hwdev, struct msg_module *nt_msg,
void *buf_in, u32 in_size, void *buf_out, u32 *out_size)
{
int ret = 0;
if (nt_msg->up_cmd.up_db.up_api_type == API_CMD ||
nt_msg->up_cmd.up_db.up_api_type == API_CLP) {
enum hinic_mod_type mod;
u8 cmd;
u32 timeout;
mod = (enum hinic_mod_type)nt_msg->up_cmd.up_db.comm_mod_type;
cmd = nt_msg->up_cmd.up_db.chipif_cmd;
timeout = get_up_timeout_val(mod, cmd);
if (nt_msg->up_cmd.up_db.up_api_type == API_CMD)
ret = hinic_msg_to_mgmt_sync(hwdev, mod, cmd,
buf_in, (u16)in_size,
buf_out, (u16 *)out_size,
timeout);
else
ret = hinic_clp_to_mgmt(hwdev, mod, cmd,
buf_in, (u16)in_size,
buf_out, (u16 *)out_size);
if (ret) {
pr_err("Message to mgmt cpu return fail, mod: %d, cmd: %d\n",
mod, cmd);
return ret;
}
} else if (nt_msg->up_cmd.up_db.up_api_type == API_CHAIN) {
if (check_useparam_valid(nt_msg, buf_in))
return -EINVAL;
switch (nt_msg->up_cmd.up_db.chipif_cmd) {
case API_CSR_WRITE:
ret = api_csr_write(hwdev, nt_msg, buf_in, in_size,
buf_out, out_size, OP_WIDTH_4B);
break;
case API_CSR_READ:
ret = api_csr_read(hwdev, nt_msg, buf_in, in_size,
buf_out, out_size, OP_WIDTH_4B);
break;
case API_CSR_WRITE_8B:
ret = api_csr_write(hwdev, nt_msg, buf_in, in_size,
buf_out, out_size, OP_WIDTH_8B);
break;
case API_CSR_READ_8B:
ret = api_csr_read(hwdev, nt_msg, buf_in, in_size,
buf_out, out_size, OP_WIDTH_8B);
break;
default:
pr_err("Unsupported chipif cmd: %d\n",
nt_msg->up_cmd.up_db.chipif_cmd);
ret = -EINVAL;
break;
}
}
return ret;
}
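/* SM counter readers: on failure the value reported back to the tool is
 * forced to 0xffffffff so an invalid read is distinguishable from zero.
 */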
static int sm_rd32(void *hwdev, u32 id, u8 instance,
u8 node, struct sm_out_st *buf_out)
{
u32 val1;
int ret;
ret = hinic_sm_ctr_rd32(hwdev, node, instance, id, &val1);
if (ret) {
pr_err("Get sm ctr information (32 bits)failed\n");
val1 = 0xffffffff;
}
buf_out->val1 = val1;
return ret;
}
static int sm_rd64_pair(void *hwdev, u32 id, u8 instance,
u8 node, struct sm_out_st *buf_out)
{
u64 val1 = 0, val2 = 0;
int ret;
ret = hinic_sm_ctr_rd64_pair(hwdev, node, instance, id, &val1, &val2);
if (ret) {
pr_err("Get sm ctr information (64 bits pair)failed\n");
val1 = 0xffffffff;
}
buf_out->val1 = val1;
buf_out->val2 = val2;
return ret;
}
static int sm_rd64(void *hwdev, u32 id, u8 instance,
u8 node, struct sm_out_st *buf_out)
{
u64 val1;
int ret;
ret = hinic_sm_ctr_rd64(hwdev, node, instance, id, &val1);
if (ret) {
pr_err("Get sm ctr information (64 bits)failed\n");
val1 = 0xffffffff;
}
buf_out->val1 = val1;
return ret;
}
typedef int (*sm_module)(void *hwdev, u32 id, u8 instance,
u8 node, struct sm_out_st *buf_out);
struct sm_module_handle {
enum sm_cmd_type sm_cmd_name;
sm_module sm_func;
};
struct sm_module_handle sm_module_cmd_handle[] = {
{SM_CTR_RD32, sm_rd32},
{SM_CTR_RD64_PAIR, sm_rd64_pair},
{SM_CTR_RD64, sm_rd64}
};
static int send_to_sm(void *hwdev, struct msg_module *nt_msg,
void *buf_in, u32 in_size, void *buf_out, u32 *out_size)
{
struct sm_in_st *sm_in = buf_in;
struct sm_out_st *sm_out = buf_out;
u32 msg_formate = nt_msg->msg_formate;
int index, num_cmds = sizeof(sm_module_cmd_handle) /
sizeof(sm_module_cmd_handle[0]);
int ret = 0;
if (!buf_in || !buf_out || in_size != sizeof(*sm_in) ||
*out_size != sizeof(*sm_out))
return -EINVAL;
for (index = 0; index < num_cmds; index++) {
if (msg_formate == sm_module_cmd_handle[index].sm_cmd_name)
ret = sm_module_cmd_handle[index].sm_func(hwdev,
(u32)sm_in->id,
(u8)sm_in->instance,
(u8)sm_in->node, sm_out);
}
if (ret)
pr_err("Get sm information fail\n");
*out_size = sizeof(struct sm_out_st);
return ret;
}
static bool is_hwdev_cmd_support(unsigned int mod,
char *ifname, u32 up_api_type)
{
void *hwdev;
hwdev = hinic_get_hwdev_by_ifname(ifname);
if (!hwdev) {
pr_err("Can not get the device %s correctly\n", ifname);
return false;
}
switch (mod) {
case SEND_TO_UP:
case SEND_TO_SM:
if (FUNC_SUPPORT_MGMT(hwdev)) {
if (up_api_type == API_CLP) {
if (!hinic_is_hwdev_mod_inited
(hwdev, HINIC_HWDEV_CLP_INITED)) {
pr_err("CLP have not initialized\n");
return false;
}
} else if (!hinic_is_hwdev_mod_inited
(hwdev, HINIC_HWDEV_MGMT_INITED)) {
pr_err("MGMT have not initialized\n");
return false;
}
} else if (!hinic_is_hwdev_mod_inited
(hwdev, HINIC_HWDEV_MBOX_INITED)) {
pr_err("MBOX have not initialized\n");
return false;
}
if (mod == SEND_TO_SM &&
((hinic_func_type(hwdev) == TYPE_VF) ||
(!hinic_is_hwdev_mod_inited(hwdev,
HINIC_HWDEV_MGMT_INITED)))) {
pr_err("Current function do not support this cmd\n");
return false;
}
break;
case SEND_TO_UCODE:
if (!hinic_is_hwdev_mod_inited(hwdev,
HINIC_HWDEV_CMDQ_INITED)) {
pr_err("CMDQ have not initialized\n");
return false;
}
break;
default:
return false;
}
return true;
}
static bool nictool_k_is_cmd_support(unsigned int mod,
char *ifname, u32 up_api_type)
{
enum hinic_init_state init_state =
hinic_get_init_state_by_ifname(ifname);
bool support = true;
if (init_state == HINIC_INIT_STATE_NONE)
return false;
if (mod == SEND_TO_NIC_DRIVER) {
if (init_state < HINIC_INIT_STATE_NIC_INITED) {
pr_err("NIC driver have not initialized\n");
return false;
}
} else if (mod >= SEND_TO_UCODE && mod <= SEND_TO_SM) {
return is_hwdev_cmd_support(mod, ifname, up_api_type);
} else if ((mod >= HINICADM_OVS_DRIVER &&
mod <= HINICADM_FCOE_DRIVER) ||
mod == SEND_TO_HW_DRIVER) {
if (init_state < HINIC_INIT_STATE_HWDEV_INITED) {
pr_err("Hwdev have not initialized\n");
return false;
}
} else {
pr_err("Unsupport mod %d\n", mod);
support = false;
}
return support;
}
static int alloc_tmp_buf(void *hwdev, struct msg_module *nt_msg, u32 in_size,
void **buf_in, u32 out_size, void **buf_out)
{
int ret;
ret = alloc_buff_in(hwdev, nt_msg, in_size, buf_in);
if (ret) {
pr_err("Alloc tool cmd buff in failed\n");
return ret;
}
ret = alloc_buff_out(hwdev, nt_msg, out_size, buf_out);
if (ret) {
pr_err("Alloc tool cmd buff out failed\n");
goto out_free_buf_in;
}
return 0;
out_free_buf_in:
free_buff_in(hwdev, nt_msg, *buf_in);
return ret;
}
static void free_tmp_buf(void *hwdev, struct msg_module *nt_msg,
void *buf_in, void *buf_out)
{
free_buff_out(hwdev, nt_msg, buf_out);
free_buff_in(hwdev, nt_msg, buf_in);
}
static int get_self_test_cmd(struct msg_module *nt_msg)
{
int ret;
u32 res = 0;
ret = hinic_get_self_test_result(nt_msg->device_name, &res);
if (ret) {
pr_err("Get self test result failed\n");
return -EFAULT;
}
ret = copy_buf_out_to_user(nt_msg, sizeof(res), &res);
if (ret)
pr_err("%s:%d:: Copy to user failed\n", __func__, __LINE__);
return ret;
}
static int get_all_chip_id_cmd(struct msg_module *nt_msg)
{
struct nic_card_id card_id;
memset(&card_id, 0, sizeof(card_id));
hinic_get_all_chip_id((void *)&card_id);
if (copy_to_user(nt_msg->out_buf, &card_id, sizeof(card_id))) {
pr_err("Copy chip id to user failed\n");
return -EFAULT;
}
return 0;
}
int nic_ioctl(void *uld_dev, u32 cmd, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size)
{
return send_to_nic_driver(uld_dev, cmd, buf_in,
in_size, buf_out, out_size);
}
static void *__get_dev_support_nic_cmd(struct msg_module *nt_msg,
enum hinic_service_type type)
{
void *uld_dev = NULL;
/* set/get qos must use chip_name(hinic0) */
switch (nt_msg->msg_formate) {
case GET_COS_UP_MAP:
case SET_COS_UP_MAP:
case GET_NUM_COS:
uld_dev = hinic_get_uld_by_chip_name(nt_msg->device_name, type);
if (!uld_dev)
pr_err("Get/set cos_up must use chip_name(hinic0)\n");
return uld_dev;
default:
break;
}
uld_dev = hinic_get_uld_dev_by_ifname(nt_msg->device_name, type);
if (!uld_dev)
pr_err("Can not get the uld dev correctly: %s, nic driver may be not register\n",
nt_msg->device_name);
return uld_dev;
}
static void *get_support_uld_dev(struct msg_module *nt_msg,
enum hinic_service_type type)
{
char *service_name[SERVICE_T_MAX] = {"NIC", "OVS", "ROCE", "TOE",
"IWARP", "FC", "FCOE"};
void *hwdev = NULL;
void *uld_dev = NULL;
switch (nt_msg->module) {
case SEND_TO_NIC_DRIVER:
hwdev = hinic_get_hwdev_by_ifname(nt_msg->device_name);
if (!hinic_support_nic(hwdev, NULL)) {
pr_err("Current function don't support NIC\n");
return NULL;
}
return __get_dev_support_nic_cmd(nt_msg, type);
default:
break;
}
uld_dev = hinic_get_uld_dev_by_ifname(nt_msg->device_name, type);
if (!uld_dev)
pr_err("Can not get the uld dev correctly: %s, %s driver may be not register\n",
nt_msg->device_name, service_name[type]);
return uld_dev;
}
static int get_service_drv_version(void *hwdev, struct msg_module *nt_msg,
void *buf_in, u32 in_size, void *buf_out,
u32 *out_size)
{
enum hinic_service_type type;
int ret = 0;
type = nt_msg->module - SEND_TO_SM;
*out_size = sizeof(struct drv_version_info);
if (!g_uld_info[type].ioctl)
return ret;
ret = g_uld_info[type].ioctl(NULL, nt_msg->msg_formate, buf_in, in_size,
buf_out, out_size);
if (ret)
return ret;
if (copy_to_user(nt_msg->out_buf, buf_out, *out_size))
return -EFAULT;
return ret;
}
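/* Dispatch a tool command to the matching service driver (ULD): the module
 * id is mapped to a service type (SEND_TO_NIC_DRIVER -> SERVICE_T_NIC,
 * other ids are offsets from SEND_TO_SM) and the command is forwarded to
 * that ULD's registered ioctl callback.
 */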
int send_to_service_driver(struct msg_module *nt_msg, void *buf_in,
u32 in_size, void *buf_out, u32 *out_size)
{
enum hinic_service_type type;
void *uld_dev;
int ret = -EINVAL;
if (nt_msg->module == SEND_TO_NIC_DRIVER)
type = SERVICE_T_NIC;
else
type = nt_msg->module - SEND_TO_SM;
if (type < SERVICE_T_MAX) {
uld_dev = get_support_uld_dev(nt_msg, type);
if (!uld_dev)
return -EINVAL;
if (g_uld_info[type].ioctl)
ret = g_uld_info[type].ioctl(uld_dev,
nt_msg->msg_formate,
buf_in, in_size, buf_out,
out_size);
} else {
pr_err("Ioctl input module id: %d is incorrectly\n",
nt_msg->module);
}
return ret;
}
static int nictool_exec_cmd(void *hwdev, struct msg_module *nt_msg,
void *buf_in, u32 in_size, void *buf_out,
u32 *out_size)
{
int ret;
switch (nt_msg->module) {
case SEND_TO_HW_DRIVER:
ret = send_to_hw_driver(hwdev, nt_msg, buf_in,
in_size, buf_out, out_size);
break;
case SEND_TO_UP:
ret = send_to_up(hwdev, nt_msg, buf_in,
in_size, buf_out, out_size);
break;
case SEND_TO_UCODE:
ret = send_to_ucode(hwdev, nt_msg, buf_in,
in_size, buf_out, out_size);
break;
case SEND_TO_SM:
ret = send_to_sm(hwdev, nt_msg, buf_in,
in_size, buf_out, out_size);
break;
default:
ret = send_to_service_driver(nt_msg, buf_in, in_size, buf_out,
out_size);
break;
}
return ret;
}
static int get_nictool_drv_cap(struct msg_module *nt_msg)
{
int ret;
u64 support = 0;
if (nt_msg->len_info.out_buff_len != sizeof(u64)) {
pr_err("Unexpect out buf size from user: %d, expect: %lu\n",
nt_msg->len_info.out_buff_len, sizeof(u64));
return -EINVAL;
}
support |= NICTOOL_SUPPORT_API_CSR;
ret = copy_buf_out_to_user(nt_msg, sizeof(support), &support);
if (ret)
pr_err("%s:%d:: Copy to user failed\n", __func__, __LINE__);
return ret;
}
static bool hinic_is_special_handling_cmd(struct msg_module *nt_msg, int *ret)
{
bool handled = true;
if (nt_msg->module != SEND_TO_HW_DRIVER)
return false;
switch (nt_msg->msg_formate) {
case GET_SELF_TEST_RES:
*ret = get_self_test_cmd(nt_msg);
break;
case GET_CHIP_ID:
*ret = get_all_chip_id_cmd(nt_msg);
break;
case GET_PF_DEV_INFO:
*ret = get_pf_dev_info(nt_msg->device_name, nt_msg);
break;
case CMD_FREE_MEM:
*ret = knl_free_mem(nt_msg->device_name, nt_msg);
break;
case GET_CHIP_INFO:
*ret = get_card_func_info(nt_msg->device_name, nt_msg);
break;
case GET_NICTOOL_CAP:
*ret = get_nictool_drv_cap(nt_msg);
break;
default:
handled = false;
break;
}
return handled;
}
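/* Main ioctl entry of the nictool char device. The flow is:
 * 1. copy the struct msg_module request header from user space;
 * 2. handle device-independent commands (chip id, self test, ...) early;
 * 3. check that the command is supported in the current init state;
 * 4. resolve the hwdev by interface name, allocate temporary buffers,
 *    execute the command and copy the result back to user space.
 */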
static long nictool_k_unlocked_ioctl(struct file *pfile,
unsigned int cmd, unsigned long arg)
{
void *hwdev;
struct msg_module nt_msg;
void *buf_out = NULL;
void *buf_in = NULL;
u32 out_size_expect = 0;
u32 out_size = 0;
u32 in_size = 0;
unsigned int cmd_raw = 0;
int ret = 0;
memset(&nt_msg, 0, sizeof(nt_msg));
if (copy_from_user(&nt_msg, (void *)arg, sizeof(nt_msg))) {
pr_err("Copy information from user failed\n");
return -EFAULT;
}
	/* ensure the ifname from user space is NUL-terminated */
nt_msg.device_name[IFNAMSIZ - 1] = '\0';
cmd_raw = nt_msg.module;
out_size_expect = nt_msg.len_info.out_buff_len;
in_size = nt_msg.len_info.in_buff_len;
hinic_tool_cnt_inc();
if (hinic_is_special_handling_cmd(&nt_msg, &ret))
goto out_free_lock;
if (cmd_raw == HINICADM_FC_DRIVER &&
nt_msg.msg_formate == GET_CHIP_ID)
get_fc_devname(nt_msg.device_name);
if (!nictool_k_is_cmd_support(cmd_raw, nt_msg.device_name,
nt_msg.up_cmd.up_db.up_api_type)) {
ret = -EFAULT;
goto out_free_lock;
}
	/* get the hwdev by interface name */
hwdev = hinic_get_hwdev_by_ifname(nt_msg.device_name);
if (!hwdev) {
pr_err("Can not get the device %s correctly\n",
nt_msg.device_name);
ret = -ENODEV;
goto out_free_lock;
}
ret = alloc_tmp_buf(hwdev, &nt_msg, in_size,
&buf_in, out_size_expect, &buf_out);
if (ret) {
pr_err("Alloc tmp buff failed\n");
goto out_free_lock;
}
out_size = out_size_expect;
if (nt_msg.msg_formate == GET_DRV_VERSION &&
(cmd_raw == HINICADM_FC_DRIVER || cmd_raw == HINICADM_TOE_DRIVER)) {
ret = get_service_drv_version(hwdev, &nt_msg, buf_in,
in_size, buf_out, &out_size);
goto out_free_buf;
}
ret = nictool_exec_cmd(hwdev, &nt_msg, buf_in,
in_size, buf_out, &out_size);
if (ret)
goto out_free_buf;
ret = copy_buf_out_to_user(&nt_msg, out_size_expect, buf_out);
if (ret)
pr_err("Copy information to user failed\n");
out_free_buf:
free_tmp_buf(hwdev, &nt_msg, buf_in, buf_out);
out_free_lock:
hinic_tool_cnt_dec();
return (long)ret;
}
static int nictool_k_open(struct inode *pnode, struct file *pfile)
{
return 0;
}
static ssize_t nictool_k_read(struct file *pfile, char __user *ubuf,
size_t size, loff_t *ppos)
{
return 0;
}
static ssize_t nictool_k_write(struct file *pfile, const char __user *ubuf,
size_t size, loff_t *ppos)
{
return 0;
}
static const struct file_operations fifo_operations = {
.owner = THIS_MODULE,
.open = nictool_k_open,
.read = nictool_k_read,
.write = nictool_k_write,
.unlocked_ioctl = nictool_k_unlocked_ioctl,
.mmap = hinic_mem_mmap,
};
int if_nictool_exist(void)
{
struct file *fp = NULL;
int exist = 0;
fp = filp_open(HIADM_DEV_PATH, O_RDONLY, 0);
if (IS_ERR(fp)) {
exist = 0;
} else {
(void)filp_close(fp, NULL);
exist = 1;
}
return exist;
}
/**
 * nictool_k_init - register the nictool char device interface
*/
int nictool_k_init(void)
{
int ret;
struct device *pdevice;
if (g_nictool_init_flag) {
g_nictool_ref_cnt++;
/* already initialized */
return 0;
}
if (if_nictool_exist()) {
pr_err("Nictool device exists\n");
return 0;
}
ret = alloc_chrdev_region(&g_dev_id, 0, 1, HIADM_DEV_NAME);
if (ret < 0) {
pr_err("Register nictool_dev fail(0x%x)\n", ret);
return ret;
}
	/* Create the device class */
/*lint -save -e160*/
g_nictool_class = class_create(THIS_MODULE, HIADM_DEV_CLASS);
/*lint -restore*/
if (IS_ERR(g_nictool_class)) {
pr_err("Create nictool_class fail\n");
ret = -EFAULT;
goto class_create_err;
}
/* Initializing the character device */
cdev_init(&g_nictool_cdev, &fifo_operations);
	/* Register the char device with the system */
ret = cdev_add(&g_nictool_cdev, g_dev_id, 1);
if (ret < 0) {
pr_err("Add nictool_dev to operating system fail(0x%x)\n", ret);
goto cdev_add_err;
}
/* Export device information to user space
* (/sys/class/class name/device name)
*/
pdevice = device_create(g_nictool_class, NULL,
g_dev_id, NULL, HIADM_DEV_NAME);
if (IS_ERR(pdevice)) {
pr_err("Export nictool device information to user space fail\n");
ret = -EFAULT;
goto device_create_err;
}
g_nictool_init_flag = 1;
g_nictool_ref_cnt = 1;
pr_info("Register nictool_dev to system succeed\n");
return 0;
device_create_err:
cdev_del(&g_nictool_cdev);
cdev_add_err:
class_destroy(g_nictool_class);
class_create_err:
g_nictool_class = NULL;
unregister_chrdev_region(g_dev_id, 1);
return ret;
}
void nictool_k_uninit(void)
{
if (g_nictool_init_flag) {
		if (--g_nictool_ref_cnt)
return;
}
g_nictool_init_flag = 0;
if (!g_nictool_class || IS_ERR(g_nictool_class))
return;
cdev_del(&g_nictool_cdev);
device_destroy(g_nictool_class, g_dev_id);
class_destroy(g_nictool_class);
g_nictool_class = NULL;
unregister_chrdev_region(g_dev_id, 1);
pr_info("Unregister nictool_dev succeed\n");
}
/* SPDX-License-Identifier: GPL-2.0*/
/* Huawei HiNIC PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
*/
#ifndef HINIC_NICTOOL_H_
#define HINIC_NICTOOL_H_
#include "hinic_dfx_def.h"
/* completion timeout interval, unit is jiffies */
#define UP_COMP_TIME_OUT_VAL 10000U
struct sm_in_st {
int node;
int id;
int instance;
};
struct sm_out_st {
u64 val1;
u64 val2;
};
struct up_log_msg_st {
u32 rd_len;
u32 addr;
};
struct csr_write_st {
u32 rd_len;
u32 addr;
u8 *data;
};
struct ipsurx_stats_info {
u32 addr;
u32 rd_cnt;
};
struct ucode_cmd_st {
union {
struct {
u32 comm_mod_type : 8;
u32 ucode_cmd_type : 4;
u32 cmdq_ack_type : 3;
u32 ucode_imm : 1;
u32 len : 16;
} ucode_db;
u32 value;
};
};
struct up_cmd_st {
union {
struct {
u32 comm_mod_type : 8;
u32 chipif_cmd : 8;
u32 up_api_type : 16;
} up_db;
u32 value;
};
};
struct _dcb_data {
u8 wr_flag;
u8 dcb_en;
u8 err;
u8 rsvd;
};
union _dcb_ctl {
struct _dcb_data dcb_data;
u32 data;
};
struct _pfc_data {
u8 pfc_en;
u8 pfc_priority;
u8 num_of_tc;
u8 err;
};
union _pfc {
struct _pfc_data pfc_data;
u32 data;
};
union _flag_com {
struct _ets_flag {
u8 flag_ets_enable : 1;
u8 flag_ets_percent : 1;
u8 flag_ets_cos : 1;
u8 flag_ets_strict : 1;
u8 rev : 4;
} ets_flag;
u8 data;
};
struct _ets {
u8 ets_en;
u8 err;
u8 strict;
u8 tc[8];
u8 ets_percent[8];
union _flag_com flag_com;
};
#define API_CMD 0x1
#define API_CHAIN 0x2
#define API_CLP 0x3
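/* Request header exchanged with the user-space tool: device_name selects
 * the target device, module selects the dispatch target (hw driver, up,
 * ucode, sm or a service driver), msg_formate (or the ucode/up command
 * unions) selects the sub-command, and len_info carries the user buffer
 * sizes.
 */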
struct msg_module {
char device_name[IFNAMSIZ];
unsigned int module;
union {
u32 msg_formate;
struct ucode_cmd_st ucode_cmd;
struct up_cmd_st up_cmd;
};
struct {
u32 in_buff_len;
u32 out_buff_len;
} len_info;
u32 res;
void *in_buff;
void *out_buf;
};
#define MAX_VER_INFO_LEN 128
struct drv_version_info {
char ver[MAX_VER_INFO_LEN];
};
struct chip_fault_stats {
int offset;
u8 chip_faults[MAX_DRV_BUF_SIZE];
};
struct hinic_wqe_info {
int q_id;
void *slq_handle;
unsigned int wqe_id;
};
struct hinic_cos_up_map {
u8 cos_up[HINIC_DCB_UP_MAX];
u8 num_cos;
};
struct hinic_tx_hw_page {
u64 phy_addr;
u64 *map_addr;
};
struct hinic_dbg_sq_info {
u16 q_id;
u16 pi;
u16 ci; /* sw_ci */
u16 fi; /* hw_ci */
u32 q_depth;
u16 pi_reverse;
u16 weqbb_size;
u8 priority;
u16 *ci_addr;
u64 cla_addr;
void *slq_handle;
struct hinic_tx_hw_page direct_wqe;
struct hinic_tx_hw_page db_addr;
u32 pg_idx;
u32 glb_sq_id;
};
struct hinic_dbg_rq_info {
u16 q_id;
u16 glb_rq_id;
u16 hw_pi;
u16 ci; /* sw_ci */
u16 sw_pi;
u16 wqebb_size;
u16 q_depth;
u16 buf_len;
void *slq_handle;
u64 ci_wqe_page_addr;
u64 ci_cla_tbl_addr;
u16 msix_idx;
u32 msix_vector;
};
#define BUSINFO_LEN 32
struct pf_info {
char name[IFNAMSIZ];
char bus_info[BUSINFO_LEN];
u32 pf_type;
};
#define MAX_SIZE 16
struct card_info {
struct pf_info pf[MAX_SIZE];
u32 pf_num;
};
struct nic_card_id {
u32 id[MAX_SIZE];
u32 num;
};
struct func_pdev_info {
u64 bar0_phy_addr;
u64 bar0_size;
u64 rsvd1[4];
};
struct hinic_card_func_info {
u32 num_pf;
u32 rsvd0;
u64 usr_api_phy_addr;
struct func_pdev_info pdev_info[MAX_SIZE];
};
#define MAX_CARD_NUM 64
extern void *g_card_node_array[MAX_CARD_NUM];
extern void *g_card_vir_addr[MAX_CARD_NUM];
extern u64 g_card_phy_addr[MAX_CARD_NUM];
extern struct mutex g_addr_lock;
extern int card_id;
struct hinic_nic_loop_mode {
u32 loop_mode;
u32 loop_ctrl;
};
struct hinic_nic_poll_weight {
int poll_weight;
};
enum hinic_homologues_state {
HINIC_HOMOLOGUES_OFF = 0,
HINIC_HOMOLOGUES_ON = 1,
};
struct hinic_homologues {
enum hinic_homologues_state homo_state;
};
struct hinic_pf_info {
u32 isvalid;
u32 pf_id;
};
int nictool_k_init(void);
void nictool_k_uninit(void);
u32 hinic_get_io_stats_size(struct hinic_nic_dev *nic_dev);
void hinic_get_io_stats(struct hinic_nic_dev *nic_dev,
struct hinic_show_item *items);
#define TOOL_COUNTER_MAX_LEN 512
#endif
/* SPDX-License-Identifier: GPL-2.0*/
/* Huawei HiNIC PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
*/
#ifndef HINIC_PCI_ID_TBL_H
#define HINIC_PCI_ID_TBL_H
#define PCI_VENDOR_ID_HUAWEI 0x19e5
#define HINIC_DEV_ID_1822_PF 0x1822
#define HINIC_DEV_ID_1822_VF 0x375E
#define HINIC_DEV_ID_1822_VF_HV 0x379E
#define HINIC_DEV_ID_1822_SMTIO 0x020B
#define HINIC_DEV_ID_1822_PANGEA_100GE 0x0208
#define HINIC_DEV_ID_1822_PANGEA_TP_10GE 0x0204
#define HINIC_DEV_ID_1822_KR_40GE 0x020D
#define HINIC_DEV_ID_1822_KR_100GE 0x0205
#define HINIC_DEV_ID_1822_DUAL_25GE 0x0206
#define HINIC_DEV_ID_1822_KR_25GE 0x0210
#define HINIC_DEV_ID_1822_MULTI_HOST 0x0211
#define HINIC_DEV_ID_1822_100GE 0x0200
#define HINIC_DEV_ID_1822_100GE_MULTI_HOST 0x0201
#define HIFC_DEV_ID_1822_8G 0x0212
#define HIFC_DEV_ID_1822_16G 0x0203
#define HIFC_DEV_ID_1822_32G 0x0202
#define HIFC_DEV_ID_1822_SMTIO 0x020C
#endif
// SPDX-License-Identifier: GPL-2.0
/* Huawei HiNIC PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include "hinic_nic_io.h"
#include "hinic_qp.h"
#define BUF_DESC_SHIFT 1
#define BUF_DESC_SIZE(nr_descs) (((u32)nr_descs) << BUF_DESC_SHIFT)
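/* Fill the control section of a send-queue WQE. All section lengths are
 * expressed in 8-byte units (a 16-byte buf descriptor is 2 units, hence
 * BUF_DESC_SHIFT = 1); an unset MSS in queue_info is replaced by
 * TX_MSS_DEFAULT and values below TX_MSS_MIN are raised to TX_MSS_MIN,
 * then both words are converted to the big-endian format the hardware
 * expects.
 */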
void hinic_prepare_sq_ctrl(struct hinic_sq_ctrl *ctrl, u32 queue_info,
int nr_descs, u8 owner)
{
u32 ctrl_size, task_size, bufdesc_size;
ctrl_size = SIZE_8BYTES(sizeof(struct hinic_sq_ctrl));
task_size = SIZE_8BYTES(sizeof(struct hinic_sq_task));
bufdesc_size = BUF_DESC_SIZE(nr_descs);
ctrl->ctrl_fmt = SQ_CTRL_SET(bufdesc_size, BUFDESC_SECT_LEN) |
SQ_CTRL_SET(task_size, TASKSECT_LEN) |
SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
SQ_CTRL_SET(ctrl_size, LEN) |
SQ_CTRL_SET(owner, OWNER);
	ctrl->ctrl_fmt = cpu_to_be32(ctrl->ctrl_fmt);
ctrl->queue_info = queue_info;
ctrl->queue_info |= SQ_CTRL_QUEUE_INFO_SET(1U, UC);
if (!SQ_CTRL_QUEUE_INFO_GET(ctrl->queue_info, MSS)) {
ctrl->queue_info |= SQ_CTRL_QUEUE_INFO_SET(TX_MSS_DEFAULT, MSS);
} else if (SQ_CTRL_QUEUE_INFO_GET(ctrl->queue_info, MSS) < TX_MSS_MIN) {
		/* mss should not be less than TX_MSS_MIN (80) */
ctrl->queue_info = SQ_CTRL_QUEUE_INFO_CLEAR(ctrl->queue_info,
MSS);
ctrl->queue_info |= SQ_CTRL_QUEUE_INFO_SET(TX_MSS_MIN, MSS);
}
	ctrl->queue_info = cpu_to_be32(ctrl->queue_info);
}
int hinic_get_rx_done(struct hinic_rq_cqe *cqe)
{
u32 status;
int rx_done;
status = be32_to_cpu(cqe->status);
rx_done = RQ_CQE_STATUS_GET(status, RXDONE);
if (!rx_done)
return 0;
return 1;
}
void hinic_clear_rx_done(struct hinic_rq_cqe *cqe, u32 status_old)
{
u32 status;
status = RQ_CQE_STATUS_CLEAR(status_old, RXDONE);
cqe->status = cpu_to_be32(status);
	/* Make sure the cleared RXDONE is written back before the wqe is reused */
wmb();
}
int hinic_get_super_cqe_en(struct hinic_rq_cqe *cqe)
{
u32 pkt_info;
int super_cqe_en;
pkt_info = be32_to_cpu(cqe->pkt_info);
super_cqe_en = RQ_CQE_SUPER_CQE_EN_GET(pkt_info, SUPER_CQE_EN);
if (!super_cqe_en)
return 0;
return 1;
}
u32 hinic_get_pkt_len(struct hinic_rq_cqe *cqe)
{
u32 vlan_len = be32_to_cpu(cqe->vlan_len);
return RQ_CQE_SGE_GET(vlan_len, LEN);
}
u32 hinic_get_pkt_num(struct hinic_rq_cqe *cqe)
{
u32 pkt_num = be32_to_cpu(cqe->pkt_info);
return RQ_CQE_PKT_NUM_GET(pkt_num, NUM);
}
u32 hinic_get_pkt_len_for_super_cqe(struct hinic_rq_cqe *cqe,
bool last)
{
u32 pkt_len = be32_to_cpu(cqe->pkt_info);
if (!last)
return RQ_CQE_PKT_LEN_GET(pkt_len, FIRST_LEN);
else
return RQ_CQE_PKT_LEN_GET(pkt_len, LAST_LEN);
}
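/* Build one receive-queue WQE: the control section describes the section
 * lengths, the cqe section points at the per-buffer completion entry and
 * the buf descriptor carries the DMA address of the receive buffer.
 */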
void hinic_prepare_rq_wqe(void *wqe, u16 pi, dma_addr_t buf_addr,
dma_addr_t cqe_dma)
{
struct hinic_rq_wqe *rq_wqe = (struct hinic_rq_wqe *)wqe;
struct hinic_rq_ctrl *ctrl = &rq_wqe->ctrl;
struct hinic_rq_cqe_sect *cqe_sect = &rq_wqe->cqe_sect;
struct hinic_rq_bufdesc *buf_desc = &rq_wqe->buf_desc;
u32 rq_ceq_len = sizeof(struct hinic_rq_cqe);
ctrl->ctrl_fmt =
RQ_CTRL_SET(SIZE_8BYTES(sizeof(*ctrl)), LEN) |
RQ_CTRL_SET(SIZE_8BYTES(sizeof(*cqe_sect)), COMPLETE_LEN) |
RQ_CTRL_SET(SIZE_8BYTES(sizeof(*buf_desc)), BUFDESC_SECT_LEN) |
RQ_CTRL_SET(RQ_COMPLETE_SGE, COMPLETE_FORMAT);
hinic_set_sge(&cqe_sect->sge, cqe_dma, rq_ceq_len);
buf_desc->addr_high = upper_32_bits(buf_addr);
buf_desc->addr_low = lower_32_bits(buf_addr);
}
void hinic_set_cs_inner_l4(struct hinic_sq_task *task,
u32 *queue_info,
enum sq_l4offload_type l4_offload,
u32 l4_len, u32 offset)
{
u32 tcp_udp_cs = 0, sctp = 0;
u32 mss = TX_MSS_DEFAULT;
	/* tcp_udp_cs should be set to calculate the outer checksum for
	 * vxlan packets without inner l3 and l4 headers
	 */
if (unlikely(l4_offload == SCTP_OFFLOAD_ENABLE))
sctp = 1;
else
tcp_udp_cs = 1;
task->pkt_info0 |= SQ_TASK_INFO0_SET(l4_offload, L4OFFLOAD);
task->pkt_info1 |= SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN);
*queue_info |= SQ_CTRL_QUEUE_INFO_SET(offset, PLDOFF) |
SQ_CTRL_QUEUE_INFO_SET(tcp_udp_cs, TCPUDP_CS) |
SQ_CTRL_QUEUE_INFO_SET(sctp, SCTP);
*queue_info = SQ_CTRL_QUEUE_INFO_CLEAR(*queue_info, MSS);
*queue_info |= SQ_CTRL_QUEUE_INFO_SET(mss, MSS);
}
void hinic_set_tso_inner_l4(struct hinic_sq_task *task,
u32 *queue_info,
enum sq_l4offload_type l4_offload,
u32 l4_len,
u32 offset, u32 ip_ident, u32 mss)
{
u32 tso = 0, ufo = 0;
if (l4_offload == TCP_OFFLOAD_ENABLE)
tso = 1;
else if (l4_offload == UDP_OFFLOAD_ENABLE)
ufo = 1;
	task->ufo_v6_identify = cpu_to_be32(ip_ident);
/* just keep the same code style here */
task->pkt_info0 |= SQ_TASK_INFO0_SET(l4_offload, L4OFFLOAD);
task->pkt_info0 |= SQ_TASK_INFO0_SET(tso || ufo, TSO_UFO);
task->pkt_info1 |= SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN);
*queue_info |= SQ_CTRL_QUEUE_INFO_SET(offset, PLDOFF) |
SQ_CTRL_QUEUE_INFO_SET(tso, TSO) |
SQ_CTRL_QUEUE_INFO_SET(ufo, UFO) |
SQ_CTRL_QUEUE_INFO_SET(!!l4_offload, TCPUDP_CS);
	/* checksum must be calculated by hw if tso is enabled */
*queue_info = SQ_CTRL_QUEUE_INFO_CLEAR(*queue_info, MSS);
/* qsf was initialized in prepare_sq_wqe */
*queue_info |= SQ_CTRL_QUEUE_INFO_SET(mss, MSS);
}
void hinic_set_vlan_tx_offload(struct hinic_sq_task *task,
u32 *queue_info,
u16 vlan_tag, u16 vlan_pri)
{
task->pkt_info0 |= SQ_TASK_INFO0_SET(vlan_tag, VLAN_TAG) |
SQ_TASK_INFO0_SET(1U, VLAN_OFFLOAD);
*queue_info |= SQ_CTRL_QUEUE_INFO_SET(vlan_pri, PRI);
}
void hinic_task_set_tx_offload_valid(struct hinic_sq_task *task, u32 l2hdr_len)
{
task->pkt_info0 |= SQ_TASK_INFO0_SET(l2hdr_len, L2HDR_LEN);
}
/* SPDX-License-Identifier: GPL-2.0*/
/* Huawei HiNIC PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
*/
#ifndef HINIC_QP_H
#define HINIC_QP_H
#include "hinic_qe_def.h"
#include "hinic_port_cmd.h"
/* frags and linear part */
#define HINIC_MAX_SQ_BUFDESCS (MAX_SKB_FRAGS + 1)
#define HINIC_MAX_SQ_SGE 17
#define HINIC_MAX_SKB_NR_FRAGE (HINIC_MAX_SQ_SGE - 1)
#define HINIC_GSO_MAX_SIZE 65536
struct hinic_sq_ctrl {
u32 ctrl_fmt;
u32 queue_info;
};
struct hinic_sq_task {
u32 pkt_info0;
u32 pkt_info1;
u32 pkt_info2;
u32 ufo_v6_identify;
u32 pkt_info4;
u32 rsvd5;
};
struct hinic_sq_bufdesc {
u32 hi_addr;
u32 lo_addr;
u32 len;
u32 rsvd;
};
struct hinic_sq_wqe {
struct hinic_sq_ctrl ctrl;
struct hinic_sq_task task;
struct hinic_sq_bufdesc buf_descs[HINIC_MAX_SQ_BUFDESCS];
};
struct hinic_rq_ctrl {
u32 ctrl_fmt;
};
struct hinic_rq_cqe {
u32 status;
u32 vlan_len;
u32 offload_type;
u32 hash_val;
u32 rsvd4;
u32 rsvd5;
u32 rsvd6;
u32 pkt_info;
};
struct hinic_rq_cqe_sect {
struct hinic_sge sge;
u32 rsvd;
};
struct hinic_rq_bufdesc {
u32 addr_high;
u32 addr_low;
};
struct hinic_rq_wqe {
struct hinic_rq_ctrl ctrl;
u32 rsvd;
struct hinic_rq_cqe_sect cqe_sect;
struct hinic_rq_bufdesc buf_desc;
};
void hinic_prepare_sq_ctrl(struct hinic_sq_ctrl *ctrl, u32 queue_info,
int nr_descs, u8 owner);
u32 hinic_get_pkt_len(struct hinic_rq_cqe *cqe);
int hinic_get_super_cqe_en(struct hinic_rq_cqe *cqe);
u32 hinic_get_pkt_len_for_super_cqe(struct hinic_rq_cqe *cqe, bool last);
u32 hinic_get_pkt_num(struct hinic_rq_cqe *cqe);
int hinic_get_rx_done(struct hinic_rq_cqe *cqe);
void hinic_clear_rx_done(struct hinic_rq_cqe *cqe, u32 status_old);
void hinic_prepare_rq_wqe(void *wqe, u16 pi, dma_addr_t buf_addr,
dma_addr_t cqe_dma);
static inline void hinic_task_set_outter_l3(struct hinic_sq_task *task,
enum sq_l3_type l3_type,
u32 network_len)
{
task->pkt_info2 |= SQ_TASK_INFO2_SET(l3_type, OUTER_L3TYPE) |
SQ_TASK_INFO2_SET(network_len, OUTER_L3LEN);
}
static inline void hinic_task_set_tunnel_l4(struct hinic_sq_task *task,
enum sq_tunnel_l4_type l4_type,
u32 tunnel_len)
{
task->pkt_info2 |= SQ_TASK_INFO2_SET(l4_type, TUNNEL_L4TYPE) |
SQ_TASK_INFO2_SET(tunnel_len, TUNNEL_L4LEN);
}
static inline void hinic_task_set_inner_l3(struct hinic_sq_task *task,
enum sq_l3_type l3_type,
u32 network_len)
{
task->pkt_info0 |= SQ_TASK_INFO0_SET(l3_type, INNER_L3TYPE);
task->pkt_info1 |= SQ_TASK_INFO1_SET(network_len, INNER_L3LEN);
}
void hinic_set_cs_inner_l4(struct hinic_sq_task *task, u32 *queue_info,
enum sq_l4offload_type l4_offload,
u32 l4_len, u32 offset);
void hinic_set_tso_inner_l4(struct hinic_sq_task *task, u32 *queue_info,
enum sq_l4offload_type l4_offload, u32 l4_len,
u32 offset, u32 ip_ident, u32 mss);
void hinic_set_vlan_tx_offload(struct hinic_sq_task *task, u32 *queue_info,
u16 vlan_tag, u16 vlan_pri);
void hinic_task_set_tx_offload_valid(struct hinic_sq_task *task, u32 l2hdr_len);
#endif
// SPDX-License-Identifier: GPL-2.0
/* Huawei HiNIC PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/u64_stats_sync.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include "ossl_knl.h"
#include "hinic_hw.h"
#include "hinic_hw_mgmt.h"
#include "hinic_nic_io.h"
#include "hinic_nic_cfg.h"
#include "hinic_nic_dev.h"
#include "hinic_qp.h"
#include "hinic_rx.h"
static void hinic_clear_rss_config_user(struct hinic_nic_dev *nic_dev);
#define HINIC_RX_HDR_SIZE 256
#define HINIC_RX_IPV6_PKT 7
#define HINIC_RX_VXLAN_PKT 0xb
#define RXQ_STATS_INC(rxq, field) \
{ \
u64_stats_update_begin(&(rxq)->rxq_stats.syncp); \
(rxq)->rxq_stats.field++; \
u64_stats_update_end(&(rxq)->rxq_stats.syncp); \
}
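/* Make sure rx_info owns a DMA-mapped page. A page of order
 * nic_dev->page_order is shared by RX_BUFF_NUM_PER_PAGE receive buffers
 * and is only (re)allocated when the previous mapping has been released.
 */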
static bool rx_alloc_mapped_page(struct hinic_rxq *rxq,
struct hinic_rx_info *rx_info)
{
struct net_device *netdev = rxq->netdev;
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
struct pci_dev *pdev = nic_dev->pdev;
struct page *page = rx_info->page;
dma_addr_t dma = rx_info->buf_dma_addr;
if (likely(dma))
return true;
/* alloc new page for storage */
page = alloc_pages_node(NUMA_NO_NODE, GFP_ATOMIC, nic_dev->page_order);
if (unlikely(!page)) {
RXQ_STATS_INC(rxq, alloc_rx_buf_err);
return false;
}
/* map page for use */
dma = dma_map_page(&pdev->dev, page, 0, rxq->dma_rx_buff_size,
DMA_FROM_DEVICE);
/* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use
*/
if (unlikely(dma_mapping_error(&pdev->dev, dma))) {
RXQ_STATS_INC(rxq, map_rx_buf_err);
__free_pages(page, nic_dev->page_order);
return false;
}
rx_info->page = page;
rx_info->buf_dma_addr = dma;
rx_info->page_offset = 0;
return true;
}
static int hinic_rx_fill_wqe(struct hinic_rxq *rxq)
{
struct net_device *netdev = rxq->netdev;
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
struct hinic_rq_wqe *rq_wqe;
struct hinic_rx_info *rx_info;
dma_addr_t dma_addr = 0;
u16 pi = 0;
int rq_wqe_len;
int i;
for (i = 0; i < rxq->q_depth; i++) {
rx_info = &rxq->rx_info[i];
rq_wqe = hinic_get_rq_wqe(nic_dev->hwdev, rxq->q_id, &pi);
if (!rq_wqe) {
nicif_err(nic_dev, drv, netdev, "Failed to get rq wqe, rxq id: %d, wqe id: %d\n",
rxq->q_id, i);
break;
}
hinic_prepare_rq_wqe(rq_wqe, pi, dma_addr, rx_info->cqe_dma);
rq_wqe_len = sizeof(struct hinic_rq_wqe);
hinic_cpu_to_be32(rq_wqe, rq_wqe_len);
rx_info->rq_wqe = rq_wqe;
}
hinic_return_rq_wqe(nic_dev->hwdev, rxq->q_id, rxq->q_depth);
return i;
}
static int hinic_rx_fill_buffers(struct hinic_rxq *rxq)
{
struct net_device *netdev = rxq->netdev;
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
struct hinic_rq_wqe *rq_wqe;
struct hinic_rx_info *rx_info;
dma_addr_t dma_addr;
int i;
int free_wqebbs = rxq->delta - 1;
for (i = 0; i < free_wqebbs; i++) {
rx_info = &rxq->rx_info[rxq->next_to_update];
if (unlikely(!rx_alloc_mapped_page(rxq, rx_info)))
break;
dma_addr = rx_info->buf_dma_addr + rx_info->page_offset;
rq_wqe = rx_info->rq_wqe;
rq_wqe->buf_desc.addr_high =
cpu_to_be32(upper_32_bits(dma_addr));
rq_wqe->buf_desc.addr_low =
cpu_to_be32(lower_32_bits(dma_addr));
rxq->next_to_update = (rxq->next_to_update + 1) & rxq->q_mask;
}
if (likely(i)) {
/* Write all the wqes before pi update */
wmb();
hinic_update_rq_hw_pi(nic_dev->hwdev, rxq->q_id,
rxq->next_to_update);
rxq->delta -= i;
rxq->next_to_alloc = rxq->next_to_update;
} else if (free_wqebbs == rxq->q_depth - 1) {
RXQ_STATS_INC(rxq, rx_buf_empty);
}
return i;
}
void hinic_rx_free_buffers(struct hinic_rxq *rxq)
{
u16 i;
struct hinic_nic_dev *nic_dev = netdev_priv(rxq->netdev);
struct hinic_rx_info *rx_info;
	/* Free all the Rx ring buffer pages */
for (i = 0; i < rxq->q_depth; i++) {
rx_info = &rxq->rx_info[i];
if (rx_info->buf_dma_addr) {
dma_unmap_page(rxq->dev, rx_info->buf_dma_addr,
rxq->dma_rx_buff_size,
DMA_FROM_DEVICE);
rx_info->buf_dma_addr = 0;
}
if (rx_info->page) {
__free_pages(rx_info->page, nic_dev->page_order);
rx_info->page = NULL;
}
}
}
static void hinic_reuse_rx_page(struct hinic_rxq *rxq,
struct hinic_rx_info *old_rx_info)
{
struct hinic_rx_info *new_rx_info;
u16 nta = rxq->next_to_alloc;
new_rx_info = &rxq->rx_info[nta];
/* update, and store next to alloc */
nta++;
rxq->next_to_alloc = (nta < rxq->q_depth) ? nta : 0;
new_rx_info->page = old_rx_info->page;
new_rx_info->page_offset = old_rx_info->page_offset;
new_rx_info->buf_dma_addr = old_rx_info->buf_dma_addr;
/* sync the buffer for use by the device */
dma_sync_single_range_for_device(rxq->dev, new_rx_info->buf_dma_addr,
new_rx_info->page_offset,
rxq->buf_len,
DMA_FROM_DEVICE);
}
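/* Attach a received buffer to the skb. Small packets (<= HINIC_RX_HDR_SIZE)
 * are copied into the linear area so the page can be reused as-is; larger
 * ones are added as a page fragment and, if this CPU holds the only
 * reference on a local-node page, the page is recycled by flipping
 * page_offset to the other half and taking an extra reference.
 * Returns true when the page can be reused via hinic_reuse_rx_page().
 */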
static bool hinic_add_rx_frag(struct hinic_rxq *rxq,
struct hinic_rx_info *rx_info,
struct sk_buff *skb, u32 size)
{
struct page *page;
u8 *va;
page = rx_info->page;
va = (u8 *)page_address(page) + rx_info->page_offset;
prefetch(va);
#if L1_CACHE_BYTES < 128
prefetch(va + L1_CACHE_BYTES);
#endif
dma_sync_single_range_for_cpu(rxq->dev,
rx_info->buf_dma_addr,
rx_info->page_offset,
rxq->buf_len,
DMA_FROM_DEVICE);
if (size <= HINIC_RX_HDR_SIZE && !skb_is_nonlinear(skb)) {
memcpy(__skb_put(skb, size), va,
ALIGN(size, sizeof(long))); /*lint !e666*/
/* page is not reserved, we can reuse buffer as-is */
if (likely(page_to_nid(page) == numa_node_id()))
return true;
/* this page cannot be reused so discard it */
put_page(page);
return false;
}
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
(int)rx_info->page_offset, (int)size, rxq->buf_len);
/* avoid re-using remote pages */
if (unlikely(page_to_nid(page) != numa_node_id()))
return false;
/* if we are only owner of page we can reuse it */
if (unlikely(page_count(page) != 1))
return false;
/* flip page offset to other buffer */
rx_info->page_offset ^= rxq->buf_len;
page_ref_inc(page);
return true;
}
static void __packaging_skb(struct hinic_rxq *rxq, struct sk_buff *head_skb,
u8 sge_num, u32 pkt_len)
{
struct hinic_rx_info *rx_info;
struct sk_buff *skb;
u8 frag_num = 0;
u32 size;
u16 sw_ci;
sw_ci = ((u32)rxq->cons_idx) & rxq->q_mask;
skb = head_skb;
while (sge_num) {
rx_info = &rxq->rx_info[sw_ci];
sw_ci = (sw_ci + 1) & rxq->q_mask;
if (unlikely(pkt_len > rxq->buf_len)) {
size = rxq->buf_len;
pkt_len -= rxq->buf_len;
} else {
size = pkt_len;
}
if (unlikely(frag_num == MAX_SKB_FRAGS)) {
frag_num = 0;
if (skb == head_skb)
skb = skb_shinfo(skb)->frag_list;
else
skb = skb->next;
}
if (unlikely(skb != head_skb)) {
head_skb->len += size;
head_skb->data_len += size;
head_skb->truesize += rxq->buf_len;
}
if (likely(hinic_add_rx_frag(rxq, rx_info, skb, size))) {
hinic_reuse_rx_page(rxq, rx_info);
} else {
/* we are not reusing the buffer so unmap it */
dma_unmap_page(rxq->dev, rx_info->buf_dma_addr,
rxq->dma_rx_buff_size, DMA_FROM_DEVICE);
}
/* clear contents of buffer_info */
rx_info->buf_dma_addr = 0;
rx_info->page = NULL;
sge_num--;
frag_num++;
}
}
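/* Assemble an skb for a packet that may span several receive buffers:
 * sge_num is the number of buffers covered by pkt_len (ceiling division by
 * buf_len) and, since one skb holds at most MAX_SKB_FRAGS fragments, extra
 * skbs are chained through frag_list/next before the buffers are attached.
 */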
static struct sk_buff *hinic_fetch_rx_buffer(struct hinic_rxq *rxq, u32 pkt_len)
{
struct sk_buff *head_skb, *cur_skb, *skb = NULL;
struct net_device *netdev = rxq->netdev;
u8 sge_num, skb_num;
u16 wqebb_cnt = 0;
head_skb = netdev_alloc_skb_ip_align(netdev, HINIC_RX_HDR_SIZE);
if (unlikely(!head_skb))
return NULL;
sge_num = (u8)(pkt_len >> rxq->rx_buff_shift) +
((pkt_len & (rxq->buf_len - 1)) ? 1 : 0);
if (likely(sge_num <= MAX_SKB_FRAGS))
skb_num = 1;
else
skb_num = (sge_num / MAX_SKB_FRAGS) +
((sge_num % MAX_SKB_FRAGS) ? 1 : 0);
while (unlikely(skb_num > 1)) {
cur_skb = netdev_alloc_skb_ip_align(netdev, HINIC_RX_HDR_SIZE);
if (unlikely(!cur_skb))
goto alloc_skb_fail;
if (!skb) {
skb_shinfo(head_skb)->frag_list = cur_skb;
skb = cur_skb;
} else {
skb->next = cur_skb;
skb = cur_skb;
}
skb_num--;
}
prefetchw(head_skb->data);
wqebb_cnt = sge_num;
__packaging_skb(rxq, head_skb, sge_num, pkt_len);
rxq->cons_idx += wqebb_cnt;
rxq->delta += wqebb_cnt;
return head_skb;
alloc_skb_fail:
dev_kfree_skb_any(head_skb);
return NULL;
}
void hinic_rxq_get_stats(struct hinic_rxq *rxq,
struct hinic_rxq_stats *stats)
{
struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
unsigned int start;
u64_stats_update_begin(&stats->syncp);
do {
start = u64_stats_fetch_begin(&rxq_stats->syncp);
stats->bytes = rxq_stats->bytes;
stats->packets = rxq_stats->packets;
stats->errors = rxq_stats->csum_errors +
rxq_stats->other_errors;
stats->csum_errors = rxq_stats->csum_errors;
stats->other_errors = rxq_stats->other_errors;
stats->dropped = rxq_stats->dropped;
stats->rx_buf_empty = rxq_stats->rx_buf_empty;
} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
u64_stats_update_end(&stats->syncp);
}
void hinic_rxq_clean_stats(struct hinic_rxq_stats *rxq_stats)
{
u64_stats_update_begin(&rxq_stats->syncp);
rxq_stats->bytes = 0;
rxq_stats->packets = 0;
rxq_stats->errors = 0;
rxq_stats->csum_errors = 0;
rxq_stats->other_errors = 0;
rxq_stats->dropped = 0;
rxq_stats->alloc_skb_err = 0;
rxq_stats->alloc_rx_buf_err = 0;
rxq_stats->map_rx_buf_err = 0;
rxq_stats->rx_buf_empty = 0;
u64_stats_update_end(&rxq_stats->syncp);
}
static void rxq_stats_init(struct hinic_rxq *rxq)
{
struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
u64_stats_init(&rxq_stats->syncp);
hinic_rxq_clean_stats(rxq_stats);
}
static void hinic_pull_tail(struct sk_buff *skb)
{
skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
unsigned char *va;
/* it is valid to use page_address instead of kmap since we are
	 * working with pages allocated out of the lowmem pool per
* alloc_page(GFP_ATOMIC)
*/
va = skb_frag_address(frag);
/* align pull length to size of long to optimize memcpy performance */
skb_copy_to_linear_data(skb, va, HINIC_RX_HDR_SIZE);
/* update all of the pointers */
skb_frag_size_sub(frag, HINIC_RX_HDR_SIZE);
frag->bv_offset += HINIC_RX_HDR_SIZE;
skb->data_len -= HINIC_RX_HDR_SIZE;
skb->tail += HINIC_RX_HDR_SIZE;
}
static void hinic_rx_csum(struct hinic_rxq *rxq, u32 status,
struct sk_buff *skb)
{
struct net_device *netdev = rxq->netdev;
u32 csum_err;
csum_err = HINIC_GET_RX_CSUM_ERR(status);
if (unlikely(csum_err == HINIC_RX_CSUM_IPSU_OTHER_ERR))
rxq->rxq_stats.other_errors++;
if (!(netdev->features & NETIF_F_RXCSUM))
return;
if (!csum_err) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else {
/* pkt type is recognized by HW, and csum is err */
if (!(csum_err & (HINIC_RX_CSUM_HW_CHECK_NONE |
HINIC_RX_CSUM_IPSU_OTHER_ERR)))
rxq->rxq_stats.csum_errors++;
skb->ip_summed = CHECKSUM_NONE;
}
}
static void hinic_rx_gro(struct hinic_rxq *rxq, u32 offload_type,
struct sk_buff *skb)
{
struct net_device *netdev = rxq->netdev;
bool l2_tunnel;
if (!(netdev->features & NETIF_F_GRO))
return;
l2_tunnel = HINIC_GET_RX_PKT_TYPE(offload_type) == HINIC_RX_VXLAN_PKT ?
1 : 0;
if (l2_tunnel && skb->ip_summed == CHECKSUM_UNNECESSARY)
/* If we checked the outer header let the stack know */
skb->csum_level = 1;
}
static void hinic_copy_lp_data(struct hinic_nic_dev *nic_dev,
struct sk_buff *skb)
{
struct net_device *netdev = nic_dev->netdev;
u8 *lb_buf = nic_dev->lb_test_rx_buf;
void *frag_data;
int lb_len = nic_dev->lb_pkt_len;
int pkt_offset, frag_len, i;
if (nic_dev->lb_test_rx_idx == LP_PKT_CNT) {
nic_dev->lb_test_rx_idx = 0;
nicif_warn(nic_dev, rx_err, netdev, "Loopback test warning, recive too more test pkt\n");
}
if (skb->len != nic_dev->lb_pkt_len) {
nicif_warn(nic_dev, rx_err, netdev, "Wrong packet length\n");
nic_dev->lb_test_rx_idx++;
return;
}
pkt_offset = nic_dev->lb_test_rx_idx * lb_len;
frag_len = (int)skb_headlen(skb);
memcpy((lb_buf + pkt_offset), skb->data, frag_len);
pkt_offset += frag_len;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
frag_data = skb_frag_address(&skb_shinfo(skb)->frags[i]);
frag_len = (int)skb_frag_size(&skb_shinfo(skb)->frags[i]);
memcpy((lb_buf + pkt_offset), frag_data, frag_len);
pkt_offset += frag_len;
}
nic_dev->lb_test_rx_idx++;
}
int recv_one_pkt(struct hinic_rxq *rxq, struct hinic_rq_cqe *rx_cqe,
u32 pkt_len, u32 vlan_len, u32 status)
{
struct sk_buff *skb;
struct net_device *netdev = rxq->netdev;
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
u32 offload_type;
skb = hinic_fetch_rx_buffer(rxq, pkt_len);
if (unlikely(!skb)) {
RXQ_STATS_INC(rxq, alloc_skb_err);
return -ENOMEM;
}
/* place header in linear portion of buffer */
if (skb_is_nonlinear(skb))
hinic_pull_tail(skb);
hinic_rx_csum(rxq, status, skb);
offload_type = be32_to_cpu(rx_cqe->offload_type);
hinic_rx_gro(rxq, offload_type, skb);
if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type)) {
u16 vid = HINIC_GET_RX_VLAN_TAG(vlan_len);
/* if the packet is a vlan pkt, the vid may be 0 */
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
}
if (unlikely(test_bit(HINIC_LP_TEST, &nic_dev->flags)))
hinic_copy_lp_data(nic_dev, skb);
skb_record_rx_queue(skb, rxq->q_id);
skb->protocol = eth_type_trans(skb, netdev);
if (skb_has_frag_list(skb)) {
napi_gro_flush(&rxq->irq_cfg->napi, false);
netif_receive_skb(skb);
} else {
napi_gro_receive(&rxq->irq_cfg->napi, skb);
}
return 0;
}
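/* Skip the remaining packets of a super CQE after a receive failure so the
 * consumer index still advances over all of their buffers and the wqebbs
 * can be reclaimed.
 */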
void rx_pass_super_cqe(struct hinic_rxq *rxq, u32 index, u32 pkt_num,
struct hinic_rq_cqe *cqe)
{
u8 sge_num = 0;
u32 pkt_len;
while (index < pkt_num) {
pkt_len = hinic_get_pkt_len_for_super_cqe
(cqe, index == (pkt_num - 1));
sge_num += (u8)(pkt_len >> rxq->rx_buff_shift) +
((pkt_len & (rxq->buf_len - 1)) ? 1 : 0);
index++;
}
rxq->cons_idx += sge_num;
rxq->delta += sge_num;
}
static inline int __recv_super_cqe(struct hinic_rxq *rxq,
struct hinic_rq_cqe *rx_cqe, u32 pkt_info,
u32 vlan_len, u32 status, int *pkts,
u64 *rx_bytes, u32 *dropped)
{
u32 pkt_len;
int i, pkt_num = 0;
pkt_num = HINIC_GET_RQ_CQE_PKT_NUM(pkt_info);
i = 0;
while (i < pkt_num) {
pkt_len = ((i == (pkt_num - 1)) ?
RQ_CQE_PKT_LEN_GET(pkt_info, LAST_LEN) :
RQ_CQE_PKT_LEN_GET(pkt_info, FIRST_LEN));
if (unlikely(recv_one_pkt(rxq, rx_cqe, pkt_len,
vlan_len, status))) {
if (i) {
rx_pass_super_cqe(rxq, i,
pkt_num,
rx_cqe);
*dropped += (pkt_num - i);
}
break;
}
*rx_bytes += pkt_len;
(*pkts)++;
i++;
}
if (!i)
return -EFAULT;
return 0;
}
#define LRO_PKT_HDR_LEN_IPV4 66
#define LRO_PKT_HDR_LEN_IPV6 86
#define LRO_PKT_HDR_LEN(cqe) \
(HINIC_GET_RX_PKT_TYPE(be32_to_cpu((cqe)->offload_type)) == \
HINIC_RX_IPV6_PKT ? LRO_PKT_HDR_LEN_IPV6 : LRO_PKT_HDR_LEN_IPV4)
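/* NAPI poll routine: consume completed CQEs until the budget is exhausted,
 * expanding super CQEs into individual packets and adding back the header
 * bytes of packets merged by LRO; receive buffers are refilled once at
 * least HINIC_RX_BUFFER_WRITE wqebbs are free, and the loop also stops
 * early when the LRO replenish threshold of consumed wqebbs is reached.
 */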
int hinic_rx_poll(struct hinic_rxq *rxq, int budget)
{
struct hinic_nic_dev *nic_dev = netdev_priv(rxq->netdev);
u32 status, pkt_len, vlan_len, pkt_info, dropped = 0;
struct hinic_rq_cqe *rx_cqe;
u64 rx_bytes = 0;
u16 sw_ci, num_lro;
int pkts = 0, nr_pkts = 0;
u16 num_wqe = 0;
while (likely(pkts < budget)) {
sw_ci = ((u32)rxq->cons_idx) & rxq->q_mask;
rx_cqe = rxq->rx_info[sw_ci].cqe;
status = be32_to_cpu(rx_cqe->status);
if (!HINIC_GET_RX_DONE(status))
break;
/* make sure we read rx_done before packet length */
rmb();
vlan_len = be32_to_cpu(rx_cqe->vlan_len);
pkt_info = be32_to_cpu(rx_cqe->pkt_info);
pkt_len = HINIC_GET_RX_PKT_LEN(vlan_len);
if (unlikely(HINIC_GET_SUPER_CQE_EN(pkt_info))) {
			if (unlikely(__recv_super_cqe(rxq, rx_cqe, pkt_info,
vlan_len, status, &pkts,
&rx_bytes, &dropped)))
break;
nr_pkts += (int)HINIC_GET_RQ_CQE_PKT_NUM(pkt_info);
} else {
if (recv_one_pkt(rxq, rx_cqe, pkt_len,
vlan_len, status))
break;
rx_bytes += pkt_len;
pkts++;
nr_pkts++;
num_lro = HINIC_GET_RX_NUM_LRO(status);
if (num_lro) {
rx_bytes += ((num_lro - 1) *
LRO_PKT_HDR_LEN(rx_cqe));
num_wqe +=
(u16)(pkt_len >> rxq->rx_buff_shift) +
((pkt_len & (rxq->buf_len - 1)) ? 1 : 0);
}
}
rx_cqe->status = 0;
if (num_wqe >= nic_dev->lro_replenish_thld)
break;
}
if (rxq->delta >= HINIC_RX_BUFFER_WRITE)
hinic_rx_fill_buffers(rxq);
u64_stats_update_begin(&rxq->rxq_stats.syncp);
rxq->rxq_stats.packets += nr_pkts;
rxq->rxq_stats.bytes += rx_bytes;
rxq->rxq_stats.dropped += dropped;
u64_stats_update_end(&rxq->rxq_stats.syncp);
return pkts;
}
static int rx_alloc_cqe(struct hinic_rxq *rxq)
{
struct hinic_nic_dev *nic_dev = netdev_priv(rxq->netdev);
struct pci_dev *pdev = nic_dev->pdev;
struct hinic_rx_info *rx_info;
struct hinic_rq_cqe *cqe_va;
dma_addr_t cqe_pa;
u32 cqe_mem_size;
int idx;
cqe_mem_size = sizeof(*rx_info->cqe) * rxq->q_depth;
rxq->cqe_start_vaddr = dma_alloc_coherent(&pdev->dev, cqe_mem_size,
&rxq->cqe_start_paddr,
GFP_KERNEL);
if (!rxq->cqe_start_vaddr) {
nicif_err(nic_dev, drv, rxq->netdev, "Failed to allocate cqe dma\n");
return -ENOMEM;
}
cqe_va = (struct hinic_rq_cqe *)rxq->cqe_start_vaddr;
cqe_pa = rxq->cqe_start_paddr;
for (idx = 0; idx < rxq->q_depth; idx++) {
rx_info = &rxq->rx_info[idx];
rx_info->cqe = cqe_va;
rx_info->cqe_dma = cqe_pa;
cqe_va++;
cqe_pa += sizeof(*rx_info->cqe);
}
hinic_rq_cqe_addr_set(nic_dev->hwdev, rxq->q_id, rxq->cqe_start_paddr);
return 0;
}
static void rx_free_cqe(struct hinic_rxq *rxq)
{
struct hinic_nic_dev *nic_dev = netdev_priv(rxq->netdev);
struct pci_dev *pdev = nic_dev->pdev;
u32 cqe_mem_size;
cqe_mem_size = sizeof(struct hinic_rq_cqe) * rxq->q_depth;
dma_free_coherent(&pdev->dev, cqe_mem_size,
rxq->cqe_start_vaddr, rxq->cqe_start_paddr);
}
static int hinic_setup_rx_resources(struct hinic_rxq *rxq,
struct net_device *netdev,
struct irq_info *entry)
{
struct hinic_nic_dev *nic_dev = netdev_priv(rxq->netdev);
u64 rx_info_sz;
int err, pkts;
rxq->irq_id = entry->irq_id;
rxq->msix_entry_idx = entry->msix_entry_idx;
rxq->next_to_alloc = 0;
rxq->next_to_update = 0;
rxq->delta = rxq->q_depth;
rxq->q_mask = rxq->q_depth - 1;
rxq->cons_idx = 0;
rx_info_sz = rxq->q_depth * sizeof(*rxq->rx_info);
if (!rx_info_sz) {
nicif_err(nic_dev, drv, netdev, "Cannot allocate zero size rx info\n");
return -EINVAL;
}
rxq->rx_info = kzalloc(rx_info_sz, GFP_KERNEL);
if (!rxq->rx_info)
return -ENOMEM;
err = rx_alloc_cqe(rxq);
if (err) {
nicif_err(nic_dev, drv, netdev, "Failed to allocate Rx cqe\n");
goto rx_cqe_err;
}
pkts = hinic_rx_fill_wqe(rxq);
if (pkts != rxq->q_depth) {
nicif_err(nic_dev, drv, netdev, "Failed to fill rx wqe\n");
err = -ENOMEM;
goto rx_pkts_err;
}
pkts = hinic_rx_fill_buffers(rxq);
if (!pkts) {
nicif_err(nic_dev, drv, netdev, "Failed to allocate Rx buffer\n");
err = -ENOMEM;
goto rx_pkts_err;
}
return 0;
rx_pkts_err:
rx_free_cqe(rxq);
rx_cqe_err:
kfree(rxq->rx_info);
return err;
}
static void hinic_free_rx_resources(struct hinic_rxq *rxq)
{
hinic_rx_free_buffers(rxq);
rx_free_cqe(rxq);
kfree(rxq->rx_info);
}
int hinic_setup_all_rx_resources(struct net_device *netdev,
struct irq_info *msix_entries)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
u16 i, q_id;
int err;
for (q_id = 0; q_id < nic_dev->num_qps; q_id++) {
err = hinic_setup_rx_resources(&nic_dev->rxqs[q_id],
nic_dev->netdev,
&msix_entries[q_id]);
if (err) {
nicif_err(nic_dev, drv, netdev, "Failed to set up rxq resource\n");
goto init_rxq_err;
}
}
return 0;
init_rxq_err:
for (i = 0; i < q_id; i++)
hinic_free_rx_resources(&nic_dev->rxqs[i]);
return err;
}
void hinic_free_all_rx_resources(struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
u16 q_id;
for (q_id = 0; q_id < nic_dev->num_qps; q_id++)
hinic_free_rx_resources(&nic_dev->rxqs[q_id]);
}
int hinic_alloc_rxqs(struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
struct pci_dev *pdev = nic_dev->pdev;
struct hinic_rxq *rxq;
u16 num_rxqs = nic_dev->max_qps;
u16 q_id;
u64 rxq_size;
rxq_size = num_rxqs * sizeof(*nic_dev->rxqs);
if (!rxq_size) {
nic_err(&pdev->dev, "Cannot allocate zero size rxqs\n");
return -EINVAL;
}
nic_dev->rxqs = kzalloc(rxq_size, GFP_KERNEL);
if (!nic_dev->rxqs) {
nic_err(&pdev->dev, "Failed to allocate rxqs\n");
return -ENOMEM;
}
for (q_id = 0; q_id < num_rxqs; q_id++) {
rxq = &nic_dev->rxqs[q_id];
rxq->netdev = netdev;
rxq->dev = &pdev->dev;
rxq->q_id = q_id;
rxq->buf_len = nic_dev->rx_buff_len;
rxq->rx_buff_shift = ilog2(nic_dev->rx_buff_len);
rxq->dma_rx_buff_size = RX_BUFF_NUM_PER_PAGE *
nic_dev->rx_buff_len;
rxq->q_depth = nic_dev->rq_depth;
rxq->q_mask = nic_dev->rq_depth - 1;
rxq_stats_init(rxq);
}
return 0;
}
void hinic_free_rxqs(struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
hinic_clear_rss_config_user(nic_dev);
kfree(nic_dev->rxqs);
}
void hinic_init_rss_parameters(struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
nic_dev->rss_hash_engine = HINIC_RSS_HASH_ENGINE_TYPE_XOR;
nic_dev->rss_type.tcp_ipv6_ext = 1;
nic_dev->rss_type.ipv6_ext = 1;
nic_dev->rss_type.tcp_ipv6 = 1;
nic_dev->rss_type.ipv6 = 1;
nic_dev->rss_type.tcp_ipv4 = 1;
nic_dev->rss_type.ipv4 = 1;
nic_dev->rss_type.udp_ipv6 = 1;
nic_dev->rss_type.udp_ipv4 = 1;
}
void hinic_set_default_rss_indir(struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
if (!nic_dev->rss_indir_user)
return;
nicif_info(nic_dev, drv, netdev,
"Discard user configured Rx flow hash indirection\n");
kfree(nic_dev->rss_indir_user);
nic_dev->rss_indir_user = NULL;
}
static void hinic_maybe_reconfig_rss_indir(struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
int i;
if (!nic_dev->rss_indir_user)
return;
if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags))
goto discard_user_rss_indir;
for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++) {
if (nic_dev->rss_indir_user[i] >= nic_dev->num_qps)
goto discard_user_rss_indir;
}
return;
discard_user_rss_indir:
hinic_set_default_rss_indir(netdev);
}
static void hinic_clear_rss_config_user(struct hinic_nic_dev *nic_dev)
{
kfree(nic_dev->rss_hkey_user);
nic_dev->rss_hkey_user_be = NULL;
nic_dev->rss_hkey_user = NULL;
kfree(nic_dev->rss_indir_user);
nic_dev->rss_indir_user = NULL;
}
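/* Build the default RSS indirection table. With traffic classes enabled the
 * table is split into num_tcs equal groups and every group cycles through
 * the num_rss queues of its class; e.g. (illustrative values) num_tcs = 2,
 * num_rss = 4 and a 256-entry table yield queues 0..3 repeating over
 * entries 0..127 and queues 4..7 repeating over entries 128..255.
 */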
static void hinic_fillout_indir_tbl(struct hinic_nic_dev *nic_dev,
u8 num_tcs, u32 *indir)
{
u16 num_rss, tc_group_size;
int i;
if (num_tcs)
tc_group_size = HINIC_RSS_INDIR_SIZE / num_tcs;
else
tc_group_size = HINIC_RSS_INDIR_SIZE;
num_rss = nic_dev->num_rss;
for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++)
indir[i] = (i / tc_group_size) * num_rss + i % num_rss;
}
static void hinic_rss_deinit(struct hinic_nic_dev *nic_dev)
{
u8 prio_tc[HINIC_DCB_UP_MAX] = {0};
hinic_rss_cfg(nic_dev->hwdev, 0, nic_dev->rss_tmpl_idx, 0, prio_tc);
}
int hinic_set_hw_rss_parameters(struct net_device *netdev, u8 rss_en, u8 num_tc,
u8 *prio_tc)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
u8 tmpl_idx = 0xFF;
u8 default_rss_key[HINIC_RSS_KEY_SIZE] = {
0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa};
u32 *indir_tbl;
u8 *hkey;
int err;
tmpl_idx = nic_dev->rss_tmpl_idx;
/* RSS key */
if (nic_dev->rss_hkey_user)
hkey = nic_dev->rss_hkey_user;
else
hkey = default_rss_key;
err = hinic_rss_set_template_tbl(nic_dev->hwdev, tmpl_idx, hkey);
if (err)
return err;
hinic_maybe_reconfig_rss_indir(netdev);
indir_tbl = kzalloc(sizeof(u32) * HINIC_RSS_INDIR_SIZE, GFP_KERNEL);
if (!indir_tbl) {
nicif_err(nic_dev, drv, netdev, "Failed to allocate set hw rss indir_tbl\n");
return -ENOMEM;
}
if (nic_dev->rss_indir_user)
memcpy(indir_tbl, nic_dev->rss_indir_user,
sizeof(u32) * HINIC_RSS_INDIR_SIZE);
else
hinic_fillout_indir_tbl(nic_dev, num_tc, indir_tbl);
err = hinic_rss_set_indir_tbl(nic_dev->hwdev, tmpl_idx, indir_tbl);
if (err)
goto out;
err = hinic_set_rss_type(nic_dev->hwdev, tmpl_idx, nic_dev->rss_type);
if (err)
goto out;
err = hinic_rss_set_hash_engine(nic_dev->hwdev, tmpl_idx,
nic_dev->rss_hash_engine);
if (err)
goto out;
err = hinic_rss_cfg(nic_dev->hwdev, rss_en, tmpl_idx, num_tc, prio_tc);
if (err)
goto out;
kfree(indir_tbl);
return 0;
out:
kfree(indir_tbl);
return err;
}
static int hinic_rss_init(struct hinic_nic_dev *nic_dev)
{
struct net_device *netdev = nic_dev->netdev;
u32 *indir_tbl;
u8 cos, num_tc = 0;
u8 prio_tc[HINIC_DCB_UP_MAX] = {0};
int err;
if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags)) {
num_tc = nic_dev->max_cos;
for (cos = 0; cos < HINIC_DCB_COS_MAX; cos++) {
if (cos < HINIC_DCB_COS_MAX - nic_dev->max_cos)
prio_tc[cos] = nic_dev->max_cos - 1;
else
prio_tc[cos] = (HINIC_DCB_COS_MAX - 1) - cos;
}
} else {
num_tc = 0;
}
indir_tbl = kzalloc(sizeof(u32) * HINIC_RSS_INDIR_SIZE, GFP_KERNEL);
if (!indir_tbl) {
nicif_err(nic_dev, drv, netdev, "Failed to allocate rss init indir_tbl\n");
return -ENOMEM;
}
if (nic_dev->rss_indir_user)
memcpy(indir_tbl, nic_dev->rss_indir_user,
sizeof(u32) * HINIC_RSS_INDIR_SIZE);
else
hinic_fillout_indir_tbl(nic_dev, num_tc, indir_tbl);
err = hinic_set_hw_rss_parameters(netdev, 1, num_tc, prio_tc);
if (err) {
kfree(indir_tbl);
return err;
}
kfree(indir_tbl);
return 0;
}
int hinic_update_hw_tc_map(struct net_device *netdev, u8 num_tc, u8 *prio_tc)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
u8 tmpl_idx = nic_dev->rss_tmpl_idx;
	/* RSS must be enabled when dcb is enabled */
return hinic_rss_cfg(nic_dev->hwdev, 1, tmpl_idx, num_tc, prio_tc);
}
int hinic_rx_configure(struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
int err;
if (test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) {
err = hinic_rss_init(nic_dev);
if (err) {
nicif_err(nic_dev, drv, netdev, "Failed to init rss\n");
return -EFAULT;
}
}
err = hinic_dcb_set_rq_iq_mapping(nic_dev->hwdev,
hinic_func_max_qnum(nic_dev->hwdev),
NULL);
if (err) {
nicif_err(nic_dev, drv, netdev, "Failed to set rq_iq mapping\n");
goto set_rq_cos_mapping_err;
}
return 0;
set_rq_cos_mapping_err:
if (test_bit(HINIC_RSS_ENABLE, &nic_dev->flags))
hinic_rss_deinit(nic_dev);
return err;
}
void hinic_rx_remove_configure(struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
if (test_bit(HINIC_RSS_ENABLE, &nic_dev->flags))
hinic_rss_deinit(nic_dev);
}
/* SPDX-License-Identifier: GPL-2.0*/
/* Huawei HiNIC PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
*/
#ifndef HINIC_RX_H
#define HINIC_RX_H
/* rx cqe checksum err */
#define HINIC_RX_CSUM_IP_CSUM_ERR BIT(0)
#define HINIC_RX_CSUM_TCP_CSUM_ERR BIT(1)
#define HINIC_RX_CSUM_UDP_CSUM_ERR BIT(2)
#define HINIC_RX_CSUM_IGMP_CSUM_ERR BIT(3)
#define HINIC_RX_CSUM_ICMPV4_CSUM_ERR BIT(4)
#define HINIC_RX_CSUM_ICMPV6_CSUM_ERR BIT(5)
#define HINIC_RX_CSUM_SCTP_CRC_ERR BIT(6)
#define HINIC_RX_CSUM_HW_CHECK_NONE BIT(7)
#define HINIC_RX_CSUM_IPSU_OTHER_ERR BIT(8)
#define HINIC_SUPPORT_LRO_ADAP_QPS_MAX 16
#define HINIC_RX_BUFFER_WRITE 16
struct hinic_rxq_stats {
u64 packets;
u64 bytes;
u64 errors;
u64 csum_errors;
u64 other_errors;
u64 dropped;
u64 rx_buf_empty;
u64 alloc_skb_err;
u64 alloc_rx_buf_err;
u64 map_rx_buf_err;
struct u64_stats_sync syncp;
};
struct hinic_rx_info {
dma_addr_t buf_dma_addr;
struct hinic_rq_cqe *cqe;
dma_addr_t cqe_dma;
struct page *page;
u32 page_offset;
struct hinic_rq_wqe *rq_wqe;
};
struct hinic_rxq {
struct net_device *netdev;
u16 q_id;
u16 q_depth;
u16 q_mask;
u16 buf_len;
u32 rx_buff_shift;
u32 dma_rx_buff_size;
struct hinic_rxq_stats rxq_stats;
u16 cons_idx;
u16 delta;
u32 irq_id;
u16 msix_entry_idx;
struct hinic_rx_info *rx_info;
struct hinic_irq *irq_cfg;
u16 next_to_alloc;
u16 next_to_update;
struct device *dev; /* device for DMA mapping */
unsigned long status;
dma_addr_t cqe_start_paddr;
void *cqe_start_vaddr;
u64 last_moder_packets;
u64 last_moder_bytes;
u8 last_coalesc_timer_cfg;
u8 last_pending_limt;
};
void hinic_rxq_clean_stats(struct hinic_rxq_stats *rxq_stats);
void hinic_rxq_get_stats(struct hinic_rxq *rxq,
struct hinic_rxq_stats *stats);
int hinic_alloc_rxqs(struct net_device *netdev);
void hinic_free_rxqs(struct net_device *netdev);
void hinic_init_rss_parameters(struct net_device *netdev);
void hinic_set_default_rss_indir(struct net_device *netdev);
int hinic_setup_all_rx_resources(struct net_device *netdev,
struct irq_info *msix_entries);
void hinic_free_all_rx_resources(struct net_device *netdev);
void hinic_rx_remove_configure(struct net_device *netdev);
int hinic_rx_configure(struct net_device *netdev);
int hinic_set_hw_rss_parameters(struct net_device *netdev, u8 rss_en, u8 num_tc,
u8 *prio_tc);
int hinic_update_hw_tc_map(struct net_device *netdev, u8 num_tc, u8 *prio_tc);
int hinic_rx_poll(struct hinic_rxq *rxq, int budget);
#endif
// SPDX-License-Identifier: GPL-2.0
/* Huawei HiNIC PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include "ossl_knl.h"
#include "hinic_hw.h"
#include "hinic_nic_cfg.h"
#include "hinic_nic_dev.h"
#include "hinic_sriov.h"
#include "hinic_lld.h"
int hinic_pci_sriov_disable(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_IOV
struct hinic_sriov_info *sriov_info;
u16 tmp_vfs;
sriov_info = hinic_get_sriov_info_by_pcidev(dev);
/* if SR-IOV is already disabled then nothing will be done */
if (!sriov_info->sriov_enabled)
return 0;
if (test_and_set_bit(HINIC_SRIOV_DISABLE, &sriov_info->state)) {
nic_err(&sriov_info->pdev->dev,
"SR-IOV disable in process, please wait\n");
return -EPERM;
}
/* If our VFs are assigned we cannot shut down SR-IOV
* without causing issues, so just leave the hardware
* available but disabled
*/
if (pci_vfs_assigned(sriov_info->pdev)) {
clear_bit(HINIC_SRIOV_DISABLE, &sriov_info->state);
nic_warn(&sriov_info->pdev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
return -EPERM;
}
sriov_info->sriov_enabled = false;
/* disable iov and allow time for transactions to clear */
pci_disable_sriov(sriov_info->pdev);
tmp_vfs = (u16)sriov_info->num_vfs;
sriov_info->num_vfs = 0;
hinic_deinit_vf_hw(sriov_info->hwdev, OS_VF_ID_TO_HW(0),
OS_VF_ID_TO_HW(tmp_vfs - 1));
clear_bit(HINIC_SRIOV_DISABLE, &sriov_info->state);
#endif
return 0;
}
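/* Enable num_vfs virtual functions: if a different number of VFs already
 * exists, SR-IOV is disabled first; the VFs are then initialized in
 * hardware before pci_enable_sriov() makes them visible on the bus.
 */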
int hinic_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
struct hinic_sriov_info *sriov_info;
int err = 0;
int pre_existing_vfs = 0;
sriov_info = hinic_get_sriov_info_by_pcidev(dev);
if (test_and_set_bit(HINIC_SRIOV_ENABLE, &sriov_info->state)) {
nic_err(&sriov_info->pdev->dev,
"SR-IOV enable in process, please wait, num_vfs %d\n",
num_vfs);
return -EPERM;
}
pre_existing_vfs = pci_num_vf(sriov_info->pdev);
if (num_vfs > pci_sriov_get_totalvfs(sriov_info->pdev)) {
clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
return -ERANGE;
}
if (pre_existing_vfs && pre_existing_vfs != num_vfs) {
err = hinic_pci_sriov_disable(sriov_info->pdev);
if (err) {
clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
return err;
}
} else if (pre_existing_vfs == num_vfs) {
clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
return num_vfs;
}
err = hinic_init_vf_hw(sriov_info->hwdev, OS_VF_ID_TO_HW(0),
OS_VF_ID_TO_HW((u16)num_vfs - 1));
if (err) {
nic_err(&sriov_info->pdev->dev,
"Failed to init vf in hardware before enable sriov, error %d\n",
err);
clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
return err;
}
err = pci_enable_sriov(sriov_info->pdev, num_vfs);
if (err) {
nic_err(&sriov_info->pdev->dev,
"Failed to enable SR-IOV, error %d\n", err);
clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
return err;
}
sriov_info->sriov_enabled = true;
sriov_info->num_vfs = num_vfs;
clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
return num_vfs;
#else
return 0;
#endif
}
static bool hinic_is_support_sriov_configure(struct pci_dev *pdev)
{
enum hinic_init_state state = hinic_get_init_state(pdev);
struct hinic_sriov_info *sriov_info;
if (state < HINIC_INIT_STATE_NIC_INITED) {
nic_err(&pdev->dev, "NIC device not initialized, don't support to configure sriov\n");
return false;
}
sriov_info = hinic_get_sriov_info_by_pcidev(pdev);
if (FUNC_SRIOV_FIX_NUM_VF(sriov_info->hwdev)) {
nic_err(&pdev->dev, "Don't support to changed sriov configuration\n");
return false;
}
return true;
}
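/* .sriov_configure callback wired to the standard PCI sysfs interface; a
 * typical invocation (illustrative device path) is:
 *   echo 2 > /sys/bus/pci/devices/<BDF>/sriov_numvfs   # enable two VFs
 *   echo 0 > /sys/bus/pci/devices/<BDF>/sriov_numvfs   # disable SR-IOV
 */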
int hinic_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
struct hinic_sriov_info *sriov_info;
if (!hinic_is_support_sriov_configure(dev))
return -EFAULT;
sriov_info = hinic_get_sriov_info_by_pcidev(dev);
if (test_bit(HINIC_FUNC_REMOVE, &sriov_info->state))
return -EFAULT;
if (!num_vfs)
return hinic_pci_sriov_disable(dev);
else
return hinic_pci_sriov_enable(dev, num_vfs);
}
int hinic_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
struct hinic_nic_dev *adapter = netdev_priv(netdev);
struct hinic_sriov_info *sriov_info;
int err;
if (!FUNC_SUPPORT_SET_VF_MAC_VLAN(adapter->hwdev)) {
nicif_err(adapter, drv, netdev,
"Current function don't support to set vf mac\n");
return -EOPNOTSUPP;
}
sriov_info = hinic_get_sriov_info_by_pcidev(adapter->pdev);
if (is_multicast_ether_addr(mac) || /*lint !e574*/
vf >= sriov_info->num_vfs) /*lint !e574*/
return -EINVAL;
err = hinic_set_vf_mac(sriov_info->hwdev, OS_VF_ID_TO_HW(vf), mac);
if (err) {
nicif_info(adapter, drv, netdev, "Failed to set MAC %pM on VF %d\n",
mac, vf);
return err;
}
if (is_zero_ether_addr(mac))
nicif_info(adapter, drv, netdev, "Removing MAC on VF %d\n", vf);
else
nicif_info(adapter, drv, netdev, "Setting MAC %pM on VF %d\n",
mac, vf);
nicif_info(adapter, drv, netdev, "Reload the VF driver to make this change effective\n");
return 0;
}
/*lint -save -e574 -e734*/
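/* Replace the VF's current vlan/qos with the requested one: delete the old
* vlan first (if any) and add the new one, or simply kill the vlan when both
* vlan and qos are zero, then refresh the MAC/VLAN filter entry.
*/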
static int set_hw_vf_vlan(struct hinic_sriov_info *sriov_info,
u16 cur_vlanprio, int vf, u16 vlan, u8 qos)
{
int err = 0;
u16 old_vlan = cur_vlanprio & VLAN_VID_MASK;
if (vlan || qos) {
if (cur_vlanprio) {
err = hinic_kill_vf_vlan(sriov_info->hwdev,
OS_VF_ID_TO_HW(vf));
if (err) {
nic_err(&sriov_info->pdev->dev, "Failed to delete vf %d old vlan %d\n",
vf, old_vlan);
return err;
}
}
err = hinic_add_vf_vlan(sriov_info->hwdev,
OS_VF_ID_TO_HW(vf), vlan, qos);
if (err) {
nic_err(&sriov_info->pdev->dev, "Failed to add vf %d new vlan %d\n",
vf, vlan);
return err;
}
} else {
err = hinic_kill_vf_vlan(sriov_info->hwdev, OS_VF_ID_TO_HW(vf));
if (err) {
nic_err(&sriov_info->pdev->dev, "Failed to delete vf %d vlan %d\n",
vf, old_vlan);
return err;
}
}
return hinic_update_mac_vlan(sriov_info->hwdev, old_vlan, vlan,
OS_VF_ID_TO_HW(vf));
}
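/**
* hinic_ndo_set_vf_vlan - set the vlan and qos of a specified VF
* @netdev: network interface device structure
* @vf: VF identifier
* @vlan: VLAN id (0..4095); vlan 0 together with qos 0 clears the VF vlan
* @qos: priority (0..7)
* @vlan_proto: only 802.1Q is supported
* Return: 0 - success, negative - failure
*/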
int hinic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
__be16 vlan_proto)
{
struct hinic_nic_dev *adapter = netdev_priv(netdev);
struct hinic_sriov_info *sriov_info;
u16 vlanprio, cur_vlanprio;
if (!FUNC_SUPPORT_SET_VF_MAC_VLAN(adapter->hwdev)) {
nicif_err(adapter, drv, netdev,
"Current function don't support to set vf vlan\n");
return -EOPNOTSUPP;
}
sriov_info = hinic_get_sriov_info_by_pcidev(adapter->pdev);
if (vf >= sriov_info->num_vfs || vlan > 4095 || qos > 7)
return -EINVAL;
if (vlan_proto != htons(ETH_P_8021Q))
return -EPROTONOSUPPORT;
vlanprio = vlan | qos << HINIC_VLAN_PRIORITY_SHIFT;
cur_vlanprio = hinic_vf_info_vlanprio(sriov_info->hwdev,
OS_VF_ID_TO_HW(vf));
/* duplicate request, so just return success */
if (vlanprio == cur_vlanprio)
return 0;
return set_hw_vf_vlan(sriov_info, cur_vlanprio, vf, vlan, qos);
}
int hinic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
{
struct hinic_nic_dev *adapter = netdev_priv(netdev);
struct hinic_sriov_info *sriov_info;
int err = 0;
bool cur_spoofchk;
sriov_info = hinic_get_sriov_info_by_pcidev(adapter->pdev);
if (vf >= sriov_info->num_vfs)
return -EINVAL;
cur_spoofchk = hinic_vf_info_spoofchk(sriov_info->hwdev,
OS_VF_ID_TO_HW(vf));
/* same request, so just return success */
if ((setting && cur_spoofchk) || (!setting && !cur_spoofchk))
return 0;
err = hinic_set_vf_spoofchk(sriov_info->hwdev,
OS_VF_ID_TO_HW(vf), setting);
if (!err) {
nicif_info(adapter, drv, netdev, "Set VF %d spoofchk %s\n",
vf, setting ? "on" : "off");
} else if (err == HINIC_MGMT_CMD_UNSUPPORTED) {
nicif_err(adapter, drv, netdev,
"Current firmware doesn't support to set vf spoofchk, need to upgrade latest firmware version\n");
err = -EOPNOTSUPP;
}
return err;
}
int hinic_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
{
struct hinic_nic_dev *adapter = netdev_priv(netdev);
struct hinic_sriov_info *sriov_info;
int err = 0;
bool cur_trust;
sriov_info = hinic_get_sriov_info_by_pcidev(adapter->pdev);
if (vf >= sriov_info->num_vfs)
return -EINVAL;
cur_trust = hinic_vf_info_trust(sriov_info->hwdev,
OS_VF_ID_TO_HW(vf));
/* same request, so just return success */
if ((setting && cur_trust) || (!setting && !cur_trust))
return 0;
err = hinic_set_vf_trust(sriov_info->hwdev,
OS_VF_ID_TO_HW(vf), setting);
if (!err)
nicif_info(adapter, drv, netdev, "Set VF %d trusted %s succeed\n",
vf, setting ? "on" : "off");
else
nicif_err(adapter, drv, netdev, "Failed set VF %d trusted %s\n",
vf, setting ? "on" : "off");
return err;
}
int hinic_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi)
{
struct hinic_nic_dev *adapter = netdev_priv(netdev);
struct hinic_sriov_info *sriov_info;
sriov_info = hinic_get_sriov_info_by_pcidev(adapter->pdev);
if (vf >= sriov_info->num_vfs)
return -EINVAL;
hinic_get_vf_config(sriov_info->hwdev, OS_VF_ID_TO_HW(vf), ivi);
return 0;
}
/**
* hinic_ndo_set_vf_link_state - set the link state of a specified VF
* @netdev: network interface device structure
* @vf_id: VF identifier
* @link: required link state
* Set the link state of a specified VF, regardless of the physical link state.
* Return: 0 - success, negative - failure
*/
int hinic_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
{
struct hinic_nic_dev *adapter = netdev_priv(netdev);
struct hinic_sriov_info *sriov_info;
static const char * const vf_link[] = {"auto", "enable", "disable"};
int err;
if (FUNC_FORCE_LINK_UP(adapter->hwdev)) {
nicif_err(adapter, drv, netdev,
"Current function don't support to set vf link state\n");
return -EOPNOTSUPP;
}
sriov_info = hinic_get_sriov_info_by_pcidev(adapter->pdev);
/* validate the request */
if (vf_id >= sriov_info->num_vfs) {
nicif_err(adapter, drv, netdev,
"Invalid VF Identifier %d\n", vf_id);
return -EINVAL;
}
err = hinic_set_vf_link_state(sriov_info->hwdev,
OS_VF_ID_TO_HW(vf_id), link);
if (!err)
nicif_info(adapter, drv, netdev, "Set VF %d link state: %s\n",
vf_id, vf_link[link]);
return err;
}
#define HINIC_TX_RATE_TABLE_FULL 12
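/**
* hinic_ndo_set_vf_bw - set the tx rate limits of a specified VF
* @netdev: network interface device structure
* @vf: VF identifier
* @min_tx_rate: minimum tx rate in Mbps
* @max_tx_rate: maximum tx rate in Mbps, must not exceed the link speed
* The link must be up, since the rate is validated against the current speed.
* Return: 0 - success, negative - failure
*/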
int hinic_ndo_set_vf_bw(struct net_device *netdev,
int vf, int min_tx_rate, int max_tx_rate)
{
struct hinic_nic_dev *adapter = netdev_priv(netdev);
struct nic_port_info port_info = {0};
struct hinic_sriov_info *sriov_info;
u8 link_status = 0;
u32 speeds[] = {SPEED_10, SPEED_100, SPEED_1000, SPEED_10000,
SPEED_25000, SPEED_40000, SPEED_100000};
int err = 0;
if (!FUNC_SUPPORT_RATE_LIMIT(adapter->hwdev)) {
nicif_err(adapter, drv, netdev,
"Current function don't support to set vf rate limit\n");
return -EOPNOTSUPP;
}
sriov_info = hinic_get_sriov_info_by_pcidev(adapter->pdev);
/* verify VF is active */
if (vf >= sriov_info->num_vfs) {
nicif_err(adapter, drv, netdev, "VF number must be less than %d\n",
sriov_info->num_vfs);
return -EINVAL;
}
if (max_tx_rate < min_tx_rate) {
nicif_err(adapter, drv, netdev, "Invalid rate, max rate %d must greater than min rate %d\n",
max_tx_rate, min_tx_rate);
return -EINVAL;
}
err = hinic_get_link_state(adapter->hwdev, &link_status);
if (err) {
nicif_err(adapter, drv, netdev,
"Get link status failed when set vf tx rate\n");
return -EIO;
}
if (!link_status) {
nicif_err(adapter, drv, netdev,
"Link status must be up when set vf tx rate\n");
return -EINVAL;
}
err = hinic_get_port_info(adapter->hwdev, &port_info);
if (err || port_info.speed > LINK_SPEED_100GB)
return -EIO;
/* the rate limit cannot be negative or exceed the link speed */
if (max_tx_rate < 0 || max_tx_rate > speeds[port_info.speed]) {
nicif_err(adapter, drv, netdev, "Set vf max tx rate must be in [0 - %d]\n",
speeds[port_info.speed]);
return -EINVAL;
}
err = hinic_set_vf_tx_rate(adapter->hwdev, OS_VF_ID_TO_HW(vf),
max_tx_rate, min_tx_rate);
if (err) {
nicif_err(adapter, drv, netdev,
"Unable to set VF %d max rate %d min rate %d%s\n",
vf, max_tx_rate, min_tx_rate,
err == HINIC_TX_RATE_TABLE_FULL ?
", tx rate profile is full" : "");
return -EIO;
}
nicif_info(adapter, drv, netdev,
"Set VF %d max tx rate %d min tx rate %d successfully\n",
vf, max_tx_rate, min_tx_rate);
return 0;
}
/*lint -restore*/
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei HiNIC PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
*/
#ifndef HINIC_SRIOV_H
#define HINIC_SRIOV_H
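/* bit positions for hinic_sriov_info->state, used with test/set/clear_bit() */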
enum hinic_sriov_state {
HINIC_SRIOV_DISABLE,
HINIC_SRIOV_ENABLE,
HINIC_FUNC_REMOVE,
};
struct hinic_sriov_info {
struct pci_dev *pdev;
void *hwdev;
bool sriov_enabled;
unsigned int num_vfs;
unsigned long state;
};
int hinic_pci_sriov_disable(struct pci_dev *dev);
int hinic_pci_sriov_enable(struct pci_dev *dev, int num_vfs);
int hinic_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
int hinic_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
int hinic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
__be16 vlan_proto);
int hinic_ndo_get_vf_config(struct net_device *netdev, int vf,
struct ifla_vf_info *ivi);
int hinic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
int hinic_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting);
int hinic_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
int hinic_ndo_set_vf_bw(struct net_device *netdev,
int vf, int min_tx_rate, int max_tx_rate);
#endif
// SPDX-License-Identifier: GPL-2.0
/* Huawei HiNIC PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt
#include <linux/netdevice.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>
#include "ossl_knl.h"
#include "hinic_hw.h"
#include "hinic_hw_mgmt.h"
#include "hinic_nic_io.h"
#include "hinic_nic_dev.h"
#include "hinic_qp.h"
#include "hinic_tx.h"
#include "hinic_dbg.h"
#define MIN_SKB_LEN 32
#define MAX_PAYLOAD_OFFSET 221
#define NIC_QID(q_id, nic_dev) ((q_id) & ((nic_dev)->num_qps - 1))
#define TXQ_STATS_INC(txq, field) \
{ \
u64_stats_update_begin(&(txq)->txq_stats.syncp); \
(txq)->txq_stats.field++; \
u64_stats_update_end(&(txq)->txq_stats.syncp); \
}
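/* Take a consistent snapshot of the txq statistics: retry the copy until the
* u64_stats sequence counter is stable (the retry is only needed on 32-bit
* kernels, where 64-bit counters cannot be read atomically).
*/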
void hinic_txq_get_stats(struct hinic_txq *txq,
struct hinic_txq_stats *stats)
{
struct hinic_txq_stats *txq_stats = &txq->txq_stats;
unsigned int start;
u64_stats_update_begin(&stats->syncp);
do {
start = u64_stats_fetch_begin(&txq_stats->syncp);
stats->bytes = txq_stats->bytes;
stats->packets = txq_stats->packets;
stats->busy = txq_stats->busy;
stats->wake = txq_stats->wake;
stats->dropped = txq_stats->dropped;
stats->big_frags_pkts = txq_stats->big_frags_pkts;
stats->big_udp_pkts = txq_stats->big_udp_pkts;
} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
u64_stats_update_end(&stats->syncp);
}
void hinic_txq_clean_stats(struct hinic_txq_stats *txq_stats)
{
u64_stats_update_begin(&txq_stats->syncp);
txq_stats->bytes = 0;
txq_stats->packets = 0;
txq_stats->busy = 0;
txq_stats->wake = 0;
txq_stats->dropped = 0;
txq_stats->big_frags_pkts = 0;
txq_stats->big_udp_pkts = 0;
txq_stats->ufo_pkt_unsupport = 0;
txq_stats->ufo_linearize_err = 0;
txq_stats->ufo_alloc_skb_err = 0;
txq_stats->skb_pad_err = 0;
txq_stats->frag_len_overflow = 0;
txq_stats->offload_cow_skb_err = 0;
txq_stats->alloc_cpy_frag_err = 0;
txq_stats->map_cpy_frag_err = 0;
txq_stats->map_frag_err = 0;
txq_stats->frag_size_err = 0;
txq_stats->unknown_tunnel_pkt = 0;
u64_stats_update_end(&txq_stats->syncp);
}
static void txq_stats_init(struct hinic_txq *txq)
{
struct hinic_txq_stats *txq_stats = &txq->txq_stats;
u64_stats_init(&txq_stats->syncp);
hinic_txq_clean_stats(txq_stats);
}
inline void hinic_set_buf_desc(struct hinic_sq_bufdesc *buf_descs,
dma_addr_t addr, u32 len)
{
buf_descs->hi_addr = cpu_to_be32(upper_32_bits(addr));
buf_descs->lo_addr = cpu_to_be32(lower_32_bits(addr));
buf_descs->len = cpu_to_be32(len);
}
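/* DMA-map the skb head and its frags for transmission. If the skb carries
* more frags than the hardware SGL can hold (HINIC_MAX_SKB_NR_FRAGE), the
* excess frags are copied into one contiguous buffer that is mapped as the
* last SGE, so a wqe never needs more than HINIC_MAX_SKB_NR_FRAGE + 1 SGEs.
*/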
static int tx_map_skb(struct hinic_nic_dev *nic_dev, struct sk_buff *skb,
struct hinic_txq *txq, struct hinic_tx_info *tx_info,
struct hinic_sq_bufdesc *buf_descs, u16 skb_nr_frags)
{
struct pci_dev *pdev = nic_dev->pdev;
struct hinic_dma_len *dma_len = tx_info->dma_len;
skb_frag_t *frag = NULL;
u16 base_nr_frags;
int j, i = 0;
int node, err = 0;
u32 nsize, cpy_nsize = 0;
u8 *vaddr, *cpy_buff = NULL;
if (unlikely(skb_nr_frags > HINIC_MAX_SKB_NR_FRAGE)) {
for (i = HINIC_MAX_SKB_NR_FRAGE; i <= skb_nr_frags; i++)
cpy_nsize +=
skb_frag_size(&skb_shinfo(skb)->frags[i - 1]);
if (!cpy_nsize) {
TXQ_STATS_INC(txq, alloc_cpy_frag_err);
return -EINVAL;
}
node = dev_to_node(&nic_dev->pdev->dev);
if (node == NUMA_NO_NODE)
cpy_buff = kzalloc(cpy_nsize,
GFP_ATOMIC | __GFP_NOWARN);
else
cpy_buff = kzalloc_node(cpy_nsize,
GFP_ATOMIC | __GFP_NOWARN,
node);
if (!cpy_buff) {
TXQ_STATS_INC(txq, alloc_cpy_frag_err);
return -ENOMEM;
}
tx_info->cpy_buff = cpy_buff;
for (i = HINIC_MAX_SKB_NR_FRAGE; i <= skb_nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i - 1];
nsize = skb_frag_size(frag);
vaddr = kmap_atomic(skb_frag_page(frag));
memcpy(cpy_buff, vaddr + frag->bv_offset, nsize);
kunmap_atomic(vaddr);
cpy_buff += nsize;
}
}
dma_len[0].dma = dma_map_single(&pdev->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev, dma_len[0].dma)) {
TXQ_STATS_INC(txq, map_frag_err);
err = -EFAULT;
goto map_single_err;
}
dma_len[0].len = skb_headlen(skb);
hinic_set_buf_desc(&buf_descs[0], dma_len[0].dma,
dma_len[0].len);
if (skb_nr_frags > HINIC_MAX_SKB_NR_FRAGE)
base_nr_frags = HINIC_MAX_SKB_NR_FRAGE - 1;
else
base_nr_frags = skb_nr_frags;
for (i = 0; i < base_nr_frags; ) {
frag = &(skb_shinfo(skb)->frags[i]);
nsize = skb_frag_size(frag);
i++;
dma_len[i].dma = skb_frag_dma_map(&pdev->dev, frag, 0,
nsize, DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev, dma_len[i].dma)) {
TXQ_STATS_INC(txq, map_frag_err);
i--;
err = -EFAULT;
goto frag_map_err;
}
dma_len[i].len = nsize;
hinic_set_buf_desc(&buf_descs[i], dma_len[i].dma,
dma_len[i].len);
}
if (skb_nr_frags > HINIC_MAX_SKB_NR_FRAGE) {
dma_len[HINIC_MAX_SKB_NR_FRAGE].dma =
dma_map_single(&pdev->dev, tx_info->cpy_buff,
cpy_nsize, DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev,
dma_len[HINIC_MAX_SKB_NR_FRAGE].dma)) {
TXQ_STATS_INC(txq, map_cpy_frag_err);
err = -EFAULT;
goto fusion_map_err;
}
dma_len[HINIC_MAX_SKB_NR_FRAGE].len = cpy_nsize;
hinic_set_buf_desc(&buf_descs[HINIC_MAX_SKB_NR_FRAGE],
dma_len[HINIC_MAX_SKB_NR_FRAGE].dma,
dma_len[HINIC_MAX_SKB_NR_FRAGE].len);
}
return 0;
fusion_map_err:
frag_map_err:
for (j = 0; j < i;) {
j++;
dma_unmap_page(&pdev->dev, dma_len[j].dma,
dma_len[j].len, DMA_TO_DEVICE);
}
dma_unmap_single(&pdev->dev, dma_len[0].dma, dma_len[0].len,
DMA_TO_DEVICE);
map_single_err:
kfree(tx_info->cpy_buff);
tx_info->cpy_buff = NULL;
return err;
}
static inline void tx_unmap_skb(struct hinic_nic_dev *nic_dev,
struct sk_buff *skb,
struct hinic_dma_len *dma_len,
u16 valid_nr_frags)
{
struct pci_dev *pdev = nic_dev->pdev;
int i;
u16 nr_frags = valid_nr_frags;
if (nr_frags > HINIC_MAX_SKB_NR_FRAGE)
nr_frags = HINIC_MAX_SKB_NR_FRAGE;
for (i = 0; i < nr_frags; ) {
i++;
dma_unmap_page(&pdev->dev,
dma_len[i].dma,
dma_len[i].len, DMA_TO_DEVICE);
}
dma_unmap_single(&pdev->dev, dma_len[0].dma,
dma_len[0].len, DMA_TO_DEVICE);
}
union hinic_ip {
struct iphdr *v4;
struct ipv6hdr *v6;
unsigned char *hdr;
};
union hinic_l4 {
struct tcphdr *tcp;
struct udphdr *udp;
unsigned char *hdr;
};
#define TRANSPORT_OFFSET(l4_hdr, skb) ((u32)((l4_hdr) - (skb)->data))
static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic_ip *ip,
union hinic_l4 *l4,
enum tx_offload_type offload_type,
enum sq_l3_type *l3_type, u8 *l4_proto)
{
unsigned char *exthdr;
if (ip->v4->version == 4) {
*l3_type = (offload_type == TX_OFFLOAD_CSUM) ?
IPV4_PKT_NO_CHKSUM_OFFLOAD : IPV4_PKT_WITH_CHKSUM_OFFLOAD;
*l4_proto = ip->v4->protocol;
} else if (ip->v4->version == 6) {
*l3_type = IPV6_PKT;
exthdr = ip->hdr + sizeof(*ip->v6);
*l4_proto = ip->v6->nexthdr;
if (exthdr != l4->hdr) {
__be16 frag_off = 0;
ipv6_skip_exthdr(skb, (int)(exthdr - skb->data),
l4_proto, &frag_off);
}
} else {
*l3_type = UNKNOWN_L3TYPE;
*l4_proto = 0;
}
}
static void get_inner_l4_info(struct sk_buff *skb, union hinic_l4 *l4,
enum tx_offload_type offload_type, u8 l4_proto,
enum sq_l4offload_type *l4_offload,
u32 *l4_len, u32 *offset)
{
*offset = 0;
*l4_len = 0;
*l4_offload = OFFLOAD_DISABLE;
switch (l4_proto) {
case IPPROTO_TCP:
*l4_offload = TCP_OFFLOAD_ENABLE;
*l4_len = l4->tcp->doff * 4; /* doff in unit of 4B */
/* To be consistent with TSO, the payload offset begins at the payload */
*offset = *l4_len + TRANSPORT_OFFSET(l4->hdr, skb);
break;
case IPPROTO_UDP:
*l4_offload = UDP_OFFLOAD_ENABLE;
*l4_len = sizeof(struct udphdr);
*offset = TRANSPORT_OFFSET(l4->hdr, skb);
break;
case IPPROTO_SCTP:
/* only csum offload supports SCTP */
if (offload_type != TX_OFFLOAD_CSUM)
break;
*l4_offload = SCTP_OFFLOAD_ENABLE;
*l4_len = sizeof(struct sctphdr);
/* To be consistent with UFO, the payload offset
* begins at the L4 header
*/
*offset = TRANSPORT_OFFSET(l4->hdr, skb);
break;
default:
break;
}
}
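/* Fill the checksum-offload fields of the task section. For encapsulated
* packets the outer L3 and tunnel lengths are set as well; unknown tunnel
* types fall back to software checksumming via skb_checksum_help().
* Return: 1 if checksum offload was enabled, 0 if there is nothing to do
*/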
static int hinic_tx_csum(struct hinic_txq *txq, struct hinic_sq_task *task,
u32 *queue_info, struct sk_buff *skb)
{
union hinic_ip ip;
union hinic_l4 l4;
enum sq_l3_type l3_type;
enum sq_l4offload_type l4_offload;
u32 network_hdr_len;
u32 offset, l4_len;
u8 l4_proto;
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0;
if (skb->encapsulation) {
u32 l4_tunnel_len;
u32 tunnel_type = TUNNEL_UDP_NO_CSUM;
ip.hdr = skb_network_header(skb);
if (ip.v4->version == 4) {
l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
l4_proto = ip.v4->protocol;
} else if (ip.v4->version == 6) {
unsigned char *exthdr;
__be16 frag_off;
l3_type = IPV6_PKT;
exthdr = ip.hdr + sizeof(*ip.v6);
l4_proto = ip.v6->nexthdr;
l4.hdr = skb_transport_header(skb);
if (l4.hdr != exthdr)
ipv6_skip_exthdr(skb, exthdr - skb->data,
&l4_proto, &frag_off);
} else {
l3_type = UNKNOWN_L3TYPE;
l4_proto = IPPROTO_RAW;
}
hinic_task_set_outter_l3(task, l3_type,
skb_network_header_len(skb));
switch (l4_proto) {
case IPPROTO_UDP:
l4_tunnel_len = skb_inner_network_offset(skb) -
skb_transport_offset(skb);
ip.hdr = skb_inner_network_header(skb);
l4.hdr = skb_inner_transport_header(skb);
network_hdr_len = skb_inner_network_header_len(skb);
break;
case IPPROTO_IPIP:
case IPPROTO_IPV6:
tunnel_type = NOT_TUNNEL;
l4_tunnel_len = 0;
ip.hdr = skb_inner_network_header(skb);
l4.hdr = skb_transport_header(skb);
network_hdr_len = skb_network_header_len(skb);
break;
default:
TXQ_STATS_INC(txq, unknown_tunnel_pkt);
/* Unsupported tunnel packet, disable csum offload */
skb_checksum_help(skb);
return 0;
}
hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);
} else {
ip.hdr = skb_network_header(skb);
l4.hdr = skb_transport_header(skb);
network_hdr_len = skb_network_header_len(skb);
}
get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_CSUM,
&l3_type, &l4_proto);
get_inner_l4_info(skb, &l4, TX_OFFLOAD_CSUM, l4_proto,
&l4_offload, &l4_len, &offset);
hinic_task_set_inner_l3(task, l3_type, network_hdr_len);
hinic_set_cs_inner_l4(task, queue_info, l4_offload, l4_len, offset);
return 1;
}
static __sum16 csum_magic(union hinic_ip *ip, unsigned short proto)
{
return (ip->v4->version == 4) ?
csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) :
csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0);
}
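/* Prepare the task section for TSO: seed the TCP (and tunnel UDP) checksum
* with the pseudo-header value from csum_magic() so hardware can complete it,
* and record the header lengths, payload offset and MSS in the wqe.
* Return: 1 if TSO was set up, 0 for non-GSO skbs, negative if
* skb_cow_head() fails
*/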
static int hinic_tso(struct hinic_sq_task *task, u32 *queue_info,
struct sk_buff *skb)
{
union hinic_ip ip;
union hinic_l4 l4;
enum sq_l3_type l3_type;
enum sq_l4offload_type l4_offload;
u32 network_hdr_len;
u32 offset, l4_len;
u32 ip_identify = 0;
u8 l4_proto;
int err;
if (!skb_is_gso(skb))
return 0;
err = skb_cow_head(skb, 0);
if (err < 0)
return err;
if (skb->encapsulation) {
u32 l4_tunnel_len;
u32 tunnel_type = 0;
u32 gso_type = skb_shinfo(skb)->gso_type;
ip.hdr = skb_network_header(skb);
l4.hdr = skb_transport_header(skb);
network_hdr_len = skb_inner_network_header_len(skb);
if (ip.v4->version == 4)
l3_type = IPV4_PKT_WITH_CHKSUM_OFFLOAD;
else if (ip.v4->version == 6)
l3_type = IPV6_PKT;
else
l3_type = 0;
hinic_task_set_outter_l3(task, l3_type,
skb_network_header_len(skb));
if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP);
tunnel_type = TUNNEL_UDP_CSUM;
} else if (gso_type & SKB_GSO_UDP_TUNNEL) {
tunnel_type = TUNNEL_UDP_NO_CSUM;
}
l4_tunnel_len = skb_inner_network_offset(skb) -
skb_transport_offset(skb);
hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);
ip.hdr = skb_inner_network_header(skb);
l4.hdr = skb_inner_transport_header(skb);
} else {
ip.hdr = skb_network_header(skb);
l4.hdr = skb_transport_header(skb);
network_hdr_len = skb_network_header_len(skb);
}
get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_TSO,
&l3_type, &l4_proto);
if (l4_proto == IPPROTO_TCP)
l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP);
get_inner_l4_info(skb, &l4, TX_OFFLOAD_TSO, l4_proto,
&l4_offload, &l4_len, &offset);
hinic_task_set_inner_l3(task, l3_type, network_hdr_len);
hinic_set_tso_inner_l4(task, queue_info, l4_offload, l4_len,
offset, ip_identify, skb_shinfo(skb)->gso_size);
return 1;
}
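/* Aggregate all tx offloads for one skb into the task section: try TSO
* first, fall back to checksum offload, then add vlan insertion. Returns
* TX_OFFLOAD_INVALID when the payload offset exceeds MAX_PAYLOAD_OFFSET or
* skb_cow_head() fails, in which case the caller drops the skb.
*/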
static enum tx_offload_type hinic_tx_offload(struct hinic_txq *txq,
struct sk_buff *skb,
struct hinic_sq_task *task,
u32 *queue_info, u8 avd_flag)
{
enum tx_offload_type offload = 0;
int tso_cs_en;
u16 vlan_tag;
task->pkt_info0 = 0;
task->pkt_info1 = 0;
task->pkt_info2 = 0;
tso_cs_en = hinic_tso(task, queue_info, skb);
if (tso_cs_en < 0) {
offload = TX_OFFLOAD_INVALID;
return offload;
} else if (tso_cs_en) {
offload |= TX_OFFLOAD_TSO;
} else {
tso_cs_en = hinic_tx_csum(txq, task, queue_info, skb);
if (tso_cs_en)
offload |= TX_OFFLOAD_CSUM;
}
if (unlikely(skb_vlan_tag_present(skb))) {
vlan_tag = skb_vlan_tag_get(skb);
hinic_set_vlan_tx_offload(task, queue_info, vlan_tag,
vlan_tag >> VLAN_PRIO_SHIFT);
offload |= TX_OFFLOAD_VLAN;
}
if (unlikely(SQ_CTRL_QUEUE_INFO_GET(*queue_info, PLDOFF) >
MAX_PAYLOAD_OFFSET)) {
offload = TX_OFFLOAD_INVALID;
return offload;
}
if (avd_flag == HINIC_TX_UFO_AVD)
task->pkt_info0 |= SQ_TASK_INFO0_SET(1, UFO_AVD);
if (offload) {
hinic_task_set_tx_offload_valid(task, skb_network_offset(skb));
task->pkt_info0 = be32_to_cpu(task->pkt_info0);
task->pkt_info1 = be32_to_cpu(task->pkt_info1);
task->pkt_info2 = be32_to_cpu(task->pkt_info2);
}
return offload;
}
static inline void __get_pkt_stats(struct hinic_tx_info *tx_info,
struct sk_buff *skb)
{
u32 ihs, hdr_len;
if (skb_is_gso(skb)) {
#if (defined(HAVE_SKB_INNER_TRANSPORT_HEADER) && \
defined(HAVE_SK_BUFF_ENCAPSULATION))
if (skb->encapsulation) {
#ifdef HAVE_SKB_INNER_TRANSPORT_OFFSET
ihs = skb_inner_transport_offset(skb) +
inner_tcp_hdrlen(skb);
#else
ihs = (skb_inner_transport_header(skb) - skb->data) +
inner_tcp_hdrlen(skb);
#endif
} else {
#endif
ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
#if (defined(HAVE_SKB_INNER_TRANSPORT_HEADER) && \
defined(HAVE_SK_BUFF_ENCAPSULATION))
}
#endif
hdr_len = (skb_shinfo(skb)->gso_segs - 1) * ihs;
tx_info->num_bytes = skb->len + (u64)hdr_len;
} else {
tx_info->num_bytes = skb->len > ETH_ZLEN ? skb->len : ETH_ZLEN;
}
tx_info->num_pkts = 1;
}
inline u8 hinic_get_vlan_pri(struct sk_buff *skb)
{
u16 vlan_tci = 0;
int err;
err = vlan_get_tag(skb, &vlan_tci);
if (err)
return 0;
return (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
}
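/* Called when the SQ looks full: stop the subqueue, then re-check the free
* wqebb count to close the race with a concurrent completion; if room has
* appeared, restart the subqueue and take the wqe, otherwise return NULL.
*/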
static void *__try_to_get_wqe(struct net_device *netdev, u16 q_id,
int wqebb_cnt, u16 *pi, u8 *owner)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
void *wqe = NULL;
netif_stop_subqueue(netdev, q_id);
/* We need to check again in case another CPU has just
* made room available.
*/
if (unlikely(hinic_get_sq_free_wqebbs(nic_dev->hwdev, q_id) >=
wqebb_cnt)) {
netif_start_subqueue(netdev, q_id);
/* there are enough wqebbs after the queue is woken up */
wqe = hinic_get_sq_wqe(nic_dev->hwdev, q_id,
wqebb_cnt, pi, owner);
}
return wqe;
}
#define HINIC_FRAG_STATUS_OK 0
#define HINIC_FRAG_STATUS_IGNORE 1
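/* Transmit a single skb: validate and trim zero-size trailing frags, reserve
* wqebbs (stopping the subqueue when the SQ is full), build the offload task
* section, DMA-map the buffers and post the wqe to the hardware send queue.
*/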
static netdev_tx_t hinic_send_one_skb(struct sk_buff *skb,
struct net_device *netdev,
struct hinic_txq *txq,
u8 *flag, u8 avd_flag)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
struct hinic_tx_info *tx_info;
struct hinic_sq_wqe *wqe = NULL;
enum tx_offload_type offload = 0;
u16 q_id = txq->q_id;
u32 queue_info = 0;
u8 owner = 0;
u16 pi = 0;
int err, wqebb_cnt;
u16 num_sge = 0;
u16 original_nr_frags;
u16 new_nr_frags;
u16 i;
int frag_err = HINIC_FRAG_STATUS_OK;
/* skb->dev is not initialized by netdev_alloc_skb_ip_align()
* when the length parameter is larger than PAGE_SIZE (under RedHat 7.3),
* but skb->dev is used by vlan_get_tag() and elsewhere
*/
if (unlikely(!skb->dev))
skb->dev = netdev;
if (unlikely(skb->len < MIN_SKB_LEN)) {
if (skb_pad(skb, (int)(MIN_SKB_LEN - skb->len))) {
TXQ_STATS_INC(txq, skb_pad_err);
goto tx_skb_pad_err;
}
skb->len = MIN_SKB_LEN;
}
original_nr_frags = skb_shinfo(skb)->nr_frags;
new_nr_frags = original_nr_frags;
/* If the trailing frags all have zero size, ignore them.
* If a frag in the middle has zero size, drop the skb.
*/
for (i = 0; i < original_nr_frags; i++) {
if ((skb_frag_size(&skb_shinfo(skb)->frags[i])) &&
frag_err == HINIC_FRAG_STATUS_OK)
continue;
if ((!skb_frag_size(&skb_shinfo(skb)->frags[i])) &&
frag_err == HINIC_FRAG_STATUS_OK) {
frag_err = HINIC_FRAG_STATUS_IGNORE;
new_nr_frags = i + 1;
continue;
}
if ((!skb_frag_size(&skb_shinfo(skb)->frags[i])) &&
frag_err == HINIC_FRAG_STATUS_IGNORE)
continue;
if ((skb_frag_size(&skb_shinfo(skb)->frags[i])) &&
frag_err == HINIC_FRAG_STATUS_IGNORE) {
TXQ_STATS_INC(txq, frag_size_err);
goto tx_drop_pkts;
}
}
num_sge = new_nr_frags + 1;
/* if skb->len is more than 65536B but num_sge is 1,
* the driver will drop it
*/
if (unlikely(skb->len > HINIC_GSO_MAX_SIZE && num_sge == 1)) {
TXQ_STATS_INC(txq, frag_len_overflow);
goto tx_drop_pkts;
}
/* if the sge number is more than 17, the driver will only use 17 sges */
if (unlikely(num_sge > HINIC_MAX_SQ_SGE)) {
TXQ_STATS_INC(txq, big_frags_pkts);
num_sge = HINIC_MAX_SQ_SGE;
}
wqebb_cnt = HINIC_SQ_WQEBB_CNT(num_sge);
if (likely(hinic_get_sq_free_wqebbs(nic_dev->hwdev, q_id) >=
wqebb_cnt)) {
if (likely(wqebb_cnt == 1)) {
hinic_update_sq_pi(nic_dev->hwdev, q_id,
wqebb_cnt, &pi, &owner);
wqe = txq->tx_info[pi].wqe;
} else {
wqe = hinic_get_sq_wqe(nic_dev->hwdev, q_id,
wqebb_cnt, &pi, &owner);
}
} else {
wqe = __try_to_get_wqe(netdev, q_id, wqebb_cnt, &pi, &owner);
if (likely(!wqe)) {
TXQ_STATS_INC(txq, busy);
return NETDEV_TX_BUSY;
}
}
tx_info = &txq->tx_info[pi];
tx_info->skb = skb;
tx_info->wqebb_cnt = wqebb_cnt;
tx_info->valid_nr_frags = new_nr_frags;
__get_pkt_stats(tx_info, skb);
offload = hinic_tx_offload(txq, skb, &wqe->task, &queue_info, avd_flag);
if (unlikely(offload == TX_OFFLOAD_INVALID)) {
hinic_return_sq_wqe(nic_dev->hwdev, q_id, wqebb_cnt, owner);
TXQ_STATS_INC(txq, offload_cow_skb_err);
goto tx_drop_pkts;
}
err = tx_map_skb(nic_dev, skb, txq, tx_info, wqe->buf_descs,
new_nr_frags);
if (err) {
hinic_return_sq_wqe(nic_dev->hwdev, q_id, wqebb_cnt, owner);
goto tx_drop_pkts;
}
hinic_prepare_sq_ctrl(&wqe->ctrl, queue_info, num_sge, owner);
hinic_send_sq_wqe(nic_dev->hwdev, q_id, wqe, wqebb_cnt,
nic_dev->sq_cos_mapping[hinic_get_vlan_pri(skb)]);
return NETDEV_TX_OK;
tx_drop_pkts:
dev_kfree_skb_any(skb);
tx_skb_pad_err:
TXQ_STATS_INC(txq, dropped);
*flag = HINIC_TX_DROPPED;
return NETDEV_TX_OK;
}
netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb,
struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
u16 q_id = skb_get_queue_mapping(skb);
struct hinic_txq *txq;
u8 flag = 0;
if (unlikely(!nic_dev->heart_status)) {
dev_kfree_skb_any(skb);
HINIC_NIC_STATS_INC(nic_dev, tx_carrier_off_drop);
return NETDEV_TX_OK;
}
txq = &nic_dev->txqs[q_id];
return hinic_send_one_skb(skb, netdev, txq, &flag, HINIC_TX_NON_AVD);
}
netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
u16 q_id = skb_get_queue_mapping(skb);
struct hinic_txq *txq;
u8 flag = 0;
if (unlikely(!netif_carrier_ok(netdev) ||
!nic_dev->heart_status)) {
dev_kfree_skb_any(skb);
HINIC_NIC_STATS_INC(nic_dev, tx_carrier_off_drop);
return NETDEV_TX_OK;
}
if (unlikely(q_id >= nic_dev->num_qps)) {
txq = &nic_dev->txqs[0];
HINIC_NIC_STATS_INC(nic_dev, tx_invalid_qid);
goto tx_drop_pkts;
}
txq = &nic_dev->txqs[q_id];
return hinic_send_one_skb(skb, netdev, txq, &flag, HINIC_TX_NON_AVD);
tx_drop_pkts:
dev_kfree_skb_any(skb);
u64_stats_update_begin(&txq->txq_stats.syncp);
txq->txq_stats.dropped++;
u64_stats_update_end(&txq->txq_stats.syncp);
return NETDEV_TX_OK;
}
static inline void tx_free_skb(struct hinic_nic_dev *nic_dev,
struct sk_buff *skb,
struct hinic_tx_info *tx_info)
{
tx_unmap_skb(nic_dev, skb, tx_info->dma_len, tx_info->valid_nr_frags);
kfree(tx_info->cpy_buff);
tx_info->cpy_buff = NULL;
dev_kfree_skb_any(skb);
}
static void free_all_tx_skbs(struct hinic_txq *txq)
{
struct hinic_nic_dev *nic_dev = netdev_priv(txq->netdev);
struct hinic_tx_info *tx_info;
u16 ci;
int free_wqebbs = hinic_get_sq_free_wqebbs(nic_dev->hwdev,
txq->q_id) + 1;
while (free_wqebbs < txq->q_depth) {
ci = hinic_get_sq_local_ci(nic_dev->hwdev, txq->q_id);
tx_info = &txq->tx_info[ci];
tx_free_skb(nic_dev, tx_info->skb, tx_info);
hinic_update_sq_local_ci(nic_dev->hwdev, txq->q_id,
tx_info->wqebb_cnt);
free_wqebbs += tx_info->wqebb_cnt;
}
}
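/**
* hinic_tx_poll - reclaim completed tx wqes, NAPI-style
* @txq: tx queue to clean
* @budget: maximum number of packets to reclaim in one call
* Frees skbs whose wqebbs the hardware consumer index has passed and
* re-wakes the subqueue if it was stopped and room became available.
* Return: number of packets reclaimed
*/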
int hinic_tx_poll(struct hinic_txq *txq, int budget)
{
struct hinic_nic_dev *nic_dev = netdev_priv(txq->netdev);
struct sk_buff *skb;
struct hinic_tx_info *tx_info;
u64 tx_bytes = 0, wake = 0;
int pkts = 0, nr_pkts = 0, wqebb_cnt = 0;
u16 hw_ci, sw_ci = 0, q_id = txq->q_id;
hw_ci = hinic_get_sq_hw_ci(nic_dev->hwdev, q_id);
dma_rmb();
sw_ci = hinic_get_sq_local_ci(nic_dev->hwdev, q_id);
do {
tx_info = &txq->tx_info[sw_ci];
/* check whether all wqebbs of this wqe have completed */
if (hw_ci == sw_ci || ((hw_ci - sw_ci) &
txq->q_mask) < tx_info->wqebb_cnt) {
break;
}
sw_ci = (u16)(sw_ci + tx_info->wqebb_cnt) & txq->q_mask;
prefetch(&txq->tx_info[sw_ci]);
wqebb_cnt += tx_info->wqebb_cnt;
skb = tx_info->skb;
tx_bytes += tx_info->num_bytes;
nr_pkts += tx_info->num_pkts;
pkts++;
tx_free_skb(nic_dev, skb, tx_info);
} while (likely(pkts < budget));
hinic_update_sq_local_ci(nic_dev->hwdev, q_id, wqebb_cnt);
if (unlikely(__netif_subqueue_stopped(nic_dev->netdev, q_id) &&
hinic_get_sq_free_wqebbs(nic_dev->hwdev, q_id) >= 1 &&
test_bit(HINIC_INTF_UP, &nic_dev->flags))) {
struct netdev_queue *netdev_txq =
netdev_get_tx_queue(txq->netdev, q_id);
__netif_tx_lock(netdev_txq, smp_processor_id());
/* re-check under the tx lock to avoid racing with xmit_frame */
if (__netif_subqueue_stopped(nic_dev->netdev, q_id)) {
netif_wake_subqueue(nic_dev->netdev, q_id);
wake++;
}
__netif_tx_unlock(netdev_txq);
}
u64_stats_update_begin(&txq->txq_stats.syncp);
txq->txq_stats.bytes += tx_bytes;
txq->txq_stats.packets += nr_pkts;
txq->txq_stats.wake += wake;
u64_stats_update_end(&txq->txq_stats.syncp);
return pkts;
}
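/* Walk the whole SQ once to cache each slot's wqe address in tx_info, then
* return all wqebbs so the queue is left empty.
* Return: number of slots initialized, which should equal q_depth
*/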
int hinic_setup_tx_wqe(struct hinic_txq *txq)
{
struct net_device *netdev = txq->netdev;
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
struct hinic_sq_wqe *wqe;
struct hinic_tx_info *tx_info;
u16 pi = 0;
int i;
u8 owner = 0;
for (i = 0; i < txq->q_depth; i++) {
tx_info = &txq->tx_info[i];
wqe = hinic_get_sq_wqe(nic_dev->hwdev, txq->q_id,
1, &pi, &owner);
if (!wqe) {
nicif_err(nic_dev, drv, netdev, "Failed to get SQ wqe\n");
break;
}
tx_info->wqe = wqe;
}
hinic_return_sq_wqe(nic_dev->hwdev, txq->q_id, txq->q_depth, owner);
return i;
}
int hinic_setup_all_tx_resources(struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
struct hinic_txq *txq;
u64 tx_info_sz;
u16 i, q_id;
int err;
for (q_id = 0; q_id < nic_dev->num_qps; q_id++) {
txq = &nic_dev->txqs[q_id];
tx_info_sz = txq->q_depth * sizeof(*txq->tx_info);
if (!tx_info_sz) {
nicif_err(nic_dev, drv, netdev, "Cannot allocate zero size txq%d info\n",
q_id);
err = -EINVAL;
goto init_txq_err;
}
txq->tx_info = kzalloc(tx_info_sz, GFP_KERNEL);
if (!txq->tx_info) {
nicif_err(nic_dev, drv, netdev, "Failed to allocate Tx:%d info\n",
q_id);
err = -ENOMEM;
goto init_txq_err;
}
err = hinic_setup_tx_wqe(txq);
if (err != txq->q_depth) {
nicif_err(nic_dev, drv, netdev, "Failed to setup Tx: %d wqe\n",
q_id);
q_id++;
goto init_txq_err;
}
}
return 0;
init_txq_err:
for (i = 0; i < q_id; i++) {
txq = &nic_dev->txqs[i];
kfree(txq->tx_info);
}
return err;
}
void hinic_free_all_tx_resources(struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
struct hinic_txq *txq;
u16 q_id;
for (q_id = 0; q_id < nic_dev->num_qps; q_id++) {
txq = &nic_dev->txqs[q_id];
free_all_tx_skbs(txq);
kfree(txq->tx_info);
}
}
void hinic_set_sq_default_cos(struct net_device *netdev, u8 cos_id)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
int up;
for (up = HINIC_DCB_UP_MAX - 1; up >= 0; up--)
nic_dev->sq_cos_mapping[up] = nic_dev->default_cos_id;
}
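/* Build the UP (user priority) to CoS mapping for the send queues: a VF
* inherits the default CoS from its PF, while a PF uses the DCB up_cos
* table when DCB is enabled (the default CoS otherwise) and records the
* result via hinic_set_dcb_state().
*/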
int hinic_sq_cos_mapping(struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
struct hinic_dcb_state dcb_state = {0};
u8 default_cos = 0;
int err;
if (HINIC_FUNC_IS_VF(nic_dev->hwdev)) {
err = hinic_get_pf_dcb_state(nic_dev->hwdev, &dcb_state);
if (err) {
hinic_info(nic_dev, drv, "Failed to get vf default cos\n");
return err;
}
default_cos = dcb_state.default_cos;
nic_dev->default_cos_id = default_cos;
hinic_set_sq_default_cos(nic_dev->netdev, default_cos);
} else {
default_cos = nic_dev->default_cos_id;
if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags))
memcpy(nic_dev->sq_cos_mapping, nic_dev->up_cos,
sizeof(nic_dev->sq_cos_mapping));
else
hinic_set_sq_default_cos(nic_dev->netdev, default_cos);
dcb_state.dcb_on = !!test_bit(HINIC_DCB_ENABLE,
&nic_dev->flags);
dcb_state.default_cos = default_cos;
memcpy(dcb_state.up_cos, nic_dev->sq_cos_mapping,
sizeof(dcb_state.up_cos));
err = hinic_set_dcb_state(nic_dev->hwdev, &dcb_state);
if (err)
hinic_info(nic_dev, drv, "Failed to set vf default cos\n");
}
return err;
}
int hinic_alloc_txqs(struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
struct pci_dev *pdev = nic_dev->pdev;
struct hinic_txq *txq;
u16 q_id, num_txqs = nic_dev->max_qps;
u64 txq_size;
txq_size = num_txqs * sizeof(*nic_dev->txqs);
if (!txq_size) {
nic_err(&pdev->dev, "Cannot allocate zero size txqs\n");
return -EINVAL;
}
nic_dev->txqs = kzalloc(txq_size, GFP_KERNEL);
if (!nic_dev->txqs) {
nic_err(&pdev->dev, "Failed to allocate txqs\n");
return -ENOMEM;
}
for (q_id = 0; q_id < num_txqs; q_id++) {
txq = &nic_dev->txqs[q_id];
txq->netdev = netdev;
txq->q_id = q_id;
txq->q_depth = nic_dev->sq_depth;
txq->q_mask = nic_dev->sq_depth - 1;
txq_stats_init(txq);
}
return 0;
}
void hinic_free_txqs(struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
kfree(nic_dev->txqs);
}
/* the caller must stop transmitting packets before calling this function */
#define HINIC_FLUSH_QUEUE_TIMEOUT 1000
static bool hinic_get_hw_handle_status(void *hwdev, u16 q_id)
{
u16 sw_pi = 0, hw_ci = 0;
sw_pi = hinic_dbg_get_sq_pi(hwdev, q_id);
hw_ci = hinic_get_sq_hw_ci(hwdev, q_id);
return sw_pi == hw_ci;
}
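/* Wait for the hardware to drain the SQ: first poll until the hw consumer
* index catches up with the sw producer index, then ask the hardware to
* drop the remaining packets and poll again before giving up.
* Return: 0 - success, -EFAULT - the queue never drained
*/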
int hinic_stop_sq(struct hinic_txq *txq)
{
struct hinic_nic_dev *nic_dev = netdev_priv(txq->netdev);
unsigned long timeout;
int err;
timeout = msecs_to_jiffies(HINIC_FLUSH_QUEUE_TIMEOUT) + jiffies;
do {
if (hinic_get_hw_handle_status(nic_dev->hwdev, txq->q_id))
return 0;
usleep_range(900, 1000);
} while (time_before(jiffies, timeout));
/* force hardware to drop packets */
timeout = msecs_to_jiffies(HINIC_FLUSH_QUEUE_TIMEOUT) + jiffies;
do {
if (hinic_get_hw_handle_status(nic_dev->hwdev, txq->q_id))
return 0;
err = hinic_force_drop_tx_pkt(nic_dev->hwdev);
if (err)
break;
usleep_range(9900, 10000);
} while (time_before(jiffies, timeout));
/* check once more in case the sleep overran the timeout and gave a false failure */
if (hinic_get_hw_handle_status(nic_dev->hwdev, txq->q_id))
return 0;
return -EFAULT;
}
void hinic_flush_txqs(struct net_device *netdev)
{
struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
u16 qid;
int err;
for (qid = 0; qid < nic_dev->num_qps; qid++) {
err = hinic_stop_sq(&nic_dev->txqs[qid]);
if (err)
nicif_err(nic_dev, drv, netdev,
"Failed to stop sq%d\n", qid);
}
} /*lint -e766*/
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei HiNIC PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
*/
#ifndef HINIC_TX_H
#define HINIC_TX_H
enum tx_offload_type {
TX_OFFLOAD_TSO = BIT(0),
TX_OFFLOAD_CSUM = BIT(1),
TX_OFFLOAD_VLAN = BIT(2),
TX_OFFLOAD_INVALID = BIT(3),
};
struct hinic_txq_stats {
u64 packets;
u64 bytes;
u64 busy;
u64 wake;
u64 dropped;
u64 big_frags_pkts;
u64 big_udp_pkts;
/* subdivision statistics shown in the private tool */
u64 ufo_pkt_unsupport;
u64 ufo_linearize_err;
u64 ufo_alloc_skb_err;
u64 skb_pad_err;
u64 frag_len_overflow;
u64 offload_cow_skb_err;
u64 alloc_cpy_frag_err;
u64 map_cpy_frag_err;
u64 map_frag_err;
u64 frag_size_err;
u64 unknown_tunnel_pkt;
struct u64_stats_sync syncp;
};
struct hinic_dma_len {
dma_addr_t dma;
u32 len;
};
#define MAX_SGE_NUM_PER_WQE 17
struct hinic_tx_info {
struct sk_buff *skb;
int wqebb_cnt;
int num_sge;
void *wqe;
u8 *cpy_buff;
u16 valid_nr_frags;
u16 num_pkts;
u64 num_bytes;
struct hinic_dma_len dma_len[MAX_SGE_NUM_PER_WQE];
};
struct hinic_txq {
struct net_device *netdev;
u16 q_id;
u16 q_depth;
u16 q_mask;
struct hinic_txq_stats txq_stats;
u64 last_moder_packets;
u64 last_moder_bytes;
struct hinic_tx_info *tx_info;
};
enum hinic_tx_xmit_status {
HINIC_TX_OK = 0,
HINIC_TX_DROPPED = 1,
HINIC_TX_BUSY = 2,
};
enum hinic_tx_avd_type {
HINIC_TX_NON_AVD = 0,
HINIC_TX_UFO_AVD = 1,
};
void hinic_txq_clean_stats(struct hinic_txq_stats *txq_stats);
void hinic_txq_get_stats(struct hinic_txq *txq,
struct hinic_txq_stats *stats);
netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb,
struct net_device *netdev);
netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
int hinic_setup_all_tx_resources(struct net_device *netdev);
void hinic_free_all_tx_resources(struct net_device *netdev);
void hinic_set_sq_default_cos(struct net_device *netdev, u8 cos_id);
int hinic_sq_cos_mapping(struct net_device *netdev);
int hinic_alloc_txqs(struct net_device *netdev);
void hinic_free_txqs(struct net_device *netdev);
int hinic_tx_poll(struct hinic_txq *txq, int budget);
u8 hinic_get_vlan_pri(struct sk_buff *skb);
void hinic_flush_txqs(struct net_device *netdev);
#endif