提交 79d2dfad 编写于 作者: C Chenguangli 提交者: Yang Yingliang

scsi/hifc: add hifc driver port manager module

driver inclusion
category: feature
bugzilla: NA

-----------------------------------------------------------------------

This module includes the tool channel for processing tool commands and
management commands for managing nport and hba resources.
Signed-off-by: NChenguangli <chenguangli2@huawei.com>
Reviewed-by: NZengweiliang <zengweiliang.zengweiliang@huawei.com>
Acked-by: NHanjun Guo <guohanjun@huawei.com>
Signed-off-by: NYang Yingliang <yangyingliang@huawei.com>
上级 08c048e9
// SPDX-License-Identifier: GPL-2.0
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/rtc.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/if.h>
#include <linux/ioctl.h>
#include <linux/pci.h>
#include <linux/fs.h>
#include "hifc_knl_adp.h"
#include "hifc_hw.h"
#include "hifc_hwdev.h"
#include "hifc_hwif.h"
#include "hifc_api_cmd.h"
#include "hifc_mgmt.h"
#include "hifc_lld.h"
#include "hifc_tool.h"
#include "hifc_dbgtool_knl.h"
/* Payload of the HIFC_SELF_CMD_UP2PF_FFM message reported by the management
 * CPU: one fault/interrupt record.
 */
struct ffm_intr_info {
	u8 node_id;
	/* error level of the interrupt source */
	u8 err_level;
	/* Classification by interrupt source properties */
	u16 err_type;
	u32 err_csr_addr;
	u32 err_csr_value;
};

/* upper bound for any tool command or acknowledgement payload (2 KiB) */
#define DBGTOOL_MSG_MAX_SIZE 2048ULL
#define HIFC_SELF_CMD_UP2PF_FFM 0x26

/* per-card bookkeeping, indexed by the card id parsed from the chip name */
void *g_card_node_array[MAX_CARD_NUM] = {0};
void *g_card_vir_addr[MAX_CARD_NUM] = {0};
u64 g_card_phy_addr[MAX_CARD_NUM] = {0};
/* lock for g_card_vir_addr */
struct mutex g_addr_lock;
/* card most recently addressed through the ioctl interface; also selects
 * which card's buffer hifc_mem_mmap() maps when the tool passes offset 0
 */
int card_id;

/* dbgtool character device name, class name, dev path */
#define CHR_DEV_DBGTOOL "hifc_dbgtool_chr_dev"
#define CLASS_DBGTOOL "hifc_dbgtool_class"
#define DBGTOOL_DEV_PATH "/dev/hifc_dbgtool_chr_dev"

/* per-card dbgtool state: command serialization + FFM fault records */
struct dbgtool_k_glb_info {
	struct semaphore dbgtool_sem;
	struct ffm_record_info *ffm;
};

dev_t dbgtool_dev_id; /* device id */
struct cdev dbgtool_chr_dev; /* struct of char device */
/*lint -save -e104 -e808*/
struct class *dbgtool_d_class; /* struct of char class */
/*lint -restore*/
/* the char device is created once and shared by all cards; ref counted */
int g_dbgtool_init_flag;
int g_dbgtool_ref_cnt;
/* open(): the device keeps no per-open state, so always succeed */
static int dbgtool_knl_open(struct inode *pnode,
			    struct file *pfile)
{
	return 0;
}
/* release(): nothing was allocated in open(), so nothing to tear down */
static int dbgtool_knl_release(struct inode *pnode,
			       struct file *pfile)
{
	return 0;
}
/* read(): not supported — all data exchange goes through ioctl/mmap;
 * always reports 0 bytes read
 */
static ssize_t dbgtool_knl_read(struct file *pfile,
				char __user *ubuf,
				size_t size,
				loff_t *ppos)
{
	return 0;
}
/* write(): not supported — all data exchange goes through ioctl/mmap;
 * always reports 0 bytes written
 */
static ssize_t dbgtool_knl_write(struct file *pfile,
				 const char __user *ubuf,
				 size_t size,
				 loff_t *ppos)
{
	return 0;
}
/* Check whether @offset matches one of the per-card DMA buffer physical
 * addresses previously recorded in g_card_phy_addr[].
 */
static bool is_valid_phy_addr(u64 offset)
{
	int idx;

	for (idx = 0; idx < MAX_CARD_NUM; idx++) {
		if (g_card_phy_addr[idx] == offset)
			return true;
	}

	return false;
}
/* Map the per-card dbgtool buffer (or a validated physical/BAR address
 * supplied by the tool via vm_pgoff) into user space.
 * Returns 0 on success, -EAGAIN on any validation or remap failure.
 */
int hifc_mem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long vmsize = vma->vm_end - vma->vm_start;
	phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
	phys_addr_t phy_addr;

	/* never allow mapping more than the kernel-side allocation */
	if (vmsize > (PAGE_SIZE * (1 << DBGTOOL_PAGE_ORDER))) {
		pr_err("Map size = %lu is bigger than alloc\n", vmsize);
		return -EAGAIN;
	}

	/* a non-zero offset must be either one of our DMA buffers or a
	 * device BAR address — reject arbitrary physical addresses
	 */
	if (offset && !is_valid_phy_addr((u64)offset) &&
	    !hifc_is_valid_bar_addr((u64)offset)) {
		pr_err("offset is invalid");
		return -EAGAIN;
	}

	/* old version of tool set vma->vm_pgoff to 0 */
	phy_addr = offset ? offset : g_card_phy_addr[card_id];
	if (!phy_addr) {
		pr_err("Card_id = %d physical address is 0\n", card_id);
		return -EAGAIN;
	}

	if (remap_pfn_range(vma, vma->vm_start,
			    (phy_addr >> PAGE_SHIFT),
			    vmsize, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
/**
 * dbgtool_knl_api_cmd_read - used for read operations
 * @para: the dbgtool parameter (cmd/ack pointers and sizes come from it)
 * @g_func_handle_array: global function handle table, indexed by PF id
 *
 * Copies the command from user space, issues an API-chain read to the chip
 * and copies the acknowledgement back to user space.
 * Return: 0 - success, negative - failure
 */
long dbgtool_knl_api_cmd_read(struct dbgtool_param *para,
			      void **g_func_handle_array)
{
	long ret = 0;
	u8 *cmd;
	u16 size;
	void *ack;
	u16 ack_size;
	u32 pf_id;
	void *hwdev;

	pf_id = para->param.api_rd.pf_id;
	/* a card exposes at most 16 PFs */
	if (pf_id >= 16) {
		pr_err("PF id(0x%x) too big\n", pf_id);
		return -EFAULT;
	}

	/* obtaining pf_id chipif pointer */
	hwdev = g_func_handle_array[pf_id];
	if (!hwdev) {
		pr_err("PF id(0x%x) handle null in api cmd read\n", pf_id);
		return -EFAULT;
	}

	/* alloc cmd and ack memory */
	size = para->param.api_rd.size;
	if (para->param.api_rd.size == 0 || size > DBGTOOL_MSG_MAX_SIZE) {
		pr_err("Read cmd size invalid or more than 2K\n");
		return -EINVAL;
	}
	cmd = kzalloc((unsigned long long)size, GFP_KERNEL);
	if (!cmd) {
		pr_err("Alloc read cmd mem fail\n");
		return -ENOMEM;
	}

	ack_size = para->param.api_rd.ack_size;
	if (para->param.api_rd.ack_size == 0 ||
	    ack_size > DBGTOOL_MSG_MAX_SIZE) {
		/* fix: the old message claimed "size is 0" even for the
		 * too-large case, and returned -ENOMEM for what is a
		 * parameter error; report -EINVAL like the cmd size check
		 */
		pr_err("Read cmd ack size invalid or more than 2K\n");
		ret = -EINVAL;
		goto alloc_ack_mem_fail;
	}

	ack = kzalloc((unsigned long long)ack_size, GFP_KERNEL);
	if (!ack) {
		pr_err("Alloc read ack mem fail\n");
		ret = -ENOMEM;
		goto alloc_ack_mem_fail;
	}

	/* cmd content copied from user-mode */
	if (copy_from_user(cmd, para->param.api_rd.cmd, (unsigned long)size)) {
		pr_err("Copy cmd from user fail\n");
		ret = -EFAULT;
		goto copy_user_cmd_fail;
	}

	/* Invoke the api cmd interface read content */
	ret = hifc_api_cmd_read_ack(hwdev, para->param.api_rd.dest,
				    cmd, size, ack, ack_size);
	if (ret) {
		pr_err("Api send single cmd ack fail!\n");
		goto api_rd_fail;
	}

	/* Copy the contents of the ack to the user state */
	if (copy_to_user(para->param.api_rd.ack, ack, ack_size)) {
		pr_err("Copy ack to user fail\n");
		ret = -EFAULT;
	}

api_rd_fail:
copy_user_cmd_fail:
	kfree(ack);
alloc_ack_mem_fail:
	kfree(cmd);
	return ret;
}
/**
* dbgtool_knl_api_cmd_write - used for write operations
* @para: the dbgtool parameter
* @g_func_handle_array: global function handle
* Return: 0 - success, negative - failure
*/
long dbgtool_knl_api_cmd_write(struct dbgtool_param *para,
void **g_func_handle_array)
{
long ret = 0;
u8 *cmd;
u16 size;
u32 pf_id;
void *hwdev;
pf_id = para->param.api_wr.pf_id;
if (pf_id >= 16) {
pr_err("PF id(0x%x) too big\n", pf_id);
return -EFAULT;
}
/* obtaining chipif pointer according to pf_id */
hwdev = g_func_handle_array[pf_id];
if (!hwdev) {
pr_err("PF id(0x%x) handle null\n", pf_id);
return -EFAULT;
}
/* alloc cmd memory */
size = para->param.api_wr.size;
if (para->param.api_wr.size == 0 || size > DBGTOOL_MSG_MAX_SIZE) {
pr_err("Write cmd size invalid or more than 2K\n");
return -EINVAL;
}
cmd = kzalloc((unsigned long long)size, GFP_KERNEL);
if (!cmd) {
pr_err("Alloc write cmd mem fail\n");
return -ENOMEM;
}
/* cmd content copied from user-mode */
if (copy_from_user(cmd, para->param.api_wr.cmd, (unsigned long)size)) {
pr_err("Copy cmd from user fail\n");
ret = -EFAULT;
goto copy_user_cmd_fail;
}
/* api cmd interface is invoked to write the content */
ret = hifc_api_cmd_write_nack(hwdev, para->param.api_wr.dest,
cmd, size);
if (ret)
pr_err("Api send single cmd nack fail\n");
copy_user_cmd_fail:
kfree(cmd);
return ret;
}
void chipif_get_all_pf_dev_info(struct pf_dev_info *dev_info, int card_idx,
void **g_func_handle_array)
{
u32 func_idx;
struct hifc_hwdev *hwdev;
if (!dev_info) {
pr_err("Params error!\n");
return;
}
/* pf at most 16 */
for (func_idx = 0; func_idx < 16; func_idx++) {
hwdev = (struct hifc_hwdev *)g_func_handle_array[func_idx];
dev_info[func_idx].phy_addr = g_card_phy_addr[card_idx];
if (!hwdev) {
dev_info[func_idx].bar0_size = 0;
dev_info[func_idx].bus = 0;
dev_info[func_idx].slot = 0;
dev_info[func_idx].func = 0;
} else {
dev_info[func_idx].bar0_size =
pci_resource_len
(((struct pci_dev *)hwdev->pcidev_hdl), 0);
dev_info[func_idx].bus =
((struct pci_dev *)
hwdev->pcidev_hdl)->bus->number;
dev_info[func_idx].slot =
PCI_SLOT(((struct pci_dev *)hwdev->pcidev_hdl)
->devfn);
dev_info[func_idx].func =
PCI_FUNC(((struct pci_dev *)hwdev->pcidev_hdl)
->devfn);
}
}
}
/**
 * dbgtool_knl_pf_dev_info_get - Obtain the pf sdk_info
 * @para: the dbgtool parameter
 * @g_func_handle_array: global function handle
 *
 * Lazily allocates the per-card buffer that user space later maps through
 * hifc_mem_mmap(), then reports every PF's BAR/BDF info to user space.
 * Return: 0 - success, negative - failure
 */
long dbgtool_knl_pf_dev_info_get(struct dbgtool_param *para,
				 void **g_func_handle_array)
{
	struct pf_dev_info dev_info[16] = { {0} };
	unsigned char *tmp;
	int i;

	mutex_lock(&g_addr_lock);
	/* first call for this card: allocate and publish its DMA buffer */
	if (!g_card_vir_addr[card_id]) {
		g_card_vir_addr[card_id] =
			(void *)__get_free_pages(GFP_KERNEL,
						 DBGTOOL_PAGE_ORDER);
		if (!g_card_vir_addr[card_id]) {
			pr_err("Alloc dbgtool api chain fail!\n");
			mutex_unlock(&g_addr_lock);
			return -EFAULT;
		}

		memset(g_card_vir_addr[card_id], 0,
		       PAGE_SIZE * (1 << DBGTOOL_PAGE_ORDER));

		g_card_phy_addr[card_id] =
			virt_to_phys(g_card_vir_addr[card_id]);
		if (!g_card_phy_addr[card_id]) {
			pr_err("phy addr for card %d is 0\n", card_id);
			free_pages((unsigned long)g_card_vir_addr[card_id],
				   DBGTOOL_PAGE_ORDER);
			g_card_vir_addr[card_id] = NULL;
			mutex_unlock(&g_addr_lock);
			return -EFAULT;
		}

		/* reserve the pages so they may be remapped to user space */
		tmp = g_card_vir_addr[card_id];
		for (i = 0; i < (1 << DBGTOOL_PAGE_ORDER); i++) {
			SetPageReserved(virt_to_page(tmp));
			tmp += PAGE_SIZE;
		}
	}
	mutex_unlock(&g_addr_lock);

	chipif_get_all_pf_dev_info(dev_info, card_id, g_func_handle_array);

	/* Copy the dev_info to user mode */
	if (copy_to_user(para->param.dev_info, dev_info,
			 (unsigned int)sizeof(dev_info))) {
		pr_err("Copy dev_info to user fail\n");
		return -EFAULT;
	}

	return 0;
}
/**
 * dbgtool_knl_ffm_info_rd - Read ffm information
 * @para: the dbgtool parameter
 * @dbgtool_info: the dbgtool info
 * Return: 0 - success, negative - failure
 */
long dbgtool_knl_ffm_info_rd(struct dbgtool_param *para,
			     struct dbgtool_k_glb_info *dbgtool_info)
{
	unsigned long not_copied;

	/* hand the accumulated fault records to user space */
	not_copied = copy_to_user(para->param.ffm_rd, dbgtool_info->ffm,
				  (unsigned int)sizeof(struct ffm_record_info));
	if (not_copied) {
		pr_err("Copy ffm_info to user fail\n");
		return -EFAULT;
	}

	return 0;
}
/**
 * dbgtool_knl_ffm_info_clr - Clear FFM information
 * @para: unused
 * @dbgtool_info: the dbgtool info
 *
 * Resetting the record counter logically empties the FFM buffer; the stale
 * entries are simply overwritten by later interrupts.
 */
void dbgtool_knl_ffm_info_clr(struct dbgtool_param *para,
			      struct dbgtool_k_glb_info *dbgtool_info)
{
	dbgtool_info->ffm->ffm_num = 0;
}
/**
 * dbgtool_knl_msg_to_up - After receiving dbgtool command sends a message to uP
 * @para: the dbgtool parameter
 * @g_func_handle_array: global function handle
 *
 * Forwards a tool-supplied message to the management CPU through the chosen
 * PF and returns the response (and its size) to user space.
 * Return: 0 - success, negative - failure
 */
long dbgtool_knl_msg_to_up(struct dbgtool_param *para,
			   void **g_func_handle_array)
{
	long ret = 0;
	void *buf_in;
	void *buf_out;
	u16 out_size;
	u8 pf_id;

	if (para->param.msg2up.in_size > DBGTOOL_MSG_MAX_SIZE) {
		pr_err("User data(%d) more than 2KB\n",
		       para->param.msg2up.in_size);
		return -EFAULT;
	}

	pf_id = para->param.msg2up.pf_id;
	/* pf at most 16 */
	if (pf_id >= 16) {
		pr_err("PF id(0x%x) too big in message to mgmt\n", pf_id);
		return -EFAULT;
	}

	if (!g_func_handle_array[pf_id]) {
		pr_err("PF id(0x%x) handle null in message to mgmt\n", pf_id);
		return -EFAULT;
	}

	/* alloc buf_in and buf_out memory, apply for 2K */
	buf_in = kzalloc(DBGTOOL_MSG_MAX_SIZE, GFP_KERNEL);
	if (!buf_in) {
		pr_err("Alloc buf_in mem fail\n");
		return -ENOMEM;
	}

	/* fix: the GFP flags argument was 0, which is not a valid
	 * allocation context; use GFP_KERNEL like every other allocation
	 * in this file (we are in ioctl/process context here)
	 */
	buf_out = kzalloc(DBGTOOL_MSG_MAX_SIZE, GFP_KERNEL);
	if (!buf_out) {
		pr_err("Alloc buf_out mem fail\n");
		ret = -ENOMEM;
		goto alloc_buf_out_mem_fail;
	}

	/* copy buf_in from the user state */
	if (copy_from_user(buf_in, para->param.msg2up.buf_in,
			   (unsigned long)para->param.msg2up.in_size)) {
		pr_err("Copy buf_in from user fail\n");
		ret = -EFAULT;
		goto copy_user_buf_in_fail;
	}

	out_size = DBGTOOL_MSG_MAX_SIZE;
	/* Invoke the pf2up communication interface */
	ret = hifc_msg_to_mgmt_sync(g_func_handle_array[pf_id],
				    para->param.msg2up.mod,
				    para->param.msg2up.cmd,
				    buf_in,
				    para->param.msg2up.in_size,
				    buf_out,
				    &out_size,
				    0);
	if (ret)
		goto msg_2_up_fail;

	/* Copy the out_size and buf_out content to user mode */
	if (copy_to_user(para->param.msg2up.out_size, &out_size,
			 (unsigned int)sizeof(out_size))) {
		pr_err("Copy out_size to user fail\n");
		ret = -EFAULT;
		goto copy_out_size_fail;
	}

	if (copy_to_user(para->param.msg2up.buf_out, buf_out, out_size)) {
		pr_err("Copy buf_out to user fail\n");
		ret = -EFAULT;
	}

copy_out_size_fail:
msg_2_up_fail:
copy_user_buf_in_fail:
	kfree(buf_out);
alloc_buf_out_mem_fail:
	kfree(buf_in);
	return ret;
}
/* Undo dbgtool_knl_pf_dev_info_get()'s allocation for card @id: unreserve
 * the pages, free them, and clear the published addresses. A card with no
 * buffer is a no-op. Always returns 0.
 */
long dbgtool_knl_free_mem(int id)
{
	unsigned char *page;
	int pg;

	mutex_lock(&g_addr_lock);

	if (!g_card_vir_addr[id]) {
		mutex_unlock(&g_addr_lock);
		return 0;
	}

	page = g_card_vir_addr[id];
	for (pg = 0; pg < (1 << DBGTOOL_PAGE_ORDER); pg++, page += PAGE_SIZE)
		ClearPageReserved(virt_to_page(page));

	free_pages((unsigned long)g_card_vir_addr[id], DBGTOOL_PAGE_ORDER);
	g_card_vir_addr[id] = NULL;
	g_card_phy_addr[id] = 0;

	mutex_unlock(&g_addr_lock);
	return 0;
}
/* Resolve a chip name (e.g. "hifc0") to its index in g_card_node_array;
 * returns -EFAULT when no registered card matches.
 */
static int get_card_id_by_name(char *chip_name)
{
	struct card_node *node;
	int idx;

	for (idx = 0; idx < MAX_CARD_NUM; idx++) {
		node = (struct card_node *)g_card_node_array[idx];
		if (node && !strncmp(chip_name, node->chip_name, IFNAMSIZ))
			return idx;
	}

	pr_err("Can't find this card %s\n", chip_name);
	return -EFAULT;
}
/*lint -save -e771 -e794*/
/* Dispatch one validated ioctl to its handler, serialized per card by the
 * card's dbgtool semaphore. Also records @idx in the global card_id so a
 * later mmap without an explicit offset targets this card.
 */
static long process_dbgtool_cmd(struct dbgtool_param *param, unsigned int cmd,
				int idx)
{
	struct dbgtool_k_glb_info *dbgtool_info;
	struct card_node *card_info = NULL;
	unsigned int real_cmd;
	long ret = 0;

	card_id = idx;
	card_info = (struct card_node *)g_card_node_array[idx];
	dbgtool_info = (struct dbgtool_k_glb_info *)card_info->dbgtool_info;

	/* one tool command at a time per card */
	down(&dbgtool_info->dbgtool_sem);

	/* the ioctl number field carries the dbgtool command code */
	real_cmd = _IOC_NR(cmd);

	switch (real_cmd) {
	case DBGTOOL_CMD_API_RD:
		ret = dbgtool_knl_api_cmd_read(param,
					       card_info->func_handle_array);
		break;
	case DBGTOOL_CMD_API_WR:
		ret = dbgtool_knl_api_cmd_write(param,
						card_info->func_handle_array);
		break;
	case DBGTOOL_CMD_FFM_RD:
		ret = dbgtool_knl_ffm_info_rd(param, dbgtool_info);
		break;
	case DBGTOOL_CMD_FFM_CLR:
		dbgtool_knl_ffm_info_clr(param, dbgtool_info);
		break;
	case DBGTOOL_CMD_PF_DEV_INFO_GET:
		ret = dbgtool_knl_pf_dev_info_get(param,
						  card_info->func_handle_array);
		break;
	case DBGTOOL_CMD_MSG_2_UP:
		ret = dbgtool_knl_msg_to_up(param,
					    card_info->func_handle_array);
		break;
	case DBGTOOL_CMD_FREE_MEM:
		ret = dbgtool_knl_free_mem(idx);
		break;
	default:
		pr_err("Dbgtool cmd(x%x) not support now\n", real_cmd);
		ret = -EFAULT;
	}

	up(&dbgtool_info->dbgtool_sem);
	return ret;
}
/**
* dbgtool_knl_unlocked_ioctl - dbgtool ioctl entry
* @pfile: the pointer to file
* @cmd: the command type
* @arg: user space
* Return: 0 - success, negative - failure
*/
long dbgtool_knl_unlocked_ioctl(struct file *pfile,
unsigned int cmd,
unsigned long arg)
{
struct dbgtool_param param;
int idx;
(void)memset(&param, 0, sizeof(param));
if (copy_from_user(&param, (void *)arg, sizeof(param))) {
pr_err("Copy param from user fail\n");
return -EFAULT;
}
param.chip_name[IFNAMSIZ - 1] = '\0';
idx = get_card_id_by_name(param.chip_name);
if (idx < 0)
return -EFAULT;
return process_dbgtool_cmd(&param, cmd, idx);
}
/* Find the card whose function table contains @handle; returns NULL (after
 * logging) when no card owns the handle.
 */
static struct card_node *get_card_node_by_hwdev(const void *handle)
{
	struct card_node *node;
	int i, j;

	for (i = 0; i < MAX_CARD_NUM; i++) {
		node = (struct card_node *)g_card_node_array[i];
		if (!node)
			continue;

		for (j = 0; j < MAX_FUNCTION_NUM; j++) {
			if (node->func_handle_array[j] == handle)
				return node;
		}
	}

	pr_err("Id(%d) cant find this card\n", i);
	return NULL;
}
/**
 * ffm_intr_msg_record - FFM interruption records sent up
 * @handle: the function handle
 * @buf_in: the pointer to input buffer
 * @in_size: input buffer size
 * @buf_out: the pointer to outputput buffer
 * @out_size: output buffer size
 *
 * Callback for HIFC_SELF_CMD_UP2PF_FFM: appends the fault record carried in
 * @buf_in, timestamped with the current wall-clock time, to the owning
 * card's FFM ring. Records beyond FFM_RECORD_NUM_MAX are silently dropped.
 * NOTE(review): @in_size is not checked against sizeof(struct
 * ffm_intr_info) before the cast — presumably guaranteed by the mgmt
 * channel; confirm.
 */
void ffm_intr_msg_record(void *handle, void *buf_in, u16 in_size,
			 void *buf_out, u16 *out_size)
{
	struct dbgtool_k_glb_info *dbgtool_info;
	struct ffm_intr_info *intr;
	u32 ffm_idx;
	struct timex txc;
	struct rtc_time rctm;
	struct card_node *card_info = NULL;

	card_info = get_card_node_by_hwdev(handle);
	if (!card_info)
		return;

	dbgtool_info = (struct dbgtool_k_glb_info *)card_info->dbgtool_info;
	if (!dbgtool_info) {
		pr_err("Dbgtool info is null\n");
		return;
	}

	intr = (struct ffm_intr_info *)buf_in;

	if (!dbgtool_info->ffm)
		return;

	ffm_idx = dbgtool_info->ffm->ffm_num;
	if (ffm_idx < FFM_RECORD_NUM_MAX) {
		pr_info("%s: recv intr, ffm_idx: %d\n", __func__, ffm_idx);

		dbgtool_info->ffm->ffm[ffm_idx].node_id = intr->node_id;
		dbgtool_info->ffm->ffm[ffm_idx].err_level = intr->err_level;
		dbgtool_info->ffm->ffm[ffm_idx].err_type = intr->err_type;
		dbgtool_info->ffm->ffm[ffm_idx].err_csr_addr =
							intr->err_csr_addr;
		dbgtool_info->ffm->ffm[ffm_idx].err_csr_value =
							intr->err_csr_value;

		/* Obtain the current UTC time */
		do_gettimeofday(&txc.time);

		/* Calculate the time in date value to tm.
		 * NOTE(review): the +8h shift converts to UTC+8 local time,
		 * despite the "UTC" wording above — confirm intended zone.
		 */
		rtc_time_to_tm((unsigned long)txc.time.tv_sec +
			       60 * 60 * 8, &rctm);

		/* tm_year starts from 1900; 0->1900, 1->1901, and so on */
		dbgtool_info->ffm->ffm[ffm_idx].year =
					(u16)(rctm.tm_year + 1900);
		/* tm_mon starts from 0, 0 indicates January, and so on */
		dbgtool_info->ffm->ffm[ffm_idx].mon = (u8)rctm.tm_mon + 1;
		dbgtool_info->ffm->ffm[ffm_idx].mday = (u8)rctm.tm_mday;
		dbgtool_info->ffm->ffm[ffm_idx].hour = (u8)rctm.tm_hour;
		dbgtool_info->ffm->ffm[ffm_idx].min = (u8)rctm.tm_min;
		dbgtool_info->ffm->ffm[ffm_idx].sec = (u8)rctm.tm_sec;

		dbgtool_info->ffm->ffm_num++;
	}
}
/*lint -restore*/
/*lint -save -e785 -e438*/
/* file_operations of the dbgtool char device: the real work is done by the
 * ioctl and mmap handlers; open/release/read/write are stubs.
 */
static const struct file_operations dbgtool_file_operations = {
	.owner = THIS_MODULE,
	.open = dbgtool_knl_open,
	.release = dbgtool_knl_release,
	.read = dbgtool_knl_read,
	.write = dbgtool_knl_write,
	.unlocked_ioctl = dbgtool_knl_unlocked_ioctl,
	.mmap = hifc_mem_mmap,
};
/* Create the (single, global) dbgtool character device: allocate a dev_t,
 * register the cdev, create the class and the /dev node. On any failure the
 * steps already performed are rolled back in reverse order.
 * Returns 0 on success, negative errno otherwise.
 */
static int dbgtool_create_cdev(void)
{
	struct device *pdevice;
	int ret = 0;

	/* alloc device id */
	ret = alloc_chrdev_region(&(dbgtool_dev_id), 0, 1, CHR_DEV_DBGTOOL);
	if (ret) {
		pr_err("Alloc dbgtool chrdev region fail, ret=0x%x\n", ret);
		return ret;
	}

	cdev_init(&(dbgtool_chr_dev), &dbgtool_file_operations);

	ret = cdev_add(&(dbgtool_chr_dev), dbgtool_dev_id, 1);
	if (ret) {
		pr_err("Add dgbtool dev fail, ret=0x%x\n", ret);
		goto cdev_add_fail;
	}

	/*lint -save -e160*/
	dbgtool_d_class = class_create(THIS_MODULE, CLASS_DBGTOOL);
	/*lint -restore*/
	if (IS_ERR(dbgtool_d_class)) {
		pr_err("Create dgbtool class fail\n");
		ret = -EFAULT;
		goto cls_create_fail;
	}

	/* Export device information to user space
	 * (/sys/class/class name/device name)
	 */
	pdevice = device_create(dbgtool_d_class, NULL,
				dbgtool_dev_id, NULL, CHR_DEV_DBGTOOL);
	if (IS_ERR(pdevice)) {
		pr_err("Create dgbtool device fail\n");
		ret = -EFAULT;
		goto dev_create_fail;
	}

	return 0;

dev_create_fail:
	class_destroy(dbgtool_d_class);
cls_create_fail:
	cdev_del(&(dbgtool_chr_dev));
cdev_add_fail:
	unregister_chrdev_region(dbgtool_dev_id, 1);
	return ret;
}
/**
* dbgtool_knl_init - dbgtool character device init
* @hwdev: the pointer to hardware device
* @chip_node: the pointer to card node
* Return: 0 - success, negative - failure
*/
int dbgtool_knl_init(void *vhwdev, void *chip_node)
{
struct card_node *chip_info = (struct card_node *)chip_node;
struct dbgtool_k_glb_info *dbgtool_info;
struct hifc_hwdev *hwdev = vhwdev;
int ret = 0;
int id;
if (hifc_func_type(hwdev) == TYPE_VF)
return 0;
ret = sysfs_create_file(&((struct device *)(hwdev->dev_hdl))->kobj,
&chip_info->dbgtool_attr_file);
if (ret) {
pr_err("Failed to sysfs create file\n");
return ret;
}
chip_info->func_handle_array[hifc_global_func_id(hwdev)] = hwdev;
hifc_comm_recv_mgmt_self_cmd_reg(hwdev, HIFC_SELF_CMD_UP2PF_FFM,
ffm_intr_msg_record);
if (chip_info->dbgtool_info) {
chip_info->func_num++;
return 0;
}
dbgtool_info = (struct dbgtool_k_glb_info *)
kzalloc(sizeof(struct dbgtool_k_glb_info), GFP_KERNEL);
if (!dbgtool_info) {
pr_err("Failed to allocate dbgtool_info\n");
ret = -EFAULT;
goto dbgtool_info_fail;
}
chip_info->dbgtool_info = dbgtool_info;
/* FFM init */
dbgtool_info->ffm = (struct ffm_record_info *)
kzalloc(sizeof(struct ffm_record_info),
GFP_KERNEL);
if (!dbgtool_info->ffm) {
pr_err("Failed to allocate cell contexts for a chain\n");
ret = -EFAULT;
goto dbgtool_info_ffm_fail;
}
sema_init(&dbgtool_info->dbgtool_sem, 1);
ret = sscanf(chip_info->chip_name, HIFC_CHIP_NAME "%d", &id);
if (ret < 0) {
pr_err("Failed to get hifc id\n");
goto sscanf_chdev_fail;
}
g_card_node_array[id] = chip_info;
chip_info->func_num++;
if (g_dbgtool_init_flag) {
g_dbgtool_ref_cnt++;
/* already initialized */
return 0;
}
ret = dbgtool_create_cdev();
if (ret)
goto alloc_chdev_fail;
g_dbgtool_init_flag = 1;
g_dbgtool_ref_cnt = 1;
mutex_init(&g_addr_lock);
return 0;
alloc_chdev_fail:
g_card_node_array[id] = NULL;
sscanf_chdev_fail:
kfree(dbgtool_info->ffm);
dbgtool_info_ffm_fail:
kfree(dbgtool_info);
dbgtool_info = NULL;
chip_info->dbgtool_info = NULL;
dbgtool_info_fail:
hifc_comm_recv_up_self_cmd_unreg(hwdev, HIFC_SELF_CMD_UP2PF_FFM);
chip_info->func_handle_array[hifc_global_func_id(hwdev)] = NULL;
sysfs_remove_file(&((struct device *)(hwdev->dev_hdl))->kobj,
&chip_info->dbgtool_attr_file);
return ret;
}
/**
* dbgtool_knl_deinit - dbgtool character device deinit
* @hwdev: the pointer to hardware device
* @chip_node: the pointer to card node
*/
void dbgtool_knl_deinit(void *vhwdev, void *chip_node)
{
struct dbgtool_k_glb_info *dbgtool_info;
struct card_node *chip_info = (struct card_node *)chip_node;
int id;
int err;
struct hifc_hwdev *hwdev = vhwdev;
if (hifc_func_type(hwdev) == TYPE_VF)
return;
hifc_comm_recv_up_self_cmd_unreg(hwdev, HIFC_SELF_CMD_UP2PF_FFM);
chip_info->func_handle_array[hifc_global_func_id(hwdev)] = NULL;
sysfs_remove_file(&((struct device *)(hwdev->dev_hdl))->kobj,
&chip_info->dbgtool_attr_file);
chip_info->func_num--;
if (chip_info->func_num)
return;
err = sscanf(chip_info->chip_name, HIFC_CHIP_NAME "%d", &id);
if (err < 0)
pr_err("Failed to get hifc id\n");
g_card_node_array[id] = NULL;
dbgtool_info = chip_info->dbgtool_info;
/* FFM deinit */
kfree(dbgtool_info->ffm);
dbgtool_info->ffm = NULL;
kfree(dbgtool_info);
chip_info->dbgtool_info = NULL;
(void)dbgtool_knl_free_mem(id);
if (g_dbgtool_init_flag) {
if ((--g_dbgtool_ref_cnt))
return;
}
if (!dbgtool_d_class)
return;
device_destroy(dbgtool_d_class, dbgtool_dev_id);
class_destroy(dbgtool_d_class);
dbgtool_d_class = NULL;
cdev_del(&(dbgtool_chr_dev));
unregister_chrdev_region(dbgtool_dev_id, 1);
g_dbgtool_init_flag = 0;
}
/*lint -restore*/
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __DBGTOOL_KNL_H__
#define __DBGTOOL_KNL_H__

/* dbgtool ioctl command codes (carried in the _IOC_NR field) */
enum dbg_tool_cmd {
	DBGTOOL_CMD_API_RD = 0,
	DBGTOOL_CMD_API_WR,
	DBGTOOL_CMD_FFM_RD,
	DBGTOOL_CMD_FFM_CLR,
	DBGTOOL_CMD_PF_DEV_INFO_GET,
	DBGTOOL_CMD_MSG_2_UP,
	DBGTOOL_CMD_FREE_MEM,
	DBGTOOL_CMD_NUM
};

/* parameters of an API-chain read: cmd/ack are user-space pointers */
struct api_cmd_rd {
	u32 pf_id;
	u8 dest;
	u8 *cmd;
	u16 size;
	void *ack;
	u16 ack_size;
};

/* parameters of an API-chain write: cmd is a user-space pointer */
struct api_cmd_wr {
	u32 pf_id;
	u8 dest;
	u8 *cmd;
	u16 size;
};

/* per-PF device summary reported to the tool */
struct pf_dev_info {
	u64 bar0_size;
	u8 bus;
	u8 slot;
	u8 func;
	u64 phy_addr;
};

/* Interrupt at most records, interrupt will be recorded in the FFM */
#define FFM_RECORD_NUM_MAX 64

/* one FFM fault record plus its wall-clock timestamp */
struct ffm_intr_tm_info {
	u8 node_id;
	/* error level of the interrupt source */
	u8 err_level;
	/* Classification by interrupt source properties */
	u16 err_type;
	u32 err_csr_addr;
	u32 err_csr_value;
	u8 sec; /* second*/
	u8 min; /* minute */
	u8 hour; /* hour */
	u8 mday; /* day */
	u8 mon; /* month */
	u16 year; /* year */
};

/* ring of recorded faults; ffm_num is the count of valid entries */
struct ffm_record_info {
	u32 ffm_num;
	struct ffm_intr_tm_info ffm[FFM_RECORD_NUM_MAX];
};

/* a message forwarded to the management CPU; buffers are user pointers */
struct msg_2_up {
	u8 pf_id; /* which pf sends messages to the up */
	u8 mod;
	u8 cmd;
	void *buf_in;
	u16 in_size;
	void *buf_out;
	u16 *out_size;
};

/* ioctl argument: command-specific payload plus the target chip name */
struct dbgtool_param {
	union {
		struct api_cmd_rd api_rd;
		struct api_cmd_wr api_wr;
		struct pf_dev_info *dev_info;
		struct ffm_record_info *ffm_rd;
		struct msg_2_up msg2up;
	} param;
	char chip_name[16];
};

#ifndef MAX_CARD_NUM
#define MAX_CARD_NUM 64
#endif
/* order of the per-card buffer allocation (2^10 pages) */
#define DBGTOOL_PAGE_ORDER 10

int dbgtool_knl_init(void *vhwdev, void *chip_node);
void dbgtool_knl_deinit(void *vhwdev, void *chip_node);
int hifc_mem_mmap(struct file *filp, struct vm_area_struct *vma);
void chipif_get_all_pf_dev_info(struct pf_dev_info *dev_info, int card_id,
				void **g_func_handle_array);
long dbgtool_knl_free_mem(int id);
#endif
// SPDX-License-Identifier: GPL-2.0
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/rtc.h>
#include <linux/aer.h>
#include <linux/debugfs.h>
#include "hifc_knl_adp.h"
#include "hifc_hw.h"
#include "hifc_hwif.h"
#include "hifc_api_cmd.h"
#include "hifc_mgmt.h"
#include "hifc_lld.h"
#include "hifc_dbgtool_knl.h"
#include "hifc_tool.h"
/* BAR layout of the hifc device */
#define HIFC_PCI_CFG_REG_BAR 0
#define HIFC_PCI_INTR_REG_BAR 2
#define HIFC_PCI_DB_BAR 4

/* unit conversions for firmware time sync */
#define HIFC_SECOND_BASE 1000
#define HIFC_SYNC_YEAR_OFFSET 1900
#define HIFC_SYNC_MONTH_OFFSET 1

#define HIFC_DRV_DESC "Huawei(R) Intelligent Network Interface Card Driver"
#define HIFCVF_DRV_DESC "Huawei(R) Intelligent Virtual Function Network Driver"

MODULE_AUTHOR("Huawei Technologies CO., Ltd");
MODULE_DESCRIPTION(HIFC_DRV_DESC);
MODULE_VERSION(HIFC_DRV_VERSION);
MODULE_LICENSE("GPL");

#define HIFC_EVENT_PROCESS_TIMEOUT 10000

#define FIND_BIT(num, n) (((num) & (1UL << (n))) ? 1 : 0)
#define SET_BIT(num, n) ((num) | (1UL << (n)))
#define CLEAR_BIT(num, n) ((num) & (~(1UL << (n))))

#define MAX_CARD_ID 64
/* bitmap of card ids currently in use */
static u64 card_bit_map;

LIST_HEAD(g_hinic_chip_list);

enum hifc_lld_status {
	HIFC_NODE_CHANGE = BIT(0),
};

/* guards the chip list: the mutex serializes writers, the status bit marks
 * an in-progress node change, and dev_ref_cnt counts active readers
 */
struct hifc_lld_lock {
	/* lock for chip list */
	struct mutex lld_mutex;
	unsigned long status;
	atomic_t dev_ref_cnt;
};

struct hifc_lld_lock g_lld_lock;

#define WAIT_LLD_DEV_HOLD_TIMEOUT (10 * 60 * 1000) /* 10minutes */
#define WAIT_LLD_DEV_NODE_CHANGED (10 * 60 * 1000) /* 10minutes */
#define WAIT_LLD_DEV_REF_CNT_EMPTY (2 * 60 * 1000) /* 2minutes */
/* node in chip_node will changed, tools or driver can't get node
 * during this situation
 */
static void lld_lock_chip_node(void)
{
	u32 loop_cnt;

	mutex_lock(&g_lld_lock.lld_mutex);

	/* first wait for any concurrent node change to finish (each loop
	 * iteration sleeps ~1ms, so the bound is roughly milliseconds)
	 */
	loop_cnt = 0;
	while (loop_cnt < WAIT_LLD_DEV_NODE_CHANGED) {
		if (!test_and_set_bit(HIFC_NODE_CHANGE, &g_lld_lock.status))
			break;

		loop_cnt++;
		if (loop_cnt % 10000 == 0)
			pr_warn("Wait for lld node change complete for %us\n",
				loop_cnt / 1000);

		usleep_range(900, 1000);
	}

	if (loop_cnt == WAIT_LLD_DEV_NODE_CHANGED)
		pr_warn("Wait for lld node change complete timeout when try to get lld lock\n");

	/* then wait for all readers (lld_dev_hold) to drain; on timeout we
	 * proceed anyway and only warn
	 */
	loop_cnt = 0;
	while (loop_cnt < WAIT_LLD_DEV_REF_CNT_EMPTY) {
		if (!atomic_read(&g_lld_lock.dev_ref_cnt))
			break;

		loop_cnt++;
		if (loop_cnt % 10000 == 0)
			pr_warn("Wait for lld dev unused for %us, reference count: %d\n",
				loop_cnt / 1000,
				atomic_read(&g_lld_lock.dev_ref_cnt));

		usleep_range(900, 1000);
	}

	if (loop_cnt == WAIT_LLD_DEV_REF_CNT_EMPTY)
		pr_warn("Wait for lld dev unused timeout\n");

	mutex_unlock(&g_lld_lock.lld_mutex);
}
/* End a node-change section started by lld_lock_chip_node() */
static void lld_unlock_chip_node(void)
{
	clear_bit(HIFC_NODE_CHANGE, &g_lld_lock.status);
}
/* When tools or other drivers want to get node of chip_node, use this function
 * to prevent node be freed
 */
void lld_dev_hold(void)
{
	u32 loop_cnt = 0;

	/* ensure there have not any chip node in changing */
	mutex_lock(&g_lld_lock.lld_mutex);
	while (loop_cnt < WAIT_LLD_DEV_HOLD_TIMEOUT) {
		if (!test_bit(HIFC_NODE_CHANGE, &g_lld_lock.status))
			break;

		loop_cnt++;
		if (loop_cnt % 10000 == 0)
			pr_warn("Wait lld node change complete for %us\n",
				loop_cnt / 1000);

		usleep_range(900, 1000);
	}

	/* fix: corrected "hode" -> "hold" in the warning text */
	if (loop_cnt == WAIT_LLD_DEV_HOLD_TIMEOUT)
		pr_warn("Wait lld node change complete timeout when try to hold lld dev\n");

	atomic_inc(&g_lld_lock.dev_ref_cnt);
	mutex_unlock(&g_lld_lock.lld_mutex);
}
/* Release one reference taken by lld_dev_hold() */
void lld_dev_put(void)
{
	atomic_dec(&g_lld_lock.dev_ref_cnt);
}
/* One-time init of the chip-list lock; status needs no explicit reset since
 * g_lld_lock has static storage and starts zeroed
 */
static void hifc_lld_lock_init(void)
{
	mutex_init(&g_lld_lock.lld_mutex);
	atomic_set(&g_lld_lock.dev_ref_cnt, 0);
}
extern int hifc_probe(struct hifc_lld_dev *lld_dev,
		      void **uld_dev, char *uld_dev_name);

/* Attach the upper-layer (FC) driver to this PCI function: requires the SDK
 * hwdev to be initialized, performs stateful init, then probes the ULD.
 * On probe failure the stateful init is rolled back.
 * Returns 0 on success, negative errno otherwise.
 */
static int attach_uld(struct hifc_pcidev *dev)
{
	void *uld_dev = NULL;
	int err;

	mutex_lock(&dev->pdev_mutex);

	if (dev->init_state < HIFC_INIT_STATE_HWDEV_INITED) {
		sdk_err(&dev->pcidev->dev, "SDK init failed, can not attach uld\n");
		err = -EFAULT;
		goto out_unlock;
	}

	err = hifc_stateful_init(dev->hwdev);
	if (err)
		goto out_unlock;

	err = hifc_probe(&dev->lld_dev, &uld_dev, dev->uld_dev_name);
	if (err || !uld_dev) {
		sdk_err(&dev->pcidev->dev,
			"Failed to add object for driver to pcie device\n");
		goto probe_failed;
	}

	dev->uld_dev = uld_dev;
	mutex_unlock(&dev->pdev_mutex);

	sdk_info(&dev->pcidev->dev,
		 "Attach driver to pcie device succeed\n");
	return 0;

probe_failed:
	hifc_stateful_deinit(dev->hwdev);
out_unlock:
	mutex_unlock(&dev->pdev_mutex);

	return err;
}
extern void hifc_remove(struct hifc_lld_dev *lld_dev, void *uld_dev);

/* Detach the upper-layer (FC) driver: first try (bounded busy-wait) to
 * claim the SERVICE_T_FC bit so in-flight event delivery quiesces, then
 * remove the ULD and tear down stateful resources.
 */
static void detach_uld(struct hifc_pcidev *dev)
{
	u32 cnt = 0;

	mutex_lock(&dev->pdev_mutex);

	while (cnt < HIFC_EVENT_PROCESS_TIMEOUT) {
		if (!test_and_set_bit(SERVICE_T_FC, &dev->state))
			break;
		usleep_range(900, 1000);
		cnt++;
	}

	hifc_remove(&dev->lld_dev, dev->uld_dev);
	dev->uld_dev = NULL;
	hifc_stateful_deinit(dev->hwdev);
	/* only clear the bit if we were the ones who set it above */
	if (cnt < HIFC_EVENT_PROCESS_TIMEOUT)
		clear_bit(SERVICE_T_FC, &dev->state);

	sdk_info(&dev->pcidev->dev,
		 "Detach driver from pcie device succeed\n");
	mutex_unlock(&dev->pdev_mutex);
}
static void hifc_sync_time_to_fmw(struct hifc_pcidev *pdev_pri)
{
struct timeval tv = {0};
struct rtc_time rt_time = {0};
u64 tv_msec;
int err;
do_gettimeofday(&tv);
tv_msec = tv.tv_sec * HIFC_SECOND_BASE +
tv.tv_usec / HIFC_SECOND_BASE;
err = hifc_sync_time(pdev_pri->hwdev, tv_msec);
if (err) {
sdk_err(&pdev_pri->pcidev->dev, "Synchronize UTC time to firmware failed, errno:%d.\n",
err);
} else {
rtc_time_to_tm(tv.tv_sec, &rt_time);
sdk_info(&pdev_pri->pcidev->dev, "Synchronize UTC time to firmware succeed. UTC time %d-%02d-%02d %02d:%02d:%02d.\n",
rt_time.tm_year + HIFC_SYNC_YEAR_OFFSET,
rt_time.tm_mon + HIFC_SYNC_MONTH_OFFSET,
rt_time.tm_mday, rt_time.tm_hour,
rt_time.tm_min, rt_time.tm_sec);
}
}
#define MAX_VER_FIELD_LEN 4
#define MAX_VER_SPLIT_NUM 4

/* MCTP response header; multi-byte fields are converted to big-endian by
 * __mctp_set_hdr() before being handed back
 */
struct mctp_hdr {
	u16 resp_code;
	u16 reason_code;
	u32 manufacture_id;

	u8 cmd_rsvd;
	u8 major_cmd;
	u8 sub_cmd;
	u8 spc_field;
};

/* MCTP "get BDF" response payload */
struct mctp_bdf_info {
	struct mctp_hdr hdr; /* spc_field: pf index */
	u8 rsvd;
	u8 bus;
	u8 device;
	u8 function;
};
/* Fill the common MCTP header fields and convert the multi-byte members
 * (manufacture id plus the caller-prepared resp/reason codes) to
 * big-endian wire order.
 */
static void __mctp_set_hdr(struct mctp_hdr *hdr,
			   struct hifc_mctp_host_info *mctp_info)
{
	const u32 manufacture_id = 0x07DB;

	hdr->cmd_rsvd = 0;
	hdr->major_cmd = mctp_info->major_cmd;
	hdr->sub_cmd = mctp_info->sub_cmd;

	hdr->manufacture_id = cpu_to_be32(manufacture_id);
	hdr->resp_code = cpu_to_be16(hdr->resp_code);
	hdr->reason_code = cpu_to_be16(hdr->reason_code);
}
static void __mctp_get_bdf(struct hifc_pcidev *pci_adapter,
struct hifc_mctp_host_info *mctp_info)
{
struct pci_dev *pdev = pci_adapter->pcidev;
struct mctp_bdf_info *bdf_info = mctp_info->data;
bdf_info->bus = pdev->bus->number;
bdf_info->device = (u8)(pdev->devfn >> 3); /* 5bits in devfn */
bdf_info->function = (u8)(pdev->devfn & 0x7); /* 3bits in devfn */
memset(&bdf_info->hdr, 0, sizeof(bdf_info->hdr));
__mctp_set_hdr(&bdf_info->hdr, mctp_info);
bdf_info->hdr.spc_field =
(u8)hifc_global_func_id_hw(pci_adapter->hwdev);
mctp_info->data_len = sizeof(*bdf_info);
}
#define MCTP_PUBLIC_SUB_CMD_BDF 0x1

/* Answer an MCTP host-info request: only the BDF query (major_cmd 0,
 * sub_cmd 1) is implemented, everything else is rejected with
 * COMMAND_UNSUPPORTED.
 * NOTE(review): the combined (major << 8 | sub) value is compared against
 * the sub-command constant alone — works only because the supported
 * major_cmd is 0; confirm this is intentional.
 */
static void __mctp_get_host_info(struct hifc_pcidev *dev,
				 struct hifc_mctp_host_info *mctp_info)
{
#define COMMAND_UNSUPPORTED 3
	struct mctp_hdr *hdr;

	if (((((u16)mctp_info->major_cmd) << 8) | mctp_info->sub_cmd) ==
	    MCTP_PUBLIC_SUB_CMD_BDF) {
		__mctp_get_bdf(dev, mctp_info);
	} else {
		hdr = mctp_info->data;
		hdr->reason_code = COMMAND_UNSUPPORTED;
		__mctp_set_hdr(hdr, mctp_info);
		mctp_info->data_len = sizeof(*hdr);
	}
}
/* Walk the card's function list and return the hwdev of its PPF (parent
 * physical function), or NULL when the pdev is unknown or no PPF exists.
 * The list walk is protected by lld_dev_hold()/lld_dev_put().
 */
void *hifc_get_ppf_hwdev_by_pdev(struct pci_dev *pdev)
{
	struct hifc_pcidev *pci_adapter;
	struct card_node *chip_node;
	struct hifc_pcidev *dev;

	if (!pdev)
		return NULL;

	pci_adapter = pci_get_drvdata(pdev);
	if (!pci_adapter)
		return NULL;

	chip_node = pci_adapter->chip_node;
	lld_dev_hold();
	list_for_each_entry(dev, &chip_node->func_list, node) {
		if (dev->hwdev && hifc_func_type(dev->hwdev) == TYPE_PPF) {
			lld_dev_put();
			return dev->hwdev;
		}
	}
	lld_dev_put();

	return NULL;
}
void hifc_event(struct hifc_lld_dev *lld_dev, void *uld_dev,
struct hifc_event_info *event);
/* Handle a hardware/management event for one PCI function.
 * Firmware-activation and MCTP host-info events are handled in the driver
 * itself; everything else is forwarded to the FC upper-level driver.
 * The SERVICE_T_FC bit in dev->state acts as a busy latch: if it is already
 * set (uld detach in progress), the event is dropped with a warning.
 *
 * Fix: the warning text was garbled ("can't handler is in detach").
 */
void hifc_event_process(void *adapter, struct hifc_event_info *event)
{
	struct hifc_pcidev *dev = adapter;

	if (event->type == HIFC_EVENT_FMW_ACT_NTC)
		return hifc_sync_time_to_fmw(dev);
	else if (event->type == HIFC_EVENT_MCTP_GET_HOST_INFO)
		return __mctp_get_host_info(dev, &event->mctp_info);

	if (test_and_set_bit(SERVICE_T_FC, &dev->state)) {
		sdk_warn(&dev->pcidev->dev, "Event: 0x%x can't be handled, device is in detach\n",
			 event->type);
		return;
	}

	hifc_event(&dev->lld_dev, dev->uld_dev, event);
	clear_bit(SERVICE_T_FC, &dev->state);
}
/* Map the device's PCI BARs: configuration registers, interrupt registers,
 * the doorbell area, and the direct-WQE area that follows the doorbells.
 * On failure every mapping established so far is undone (goto unwind) and
 * -ENOMEM is returned.
 */
static int mapping_bar(struct pci_dev *pdev, struct hifc_pcidev *pci_adapter)
{
	u64 dwqe_addr;

	pci_adapter->cfg_reg_base = pci_ioremap_bar(pdev, HIFC_PCI_CFG_REG_BAR);
	if (!pci_adapter->cfg_reg_base) {
		sdk_err(&pci_adapter->pcidev->dev,
			"Failed to map configuration regs\n");
		return -ENOMEM;
	}

	pci_adapter->intr_reg_base = pci_ioremap_bar(pdev,
						     HIFC_PCI_INTR_REG_BAR);
	if (!pci_adapter->intr_reg_base) {
		sdk_err(&pci_adapter->pcidev->dev,
			"Failed to map interrupt regs\n");
		goto map_intr_bar_err;
	}

	/* doorbells are mapped as plain device memory ... */
	pci_adapter->db_base_phy = pci_resource_start(pdev, HIFC_PCI_DB_BAR);
	pci_adapter->db_base = ioremap(pci_adapter->db_base_phy,
				       HIFC_DB_DWQE_SIZE);
	if (!pci_adapter->db_base) {
		sdk_err(&pci_adapter->pcidev->dev,
			"Failed to map doorbell regs\n");
		goto map_db_err;
	}

	/* ... while the dwqe window right after them is write-combined on
	 * x86 and strongly-ordered device memory on arm64 (no ioremap_wc).
	 */
	dwqe_addr = pci_adapter->db_base_phy + HIFC_DB_DWQE_SIZE;

#if defined(__aarch64__)
	/* arm do not support call ioremap_wc() */
	pci_adapter->dwqe_mapping = __ioremap(dwqe_addr, HIFC_DB_DWQE_SIZE,
					      __pgprot(PROT_DEVICE_nGnRnE));
#else
	pci_adapter->dwqe_mapping = io_mapping_create_wc(dwqe_addr,
							 HIFC_DB_DWQE_SIZE);
#endif /* end of "defined(__aarch64__)" */
	if (!pci_adapter->dwqe_mapping) {
		sdk_err(&pci_adapter->pcidev->dev, "Failed to io_mapping_create_wc\n");
		goto mapping_dwqe_err;
	}

	return 0;

mapping_dwqe_err:
	iounmap(pci_adapter->db_base);
map_db_err:
	iounmap(pci_adapter->intr_reg_base);
map_intr_bar_err:
	iounmap(pci_adapter->cfg_reg_base);
	return -ENOMEM;
}
/* Undo mapping_bar(): release all BAR mappings in reverse creation order.
 * The dwqe area was created differently per-arch, so it is released with
 * the matching primitive (iounmap vs io_mapping_free).
 */
static void unmapping_bar(struct hifc_pcidev *pci_adapter)
{
#if defined(__aarch64__)
	iounmap(pci_adapter->dwqe_mapping);
#else
	io_mapping_free(pci_adapter->dwqe_mapping);
#endif /* end of "defined(__aarch64__)" */

	iounmap(pci_adapter->db_base);
	iounmap(pci_adapter->intr_reg_base);
	iounmap(pci_adapter->cfg_reg_base);
}
/* Attach this PCI function to its card_node, creating a new node when the
 * card is seen for the first time.
 * Functions on the same physical card share a parent PCI bus; if a chip
 * node with a matching parent-bus number already exists it is reused.
 * Otherwise a card id is claimed from the global card_bit_map and a new
 * node is added to g_hinic_chip_list.
 * Caller must hold the chip-node lock (see callers using
 * lld_lock_chip_node()).
 */
static int alloc_chip_node(struct hifc_pcidev *pci_adapter)
{
	struct card_node *chip_node;
	unsigned char i;
	unsigned char parent_bus_number = 0;

	/* a root-bus device has no parent; treat that as "no match" below */
	if (!pci_is_root_bus(pci_adapter->pcidev->bus))
		parent_bus_number = pci_adapter->pcidev->bus->parent->number;

	if (parent_bus_number != 0) {
		list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
			if (chip_node->dp_bus_num == parent_bus_number) {
				pci_adapter->chip_node = chip_node;
				return 0;
			}
		}
	}

	/* claim the first free card id from the global bitmap */
	for (i = 0; i < MAX_CARD_ID; i++) {
		if (!FIND_BIT(card_bit_map, i)) {
			card_bit_map = (u64)SET_BIT(card_bit_map, i);
			break;
		}
	}

	if (i == MAX_CARD_ID) {
		sdk_err(&pci_adapter->pcidev->dev,
			"Failed to alloc card id\n");
		return -EFAULT;
	}

	chip_node = kzalloc(sizeof(*chip_node), GFP_KERNEL);
	if (!chip_node) {
		card_bit_map = CLEAR_BIT(card_bit_map, i);
		sdk_err(&pci_adapter->pcidev->dev,
			"Failed to alloc chip node\n");
		return -ENOMEM;
	}

	chip_node->dbgtool_attr_file.name = kzalloc(IFNAMSIZ, GFP_KERNEL);
	if (!(chip_node->dbgtool_attr_file.name)) {
		kfree(chip_node);
		card_bit_map = CLEAR_BIT(card_bit_map, i);
		sdk_err(&pci_adapter->pcidev->dev,
			"Failed to alloc dbgtool attr file name\n");
		return -ENOMEM;
	}

	/* parent bus number */
	chip_node->dp_bus_num = parent_bus_number;

	/* chip name encodes the card id; free_chip_node() parses it back */
	snprintf(chip_node->chip_name, IFNAMSIZ, "%s%d", HIFC_CHIP_NAME, i);
	snprintf((char *)chip_node->dbgtool_attr_file.name,
		 IFNAMSIZ, "%s%d", HIFC_CHIP_NAME, i);

	sdk_info(&pci_adapter->pcidev->dev,
		 "Add new chip %s to global list succeed\n",
		 chip_node->chip_name);

	list_add_tail(&chip_node->node, &g_hinic_chip_list);
	INIT_LIST_HEAD(&chip_node->func_list);
	pci_adapter->chip_node = chip_node;
	mutex_init(&chip_node->sfp_mutex);

	return 0;
}
/* Release the card_node once its last PCI function is gone.
 * The card id is recovered by parsing the trailing number out of the chip
 * name (written by alloc_chip_node()) and released back to card_bit_map.
 * Caller must hold the chip-node lock.
 *
 * Fix: the original checked sscanf() for "< 0" only; a match failure
 * returns 0, in which case `id` was used uninitialized in CLEAR_BIT.
 * Now the bit is only cleared when the id was actually parsed.
 */
static void free_chip_node(struct hifc_pcidev *pci_adapter)
{
	struct card_node *chip_node = pci_adapter->chip_node;
	u32 id = 0;
	int err;

	if (!list_empty(&chip_node->func_list))
		return;

	list_del(&chip_node->node);
	sdk_info(&pci_adapter->pcidev->dev,
		 "Delete chip %s from global list succeed\n",
		 chip_node->chip_name);

	err = sscanf(chip_node->chip_name, HIFC_CHIP_NAME "%u", &id);
	if (err <= 0)
		sdk_err(&pci_adapter->pcidev->dev, "Failed to get hifc id\n");
	else
		card_bit_map = CLEAR_BIT(card_bit_map, id);

	kfree(chip_node->dbgtool_attr_file.name);
	kfree(chip_node);
}
/* Configure streaming and coherent DMA masks for the device.
 * Prefers 64-bit addressing and falls back to 32-bit, for both the
 * streaming and the coherent mask independently.
 * Returns 0 on success or the last pci_set_*_dma_mask() error.
 */
static int config_pci_dma_mask(struct pci_dev *pdev)
{
	int rc;

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		sdk_warn(&pdev->dev, "Couldn't set 64-bit DMA mask\n");
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	}
	if (rc) {
		sdk_err(&pdev->dev, "Failed to set DMA mask\n");
		return rc;
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		sdk_warn(&pdev->dev,
			 "Couldn't set 64-bit coherent DMA mask\n");
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	}
	if (rc) {
		sdk_err(&pdev->dev,
			"Failed to set coherent DMA mask\n");
		return rc;
	}

	return 0;
}
/* First stage of probe: allocate the per-function adapter and bring up the
 * bare PCI device (enable, request regions, AER, bus mastering, DMA masks).
 * On success the adapter is stored as drvdata; on failure everything done
 * so far is unwound via the goto chain and drvdata is cleared.
 */
static int hifc_pci_init(struct pci_dev *pdev)
{
	struct hifc_pcidev *pci_adapter = NULL;
	int err;

	pci_adapter = kzalloc(sizeof(*pci_adapter), GFP_KERNEL);
	if (!pci_adapter) {
		sdk_err(&pdev->dev,
			"Failed to alloc pci device adapter\n");
		return -ENOMEM;
	}
	pci_adapter->pcidev = pdev;
	mutex_init(&pci_adapter->pdev_mutex);

	pci_set_drvdata(pdev, pci_adapter);

	err = pci_enable_device(pdev);
	if (err) {
		sdk_err(&pdev->dev, "Failed to enable PCI device\n");
		goto pci_enable_err;
	}

	err = pci_request_regions(pdev, HIFC_DRV_NAME);
	if (err) {
		sdk_err(&pdev->dev, "Failed to request regions\n");
		goto pci_regions_err;
	}

	/* note: AER registration failure is intentionally not fatal */
	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);

	err = config_pci_dma_mask(pdev);
	if (err)
		goto dma_mask_err;

	return 0;

dma_mask_err:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
pci_regions_err:
	pci_disable_device(pdev);
pci_enable_err:
	pci_set_drvdata(pdev, NULL);
	kfree(pci_adapter);

	return err;
}
/* Tear down what hifc_pci_init() set up, in reverse order, and free the
 * per-function adapter. drvdata is cleared so later callbacks see NULL.
 */
static void hifc_pci_deinit(struct pci_dev *pdev)
{
	struct hifc_pcidev *pci_adapter = pci_get_drvdata(pdev);

	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	kfree(pci_adapter);
}
/* Second stage of probe: initialize the hardware device layer on top of
 * the already-mapped BARs, register the driver's event callback, sync time
 * to firmware, set up the dbgtool channel and finally attach the FC uld.
 * Advances pci_adapter->init_state as each stage completes so that
 * hifc_func_deinit()/remove_func() can unwind exactly as far as needed.
 */
static int hifc_func_init(struct pci_dev *pdev,
			  struct hifc_pcidev *pci_adapter)
{
	struct hifc_init_para init_para;

	int err;

	init_para.adapter_hdl = pci_adapter;
	init_para.pcidev_hdl = pdev;
	init_para.dev_hdl = &pdev->dev;
	init_para.cfg_reg_base = pci_adapter->cfg_reg_base;
	init_para.intr_reg_base = pci_adapter->intr_reg_base;
	init_para.db_base = pci_adapter->db_base;
	init_para.db_base_phy = pci_adapter->db_base_phy;
	init_para.dwqe_mapping = pci_adapter->dwqe_mapping;
	init_para.hwdev = &pci_adapter->hwdev;
	init_para.chip_node = pci_adapter->chip_node;
	init_para.ppf_hwdev = hifc_get_ppf_hwdev_by_pdev(pdev);
	err = hifc_init_hwdev(&init_para);
	if (err) {
		/* hwdev may be partially written on failure; forget it */
		pci_adapter->hwdev = NULL;
		sdk_err(&pdev->dev, "Failed to initialize hardware device\n");
		return -EFAULT;
	}

	pci_adapter->init_state = HIFC_INIT_STATE_HWDEV_INITED;

	pci_adapter->lld_dev.pdev = pdev;
	pci_adapter->lld_dev.hwdev = pci_adapter->hwdev;

	hifc_event_register(pci_adapter->hwdev, pci_adapter,
			    hifc_event_process);

	hifc_sync_time_to_fmw(pci_adapter);

	/* dbgtool setup touches the shared chip node; serialize it */
	lld_lock_chip_node();
	err = dbgtool_knl_init(pci_adapter->hwdev, pci_adapter->chip_node);
	if (err) {
		lld_unlock_chip_node();
		sdk_err(&pdev->dev, "Failed to initialize dbgtool\n");
		hifc_event_unregister(pci_adapter->hwdev);
		return err;
	}
	lld_unlock_chip_node();

	pci_adapter->init_state = HIFC_INIT_STATE_DBGTOOL_INITED;

	attach_uld(pci_adapter);

	sdk_info(&pdev->dev, "Pcie device probed\n");

	pci_adapter->init_state = HIFC_INIT_STATE_ALL_INITED;

	return 0;
}
/* Tear down the function-level state built by hifc_func_init().
 * Each stage is gated on init_state so that a partially probed function is
 * unwound exactly as far as it got.
 */
static void hifc_func_deinit(struct pci_dev *pdev)
{
	struct hifc_pcidev *pci_adapter = pci_get_drvdata(pdev);

	/* When function deinit, disable mgmt initiative report events firstly,
	 * then flush mgmt work-queue.
	 */
	if (pci_adapter->init_state >= HIFC_INIT_STATE_ALL_INITED)
		detach_uld(pci_adapter);

	hifc_disable_mgmt_msg_report(pci_adapter->hwdev);
	if (pci_adapter->init_state >= HIFC_INIT_STATE_HW_PART_INITED)
		hifc_flush_mgmt_workq(pci_adapter->hwdev);

	hifc_set_func_deinit_flag(pci_adapter->hwdev);

	if (pci_adapter->init_state >= HIFC_INIT_STATE_DBGTOOL_INITED) {
		lld_lock_chip_node();
		dbgtool_knl_deinit(pci_adapter->hwdev, pci_adapter->chip_node);
		lld_unlock_chip_node();
		hifc_event_unregister(pci_adapter->hwdev);
	}

	if (pci_adapter->init_state >= HIFC_INIT_STATE_HW_IF_INITED) {
		/*Remove the current node from  node-list first,
		 * then it's safe to free hwdev
		 */
		lld_lock_chip_node();
		list_del(&pci_adapter->node);
		lld_unlock_chip_node();

		hifc_free_hwdev(pci_adapter->hwdev);
	}
}
/* Unwind a probed function. The original switch listed every state from
 * PCI_INITED upward with a shared body, which is equivalent to a single
 * range check: anything at or past HIFC_INIT_STATE_PCI_INITED is torn
 * down, anything earlier (HIFC_INIT_STATE_NONE) has nothing to undo.
 */
static void remove_func(struct hifc_pcidev *pci_adapter)
{
	struct pci_dev *pdev = pci_adapter->pcidev;

	if (pci_adapter->init_state < HIFC_INIT_STATE_PCI_INITED)
		return;

	set_bit(HIFC_FUNC_IN_REMOVE, &pci_adapter->flag);

	/* function-level teardown only once the hw interface came up */
	if (pci_adapter->init_state >= HIFC_INIT_STATE_HW_IF_INITED)
		hifc_func_deinit(pdev);

	lld_lock_chip_node();
	/* otherwise the node is still on the list; unlink it here */
	if (pci_adapter->init_state < HIFC_INIT_STATE_HW_IF_INITED)
		list_del(&pci_adapter->node);
	nictool_k_uninit();
	free_chip_node(pci_adapter);
	lld_unlock_chip_node();

	unmapping_bar(pci_adapter);
	hifc_pci_deinit(pdev);
}
/* pci_driver.remove callback: detect whether the hardware is still present
 * (so teardown can skip device I/O if it vanished), then unwind the whole
 * function. A NULL drvdata means probe never got far enough to matter.
 */
static void hifc_hwdev_remove(struct pci_dev *pdev)
{
	struct hifc_pcidev *pci_adapter = pci_get_drvdata(pdev);

	if (!pci_adapter)
		return;

	sdk_info(&pdev->dev, "Pcie device remove begin\n");

	if (pci_adapter->init_state >= HIFC_INIT_STATE_HW_IF_INITED)
		hifc_detect_hw_present(pci_adapter->hwdev);

	remove_func(pci_adapter);

	sdk_info(&pdev->dev, "Pcie device removed\n");
}
/* pci_driver.probe callback.
 * Stages: bare PCI bring-up, BAR mapping, chip-node attach + nictool,
 * then the heavyweight hifc_func_init().
 * NOTE(review): a hifc_func_init() failure deliberately returns 0 after
 * recording HIFC_FUNC_PRB_ERR (or leaving PRB_DELAY) in the flags, keeping
 * the half-probed device around instead of failing the probe — presumably
 * so a later retry/recovery path can finish it; confirm before changing.
 */
static int hifc_hwdev_probe(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	struct hifc_pcidev *pci_adapter;
	int err;

	sdk_info(&pdev->dev, "Pcie device probe begin\n");

	err = hifc_pci_init(pdev);
	if (err)
		return err;

	pci_adapter = pci_get_drvdata(pdev);
	clear_bit(HIFC_FUNC_PRB_ERR, &pci_adapter->flag);
	clear_bit(HIFC_FUNC_PRB_DELAY, &pci_adapter->flag);
	err = mapping_bar(pdev, pci_adapter);
	if (err) {
		sdk_err(&pdev->dev, "Failed to map bar\n");
		goto map_bar_failed;
	}

	pci_adapter->id = *id;

	/* if chip information of pcie function exist,
	 * add the function into chip
	 */
	lld_lock_chip_node();
	err = alloc_chip_node(pci_adapter);
	if (err) {
		sdk_err(&pdev->dev,
			"Failed to add new chip node to global list\n");
		goto alloc_chip_node_fail;
	}

	err = nictool_k_init();
	if (err) {
		sdk_warn(&pdev->dev, "Failed to init nictool");
		goto init_nictool_err;
	}

	list_add_tail(&pci_adapter->node, &pci_adapter->chip_node->func_list);

	lld_unlock_chip_node();

	pci_adapter->init_state = HIFC_INIT_STATE_PCI_INITED;

	err = hifc_func_init(pdev, pci_adapter);
	if (err)
		goto func_init_err;

	return 0;

func_init_err:
	/* intentionally report success; the flag marks the failed probe */
	if (!test_bit(HIFC_FUNC_PRB_DELAY, &pci_adapter->flag))
		set_bit(HIFC_FUNC_PRB_ERR, &pci_adapter->flag);
	return 0;

init_nictool_err:
	free_chip_node(pci_adapter);

alloc_chip_node_fail:
	lld_unlock_chip_node();
	unmapping_bar(pci_adapter);

map_bar_failed:
	hifc_pci_deinit(pdev);

	sdk_err(&pdev->dev, "Pcie device probe failed\n");
	return err;
}
#define PCI_VENDOR_ID_HUAWEI 0x19e5
#define HIFC_DEV_ID_1822_8G 0x0212
#define HIFC_DEV_ID_1822_16G 0x0203
#define HIFC_DEV_ID_1822_32G 0x0202
/*lint -save -e133 -e10*/
/* PCI IDs claimed by this driver: Huawei Hi1822-based FC HBAs at
 * 8G/16G/32G link speeds. Zero entry terminates the table.
 */
/*lint -save -e133 -e10*/
static const struct pci_device_id hifc_pci_table[] = {
	{PCI_VDEVICE(HUAWEI, HIFC_DEV_ID_1822_8G), 0},
	{PCI_VDEVICE(HUAWEI, HIFC_DEV_ID_1822_16G), 0},
	{PCI_VDEVICE(HUAWEI, HIFC_DEV_ID_1822_32G), 0},
	{0, 0}
};
/*lint -restore*/
MODULE_DEVICE_TABLE(pci, hifc_pci_table);
/* pci_driver.shutdown callback: quiesce the device for reboot/kexec by
 * disabling it. NOTE(review): the message is logged at error level even
 * though shutdown is a normal event — presumably to guarantee visibility;
 * confirm before downgrading.
 */
static void hifc_shutdown(struct pci_dev *pdev)
{
	sdk_err(&pdev->dev, "Shutdown device\n");

	pci_disable_device(pdev);
}
/* PCI driver registration: probe/remove lifecycle plus a shutdown hook. */
static struct pci_driver hifc_driver = {
	.name		 = HIFC_DRV_NAME,
	.id_table	 = hifc_pci_table,
	.probe		 = hifc_hwdev_probe,
	.remove		 = hifc_hwdev_remove,
	.shutdown	 = hifc_shutdown,
};
extern int hifc_init_module(void);
extern void hifc_exit_module(void);
/* Module entry point: initialize the lld locks, the FC common module, then
 * register the PCI driver.
 * Fix: the int return of hifc_init_module() was silently discarded, and a
 * pci_register_driver() failure left the FC module initialized. Both error
 * paths are now handled.
 */
static int __init hifc_lld_init(void)
{
	int err;

	pr_info("%s - version %s\n", HIFC_DRV_DESC, HIFC_DRV_VERSION);

	hifc_lld_lock_init();

	err = hifc_init_module();
	if (err)
		return err;

	err = pci_register_driver(&hifc_driver);
	if (err)
		hifc_exit_module();

	return err;
}
/* Module exit: unregister the PCI driver (running remove for every bound
 * function) before shutting down the FC common module.
 */
static void __exit hifc_lld_exit(void)
{
	pci_unregister_driver(&hifc_driver);

	hifc_exit_module();
}
module_init(hifc_lld_init);
module_exit(hifc_lld_exit);
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef HIFC_LLD_H_
#define HIFC_LLD_H_
#include "unf_common.h"
#define HIFC_PCI_VENDOR_ID (0x19e5)
#define HIFC_DRV_NAME "hifc_sdk"
#define HIFC_CHIP_NAME "hifc"
#define HIFC_DRV_VERSION UNF_FC_VERSION
/* Handle handed to the upper-level (FC) driver: the PCI device plus the
 * opaque hardware-device object.
 */
struct hifc_lld_dev {
	struct pci_dev *pdev;
	void *hwdev;
};
extern struct list_head g_hinic_chip_list;
/* Per-PCI-function private state of the low-level driver. */
struct hifc_pcidev {
	struct pci_dev *pcidev;
	void *hwdev;
	struct card_node *chip_node;
	struct hifc_lld_dev lld_dev;
	/* Record the service object address,
	 * such as hifc_dev and toe_dev, fc_dev
	 */
	void *uld_dev;
	/* Record the service object name */
	char uld_dev_name[IFNAMSIZ];
	/* Global list linkage used by the driver to manage
	 * all function devices on a card
	 */
	struct list_head node;
	void __iomem *cfg_reg_base;
	void __iomem *intr_reg_base;
	u64 db_base_phy;
	void __iomem *db_base;
	/* dwqe window: plain ioremap on arm64, WC io_mapping elsewhere */
#if defined(__aarch64__)
	void __iomem *dwqe_mapping;
#else
	struct io_mapping *dwqe_mapping;
#endif
	/* lock for attach/detach uld */
	struct mutex pdev_mutex;
	/* how far probe got; one of enum hifc_init_state */
	u32 init_state;
	/* set while the uld driver is processing an event */
	unsigned long state;
	struct pci_device_id id;
	/* HIFC_FUNC_* bits (see enum below) */
	unsigned long flag;
};
/* Bits for hifc_pcidev.flag. */
enum {
	HIFC_FUNC_IN_REMOVE = BIT(0),	/* remove in progress */
	HIFC_FUNC_PRB_ERR = BIT(1),	/* probe failed (deferred handling) */
	HIFC_FUNC_PRB_DELAY = BIT(2),	/* probe deliberately delayed */
};
/* Probe progress markers for hifc_pcidev.init_state.
 * The numeric ordering is load-bearing: teardown code uses >=/< range
 * checks against these values, so new states must keep the sequence.
 */
enum hifc_init_state {
	HIFC_INIT_STATE_NONE,
	HIFC_INIT_STATE_PCI_INITED,
	HIFC_INIT_STATE_HW_IF_INITED,
	HIFC_INIT_STATE_HW_PART_INITED,
	HIFC_INIT_STATE_HWDEV_INITED,
	HIFC_INIT_STATE_DBGTOOL_INITED,
	HIFC_INIT_STATE_ALL_INITED,
};
void lld_dev_put(void);
void lld_dev_hold(void);
#endif
// SPDX-License-Identifier: GPL-2.0
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#include "hifc_module.h"
#include "hifc_utils.h"
#include "hifc_hba.h"
#include "hifc_chipitf.h"
#include "hifc_portmng.h"
/* Diagnose-opcode dispatch table used by hifc_port_diagnose();
 * terminated by the UNF_PORT_DIAG_BUTT sentinel with a NULL handler.
 */
struct hifc_port_diag_op_s hifc_diag_op[] = {
	{ UNF_PORT_DIAG_PORT_DETAIL, hifc_show_fc_port_detail },
	{ UNF_PORT_DIAG_RD_WR_REG, hifc_rw_reg },
	{ UNF_PORT_DIAG_BUTT, NULL }
};
/* Names of the WQE/SCQE task types, indexed by the hardware task-type id.
 * The dfx statistics readers iterate this table and report one counter per
 * non-NULL entry, so the positions must match the firmware numbering.
 */
char *wqe_type[HIFC_MAX_COUNTER_TYPE] = {
	"TASK_TYPE_EMPTY",
	"HIFC_SEND_IWRITE",
	"HIFC_SEND_IREAD",
	"HIFC_RECV_IRESP",
	/* obsoleted */
	"HIFC_RECV_TCMND",
	/* FCP Read IO Control Command. */
	"HIFC_SEND_TREAD",
	/* FCP Write IO Control Command (XFER_RDY). */
	"HIFC_SEND_TWRITE",
	/* Target Mode send FCP_RESP of Read/Write */
	"HIFC_SEND_TRESP",
	/* Status for FCP_TREAD/FCP_TWRITE/FCP_TRESP */
	"HIFC_RECV_TSTS",
	"HIFC_SEND_ABTS",
	"HIFC_SEND_IELS",
	"HIFC_SEND_ITMF",
	"HIFC_SEND_CLEAN_UP",
	"HIFC_SEND_CLEAN_UP_ALL",
	/* Receive unsolicited data */
	"HIFC_RECV_UNSOLICITED",
	"HIFC_RECV_ERR_WARN",
	"HIFC_RECV_SESS_EN",
	"HIFC_SEND_SESS_DIS",
	"HIFC_SEND_SESS_DEL",
	"HIFC_SEND_CQE_AVAILABLE",
	/* Receive FCP_CMND From Remote Port and Transfer to driver. 20 */
	"HIFC_RECV_TCMND",
	/* Receive ELS From Remote Port and Transfer to driver. */
	"HIFC_RECV_ELS_CMD",
	/* Receive ELS From Remote Port and Transfer to driver. */
	"HIFC_RECV_ABTS_CMD",
	/* Receive immidiate data. */
	"HIFC_RECV_IMMIDIATE",
	/*
	 * ESL response. PLOGI_ACC, PRLI_ACC will carry the parent context
	 * parameter indication.
	 */
	"HIFC_SEND_ELS_RSP",
	/* Status for ELS. */
	"HIFC_RECV_ELS_RSP_STS",
	/* ABTS response with abort flag. */
	"HIFC_SEND_ABTS_RSP",
	/* Status for ABTS. */
	"HIFC_RECV_ABTS_RSP_STS",
	/* Abort Command */
	"HIFC_SEND_ABORT",
	/* Status for ABORT. */
	"HIFC_RECV_ABORT_STS",
	"HIFC_SEND_ELS",
	"HIFC_RECV_ELS_RSP",
	/* GS request Command */
	"HIFC_SEND_GS",
	/* GS response. */
	"HIFC_RECV_GS_RSP",
	/* Status for offload req. */
	"HIFC_RECV_SESS_EN_STS",
	/* Status for session disable. */
	"HIFC_RECV_SESS_DIS_STS",
	/* Status for session delete. */
	"HIFC_RECV_SESS_DEL_STS",
	/* Status for ABORT. */
	"HIFC_RECV_ABTS_RSP",
	/* Buffer Clear */
	"HIFC_SEND_BUFFER_CLEAR",
	/* Status for Buffer Clear */
	"HIFC_RECV_BUFFER_CLEAR_STS",
	/* Flush Sq 40 */
	"HIFC_SEND_FLUSH_SQ",
	/* Status for FLUSH_SQ */
	"HIFC_RECV_FLUSH_SQ_STS",
	/* Reset session SQE type */
	"HIFC_SEND_SESS_RESET",
	/* Reset session SCQE type */
	"HIFC_RECV_SESS_RESET_STS",
	"HIFC_RECV_CQE_AVAILABLE_STS",
	"HIFC_SEND_DUMP_EXCH",
	"HIFC_SEND_INIT_SRQC",
	"HIFC_SEND_CLEAR_SRQ",
	"HIFC_RECV_CLEAR_SRQ_STS",
	"HIFC_SEND_INIT_SCQC",
	"HIFC_SEND_DEL_SCQC",
	"HIFC_SEND_TMF_RESP",
	"HIFC_SEND_DEL_SRQC",
	"HIFC_RECV_IMMI_CONTINUE",
	"HIFC_RECV_ITMF_RESP",
	"HIFC_RECV_MARKER_STS",
	"HIFC_SEND_TACK",
	"HIFC_SEND_AEQERR",
	"HIFC_RECV_ABTS_MARKER_STS"
};
/* Names of SCQ completion/error codes, indexed by the hardware code.
 * Entry 0 is the success code ("COMPLETED"); hifc_dfx_get_error_state()
 * starts reading at index 1 for that reason.
 */
char *scq_err_type[HIFC_MAX_COUNTER_TYPE] = {
	"HIFC_CQE_COMPLETED",
	"HIFC_SESS_HT_INSERT_FAIL",
	"HIFC_SESS_HT_INSERT_DUPLICATE",
	"HIFC_SESS_HT_BIT_SET_FAIL",
	"HIFC_SESS_HT_DELETE_FAIL",
	"HIFC_CQE_BUFFER_CLEAR_IO_COMPLETED",
	"HIFC_CQE_SESSION_ONLY_CLEAR_IO_COMPLETED",
	"HIFC_CQE_SESSION_RST_CLEAR_IO_COMPLETED",
	"HIFC_CQE_TMF_RSP_IO_COMPLETED",
	"HIFC_CQE_TMF_IO_COMPLETED",
	"HIFC_CQE_DRV_ABORT_IO_COMPLETED",
	"HIFC_CQE_DRV_ABORT_IO_IN_RSP_COMPLETED",
	"HIFC_CQE_DRV_ABORT_IO_IN_CMD_COMPLETED",
	"HIFC_CQE_WQE_FLUSH_IO_COMPLETED",
	"HIFC_ERROR_CODE_DATA_DIFX_FAILED",
	"HIFC_ERROR_CODE_DATA_TASK_TYPE_INCORRECT",
	"HIFC_ERROR_CODE_DATA_OOO_RO",
	"HIFC_ERROR_CODE_DATA_EXCEEDS_DATA2TRNS",
	"HIFC_ERROR_CODE_FCP_RSP_INVALID_LENGTH_FIELD",
	"HIFC_ERROR_CODE_FCP_CONF_NOT_SUPPORTED",
	"HIFC_ERROR_CODE_FCP_RSP_OPENED_SEQ",
	"HIFC_ERROR_CODE_XFER_INVALID_PAYLOAD_SIZE",
	"HIFC_ERROR_CODE_XFER_PEND_XFER_SET",
	"HIFC_ERROR_CODE_XFER_OOO_RO",
	"HIFC_ERROR_CODE_XFER_NULL_BURST_LEN",
	"HIFC_ERROR_CODE_REC_TIMER_EXPIRE",
	"HIFC_ERROR_CODE_E_D_TIMER_EXPIRE",
	"HIFC_ERROR_CODE_ABORT_TIMER_EXPIRE",
	"HIFC_ERROR_CODE_ABORT_MAGIC_NUM_NOT_MATCH",
	"HIFC_IMMI_CMDPKT_SETUP_FAIL",
	"HIFC_ERROR_CODE_DATA_SEQ_ID_NOT_EQUAL",
	"HIFC_ELS_GS_RSP_EXCH_CHECK_FAIL",
	"HIFC_CQE_ELS_GS_SRQE_GET_FAIL",
	"HIFC_CQE_DATA_DMA_REQ_FAIL",
	"HIFC_CQE_SESSION_CLOSED",
	"HIFC_SCQ_IS_FULL",
	"HIFC_SRQ_IS_FULL",
	"HIFC_DUCHILDCTX_SETUP_FAIL",
	"HIFC_ERROR_INVALID_TXMFS",
	"HIFC_OFFLOAD_LACKOF_SCQE_FAIL",
	"HIFC_INVALID_TASK_ID",
	"HIFC_INVALID_PKT_LEN",
	"HIFC_CQE_ELS_GS_REQ_CLR_IO_COMPLETED",
	"HIFC_CQE_ELS_RSP_CLR_IO_COMPLETED",
	"HIFC_CODE_RESID_UNDER_ERR"
};
/* Names of common up (microcode) error events; only one is defined. */
char *com_up_err_event_type[HIFC_MAX_COUNTER_TYPE] = {
	"HIFC_EVENT_HEART_LOST",
};
/* Names of AEQ fatal-error codes, grouped by the hardware's own
 * classification (queue errors, WQE errors, context errors).
 */
char *aeq_err_type[HIFC_MAX_COUNTER_TYPE] = {
	/* que_err_code */
	"HIFC_SCQ_IS_FULL_ERR",
	"HIFC_SRQ_IS_FULL_ERR",
	/* wqe_fatal_err */
	"HIFC_SQE_CHILD_SETUP_WQE_MSN_ERR",
	"HIFC_SQE_CHILD_SETUP_WQE_GPA_ERR",
	"HIFC_CMDPKT_CHILD_SETUP_INVALID_WQE_ERR_1",
	"HIFC_CMDPKT_CHILD_SETUP_INVALID_WQE_ERR_2",
	"HIFC_CLEAEQ_WQE_ERR",
	"HIFC_WQEFETCH_WQE_MSN_ERR",
	"HIFC_WQEFETCH_QUINFO_ERR",
	/* ctx_fatal_err */
	"HIFC_SCQE_ERR_BIT_ERR",
	"HIFC_UPDMA_ADDR_REQ_SRQ_ERR",
	"HIFC_SOLICHILDDMA_ADDR_REQ_ERR",
	"HIFC_UNSOLICHILDDMA_ADDR_REQ_ERR",
	"HIFC_SQE_CHILD_SETUP_QINFO_ERR_1",
	"HIFC_SQE_CHILD_SETUP_QINFO_ERR_2",
	"HIFC_CMDPKT_CHILD_SETUP_QINFO_ERR_1",
	"HIFC_CMDPKT_CHILD_SETUP_QINFO_ERR_2",
	"HIFC_CMDPKT_CHILD_SETUP_PMSN_ERR",
	"HIFC_CLEAEQ_CTX_ERR",
	"HIFC_WQEFETCH_CTX_ERR",
	"HIFC_FLUSH_QPC_ERR_LQP",
	"HIFC_FLUSH_QPC_ERR_SMF",
	"HIFC_PREFETCH_QPC_ERR_1",
	"HIFC_PREFETCH_QPC_ERR_2",
	"HIFC_PREFETCH_QPC_ERR_3",
	"HIFC_PREFETCH_QPC_ERR_4",
	"HIFC_PREFETCH_QPC_ERR_5",
	"HIFC_PREFETCH_QPC_ERR_6",
	"HIFC_PREFETCH_QPC_ERR_7",
	"HIFC_PREFETCH_QPC_ERR_8",
	"HIFC_PREFETCH_QPC_ERR_9",
	"HIFC_PREFETCH_QPC_ERR_10",
	"HIFC_PREFETCH_QPC_ERR_11",
	"HIFC_PREFETCH_QPC_ERR_DEFAULT",
	"HIFC_CHILDHASH_INSERT_SW_ERR",
	"HIFC_CHILDHASH_LOOKUP_SW_ERR",
	"HIFC_CHILDHASH_DEL_SW_ERR",
	"HIFC_FLOWHASH_INSERT_SW_ERR",
	"HIFC_FLOWHASH_LOOKUP_SW_ERR",
	"HIFC_FLOWHASH_DEL_SW_ERR",
};
/* Names of up-reported error events, grouped by the firmware's error-type
 * number (see the per-group comments).
 */
char *err_event_type[HIFC_MAX_COUNTER_TYPE] = {
	/* ERR type 0 Err value */
	"HIFC_DRV_2_UP_PARA_ERR",
	/* ERR type 1 Err value */
	"HIFC_SFP_SPEED_ERR",
	/* ERR type 2 Err value */
	"HIFC_32GPUB_UA_RXESCH_FIFO_OF",
	"HIFC_32GPUB_UA_RXESCH_FIFO_UCERR",
	/* ERR type 3 Err value */
	"HIFC_32G_UA_UATX_LEN_ABN",
	"HIFC_32G_UA_RXAFIFO_OF",
	"HIFC_32G_UA_TXAFIFO_OF",
	"HIFC_32G_UA_RXAFIFO_UCERR",
	"HIFC_32G_UA_TXAFIFO_UCERR",
	/* ERR type 4 Err value */
	"HIFC_32G_MAC_RX_BBC_FATAL",
	"HIFC_32G_MAC_TX_BBC_FATAL",
	"HIFC_32G_MAC_TXFIFO_UF",
	"HIFC_32G_MAC_PCS_TXFIFO_UF",
	"HIFC_32G_MAC_RXBBC_CRDT_TO",
	"HIFC_32G_MAC_PCS_RXAFIFO_OF",
	"HIFC_32G_MAC_PCS_TXFIFO_OF",
	"HIFC_32G_MAC_FC2P_RXFIFO_OF",
	"HIFC_32G_MAC_FC2P_TXFIFO_OF",
	"HIFC_32G_MAC_FC2P_CAFIFO_OF",
	"HIFC_32G_MAC_PCS_RXRSFECM_UCEER",
	"HIFC_32G_MAC_PCS_RXAFIFO_UCEER",
	"HIFC_32G_MAC_PCS_TXFIFO_UCEER",
	"HIFC_32G_MAC_FC2P_RXFIFO_UCEER",
	"HIFC_32G_MAC_FC2P_TXFIFO_UCEER",
	/* ERR type 5 Err value */
	"HIFC_NON32G_DFX_FC1_DFX_BF_FIFO",
	"HIFC_NON32G_DFX_FC1_DFX_BP_FIFO",
	"HIFC_NON32G_DFX_FC1_DFX_RX_AFIFO_ERR",
	"HIFC_NON32G_DFX_FC1_DFX_TX_AFIFO_ERR",
	"HIFC_NON32G_DFX_FC1_DFX_DIRQ_RXBUF_FIFO1",
	"HIFC_NON32G_DFX_FC1_DFX_DIRQ_RXBBC_TO",
	"HIFC_NON32G_DFX_FC1_DFX_DIRQ_TXDAT_FIFO",
	"HIFC_NON32G_DFX_FC1_DFX_DIRQ_TXCMD_FIFO",
	"HIFC_NON32G_DFX_FC1_ERR_R_RDY",
	/* ERR type 6 Err value */
	"HIFC_NON32G_MAC_FC1_FAIRNESS_ERROR",
};
/* Apply a port-state change request. Only UNF_PORT_CONFIG_STATE_RESET is
 * supported (it triggers a port reset); any other state is logged and
 * rejected with UNF_RETURN_ERROR.
 */
unsigned int hifc_set_port_state(void *v_hba, void *v_para_in)
{
	enum unf_port_config_state_e port_state = UNF_PORT_CONFIG_STATE_START;
	unsigned int ret = UNF_RETURN_ERROR;

	HIFC_CHECK(INVALID_VALUE32, NULL != v_hba, return UNF_RETURN_ERROR);
	HIFC_CHECK(INVALID_VALUE32, NULL != v_para_in, return UNF_RETURN_ERROR);

	port_state = *((enum unf_port_config_state_e *)v_para_in);
	if (port_state == UNF_PORT_CONFIG_STATE_RESET) {
		ret = (unsigned int)hifc_port_reset(v_hba);
	} else {
		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
			  "[warn]Cannot set port_state(0x%x)", port_state);
	}

	return ret;
}
unsigned int hifc_set_port_speed(void *v_hba, void *v_para_in)
{
unsigned long flags = 0;
unsigned int port_speed = 0;
struct hifc_hba_s *hba = v_hba;
HIFC_CHECK(INVALID_VALUE32, NULL != hba, return UNF_RETURN_ERROR);
HIFC_CHECK(INVALID_VALUE32, NULL != v_para_in, return UNF_RETURN_ERROR);
port_speed = *((unsigned int *)v_para_in);
if (port_speed > hba->max_support_speed) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[warn]Speed set(0x%x) exceed max speed(0x%x)",
port_speed,
hba->max_support_speed);
return UNF_RETURN_ERROR;
}
if ((port_speed >= HIFC_SPEED_16G) &&
(hba->port_topo_cfg == UNF_TOP_LOOP_MASK)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[warn]Cannot set speed(0x%x) in LOOP mode, check it",
port_speed);
return UNF_RETURN_ERROR;
}
spin_lock_irqsave(&hba->hba_lock, flags);
hba->port_speed_cfg = port_speed;
spin_unlock_irqrestore(&hba->hba_lock, flags);
if (hifc_port_reset(hba) != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]HIFC port(0x%x) can't reset HBA",
hba->port_cfg.port_id);
return UNF_RETURN_ERROR;
}
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_INFO,
"[info]HIFC port(0x%x) set port speed finished, configured speed: 0x%x",
hba->port_cfg.port_id, port_speed);
return RETURN_OK;
}
/* Set the adapter's maximum supported speed (stored under the HBA lock)
 * and reset the port so the new limit takes effect.
 */
unsigned int hifc_set_max_support_speed(void *v_hba, void *para_in)
{
	unsigned long flags = 0;
	unsigned char max_support_speed = 0;
	struct hifc_hba_s *hba = v_hba;

	HIFC_CHECK(INVALID_VALUE32, NULL != hba, return UNF_RETURN_ERROR);
	HIFC_CHECK(INVALID_VALUE32, NULL != para_in, return UNF_RETURN_ERROR);

	max_support_speed = *((unsigned char *)para_in);
	spin_lock_irqsave(&hba->hba_lock, flags);
	hba->max_support_speed = max_support_speed;
	spin_unlock_irqrestore(&hba->hba_lock, flags);

	/* the new limit only takes effect after a port reset */
	if (hifc_port_reset(hba) != RETURN_OK) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
			  "[err]HIFC port(0x%x) can't reset HBA",
			  hba->port_cfg.port_id);
		return UNF_RETURN_ERROR;
	}

	UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO,
		  "[info]HIFC set port(0x%x) speed finished, configured max support speed: 0x%x",
		  hba->port_cfg.port_id, max_support_speed);

	return RETURN_OK;
}
unsigned int hifc_set_loop_role(void *v_hba, void *para_in)
{
unsigned long flags = 0;
unsigned int loop_role = 0;
struct hifc_hba_s *hba = v_hba;
HIFC_CHECK(INVALID_VALUE32, NULL != hba, return UNF_RETURN_ERROR);
HIFC_CHECK(INVALID_VALUE32, NULL != para_in, return UNF_RETURN_ERROR);
loop_role = *((unsigned int *)para_in);
spin_lock_irqsave(&hba->hba_lock, flags);
hba->port_loop_role = loop_role;
spin_unlock_irqrestore(&hba->hba_lock, flags);
if (hifc_port_reset(hba) != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]HIFC port(0x%x) can't reset HBA",
hba->port_cfg.port_id);
return UNF_RETURN_ERROR;
}
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_INFO,
"[info]HIFC port(0x%x) set loop role finished, configured loop role: 0x%x",
hba->port_cfg.port_id, loop_role);
return RETURN_OK;
}
/* Set the configured port topology and reset the port to apply it.
 * Rejects loop topology while the configured speed is 16G or above
 * (mirror of the check in hifc_set_port_speed).
 */
unsigned int hifc_set_port_topo(void *v_hba, void *v_para_in)
{
	unsigned long flags = 0;
	unsigned int top = 0;
	struct hifc_hba_s *hba = v_hba;

	HIFC_CHECK(INVALID_VALUE32, NULL != hba, return UNF_RETURN_ERROR);
	HIFC_CHECK(INVALID_VALUE32, NULL != v_para_in, return UNF_RETURN_ERROR);

	top = *((unsigned int *)v_para_in);
	if ((top == UNF_TOP_LOOP_MASK) &&
	    (hba->port_speed_cfg >= HIFC_SPEED_16G)) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
			  "[err]Cannot set to loop mode at speed(0x%x), check it",
			  hba->port_speed_cfg);
		return UNF_RETURN_ERROR;
	}

	spin_lock_irqsave(&hba->hba_lock, flags);
	hba->port_topo_cfg = top;
	spin_unlock_irqrestore(&hba->hba_lock, flags);

	/* the new topology only takes effect after a port reset */
	if (hifc_port_reset(hba) != RETURN_OK) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
			  "[err]HIFC port(0x%x) can't reset HBA",
			  hba->port_cfg.port_id);
		return UNF_RETURN_ERROR;
	}

	UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO,
		  "[info]HIFC port(0x%x) set port topology finished, configured topology: 0x%x",
		  hba->port_cfg.port_id, top);

	return RETURN_OK;
}
/* Cache the FCP confirm configuration under the HBA lock.
 * Unlike the other setters in this file, this value does not require a
 * port reset to take effect.
 */
unsigned int hifc_set_port_fcp_conf(void *v_hba, void *para_in)
{
	struct hifc_hba_s *hba = v_hba;
	unsigned int fcp_conf = 0;
	unsigned long irq_flags = 0;

	HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR);
	HIFC_CHECK(INVALID_VALUE32, para_in, return UNF_RETURN_ERROR);

	fcp_conf = *((unsigned int *)para_in);

	spin_lock_irqsave(&hba->hba_lock, irq_flags);
	hba->fcp_conf_cfg = fcp_conf;
	spin_unlock_irqrestore(&hba->hba_lock, irq_flags);

	UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO,
		  "[info]HIFC set port(0x%x) FCP confirm finished, configured value: 0x%x",
		  hba->port_cfg.port_id, fcp_conf);

	return RETURN_OK;
}
/* Set the BB_SC_N (buffer-to-buffer state change notification) value
 * under the HBA lock and reset the port so it takes effect.
 */
unsigned int hifc_set_port_bbscn(void *v_hba, void *para_in)
{
	unsigned long flags = 0;
	unsigned int bbscn = 0;
	struct hifc_hba_s *hba = v_hba;

	HIFC_CHECK(INVALID_VALUE32, NULL != hba, return UNF_RETURN_ERROR);
	HIFC_CHECK(INVALID_VALUE32, NULL != para_in, return UNF_RETURN_ERROR);

	bbscn = *((unsigned int *)para_in);
	spin_lock_irqsave(&hba->hba_lock, flags);
	hba->port_bbscn_cfg = bbscn;
	spin_unlock_irqrestore(&hba->hba_lock, flags);

	/* the new value only takes effect after a port reset */
	if (hifc_port_reset(hba) != RETURN_OK) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
			  "[err]HIFC port(0x%x) can't reset HBA",
			  hba->port_cfg.port_id);
		return UNF_RETURN_ERROR;
	}

	UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO,
		  "[info]HIFC set port(0x%x) BBSCN finished, configured value: 0x%x",
		  hba->port_cfg.port_id, bbscn);

	return RETURN_OK;
}
/* Diagnose handler: fetch the firmware version details from the device
 * and, for message-type queries, copy the UP version into the caller's
 * unf_fw_version_s buffer.
 */
unsigned int hifc_show_fc_port_detail(void *v_hba, void *v_para)
{
	struct unf_fw_version_s *fw_version = (struct unf_fw_version_s *)v_para;
	struct hifc_fw_ver_detail_s version;

	memset(&version, 0, sizeof(struct hifc_fw_ver_detail_s));

	/* Obtain UP, ucode and boot version */
	if (hifc_get_software_version(v_hba, (void *)&version) != RETURN_OK)
		return UNF_RETURN_ERROR;

	if (fw_version->message_type == UNF_DEBUG_TYPE_MESSAGE)
		memcpy(fw_version->fw_version, version.up_ver, HIFC_VER_LEN);

	return RETURN_OK;
}
/* Dispatch a port diagnose request through the hifc_diag_op[] table.
 * Unknown opcodes fall through and return RETURN_OK; a matching entry
 * with a NULL handler is reported as an error.
 * Fix: the original logged "port diagnose succeed" before the parameter
 * checks and before any work was done (with op_idx always 0) — reworded
 * as a start message and moved after the checks.
 */
unsigned int hifc_port_diagnose(void *v_hba,
				enum unf_port_diag_op_e op_code,
				void *v_para)
{
	unsigned int op_idx = 0;

	HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR);
	HIFC_CHECK(INVALID_VALUE32, v_para, return UNF_RETURN_ERROR);

	HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
		   "[info]port diagnose start, opcode(0x%x)", op_code);

	for (op_idx = 0; op_idx < sizeof(hifc_diag_op) /
	     sizeof(struct hifc_port_diag_op_s);
	     op_idx++) {
		if (op_code == hifc_diag_op[op_idx].op_code) {
			if (!hifc_diag_op[op_idx].pfn_hifc_operation) {
				HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR,
					   UNF_LOG_REG_ATT, UNF_ERR,
					   "[err]Null operation for diagnose, opcode(0x%x), operation ID(0x%x)",
					   op_code, op_idx);
				return UNF_RETURN_ERROR;
			} else {
				return hifc_diag_op[op_idx].pfn_hifc_operation(v_hba, v_para);
			}
		}
	}

	return RETURN_OK;
}
/* Collect the per-task-type IO counters of this HBA into the tool's
 * output buffer (one u64 per non-NULL wqe_type[] entry, at the same
 * index).
 * Improvement: vmalloc()+memset() replaced with vzalloc(), the kernel
 * idiom for zeroed vmalloc memory.
 */
int hifc_dfx_get_rxtx_state(void *v_hba, void *v_buff_out)
{
	int ret = RETURN_OK;
	unsigned long long *counter_info = NULL;
	unsigned int probe_index = 0;
	unsigned int index = 0;
	unsigned int total = 0;
	struct hifc_adm_dfx_cmd_s *buff_out = NULL;
	struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba;

	buff_out = (struct hifc_adm_dfx_cmd_s *)v_buff_out;
	counter_info =
		vzalloc(sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64);
	if (!counter_info) {
		HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
			   "[err]malloc memory failed");
		return UNF_RETURN_ERROR;
	}

	probe_index = hba->probe_index;
	total = sizeof(wqe_type) / sizeof(char *);
	for (index = 0; index < total; index++) {
		if (wqe_type[index])
			counter_info[index] = HIFC_IO_STAT_READ(probe_index,
								index);
	}

	memcpy(buff_out->unresult.result, counter_info,
	       sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64);
	vfree(counter_info);

	return ret;
}
int hifc_dfx_get_rxtx_error_state(void *v_hba, void *v_buff_out)
{
char *hba_err_type[HIFC_HBA_STAT_BUTT];
int ret = RETURN_OK;
unsigned long long *counter_info = NULL;
unsigned int probe_index = 0;
unsigned int index = 0;
unsigned int counter = 0;
unsigned int total = 0;
struct hifc_adm_dfx_cmd_s *buff_out = NULL;
struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba;
buff_out = (struct hifc_adm_dfx_cmd_s *)v_buff_out;
counter_info =
vmalloc(sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64);
if (!counter_info) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]malloc memory failed");
return UNF_RETURN_ERROR;
}
memset(counter_info, 0,
sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64);
probe_index = hba->probe_index;
total = sizeof(wqe_type) / sizeof(char *);
for (index = 0; index < total; index++) {
if (wqe_type[index]) {
counter_info[counter] =
HIFC_ERR_IO_STAT_READ(probe_index, index);
counter++;
}
}
total = sizeof(hba_err_type) / sizeof(char *);
for (index = 0; index < total; index++) {
counter_info[counter] = hba_stat[probe_index][index];
counter++;
}
memcpy(buff_out->unresult.result, counter_info,
sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64);
vfree(counter_info);
return ret;
}
/* Collect the SCQ, AEQ, up-event and common-up error counters into the
 * tool's output buffer (packed, one u64 per non-NULL table entry).
 * NOTE(review): the scq_err_type loop starts at index 1 — presumably
 * because entry 0 ("HIFC_CQE_COMPLETED") is the success code, not an
 * error; confirm before changing.
 */
int hifc_dfx_get_error_state(void *v_hba, void *v_buff_out)
{
	int ret = RETURN_OK;
	unsigned long long *counter_info = NULL;
	unsigned int probe_index = 0;
	unsigned int index = 0;
	unsigned int counter = 0;
	unsigned int total = 0;
	struct hifc_adm_dfx_cmd_s *buff_out = NULL;
	struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba;

	buff_out = (struct hifc_adm_dfx_cmd_s *)v_buff_out;
	counter_info =
		vmalloc(sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64);
	if (!counter_info) {
		HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
			   "[err]malloc memory failed");
		return UNF_RETURN_ERROR;
	}
	memset(counter_info, 0,
	       sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64);
	probe_index = hba->probe_index;

	total = sizeof(scq_err_type) / sizeof(char *);
	for (index = 1; index < total; index++) {
		if (scq_err_type[index]) {
			counter_info[counter] =
				HIFC_SCQ_ERR_TYPE_STAT_READ(probe_index, index);
			counter++;
		}
	}

	total = sizeof(aeq_err_type) / sizeof(char *);
	for (index = 0; index < total; index++) {
		if (aeq_err_type[index]) {
			counter_info[counter] =
				HIFC_AEQ_ERR_TYPE_STAT_READ(probe_index, index);
			counter++;
		}
	}

	total = sizeof(err_event_type) / sizeof(char *);
	for (index = 0; index < total; index++) {
		if (err_event_type[index]) {
			counter_info[counter] =
				HIFC_UP_ERR_EVENT_STAT_READ(probe_index, index);
			counter++;
		}
	}

	total = sizeof(com_up_err_event_type) / sizeof(char *);
	for (index = 0; index < total; index++) {
		if (com_up_err_event_type[index]) {
			counter_info[counter] =
				HIFC_COM_UP_ERR_EVENT_STAT_READ(probe_index,
								index);
			counter++;
		}
	}

	memcpy(buff_out->unresult.result, counter_info,
	       sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64);
	vfree(counter_info);

	return ret;
}
/* Report link-event counters (slots 0..3) and per-reason link-down
 * counters (slot 4 onward) for the hifcadm DFX link-state query.
 * Returns RETURN_OK, or UNF_RETURN_ERROR on allocation failure.
 */
int hifc_dfx_get_link_state(void *v_hba, void *v_buff_out)
{
#define HIFC_LINK_UNKNOW 0
#define HIFC_LINK_UP 1
#define HIFC_LINK_DOWN 2
#define HIFC_FC_DELETE_CMND 3
#define HIFC_LINK_DOWN_REASON 4
	int ret = RETURN_OK;
	unsigned int i;
	unsigned int slot;
	unsigned int *stats = NULL;
	unsigned int port_idx = 0;
	struct hifc_adm_dfx_cmd_s *buff_out = NULL;
	struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba;

	buff_out = (struct hifc_adm_dfx_cmd_s *)v_buff_out;
	stats = vmalloc(sizeof(unsigned int) * HIFC_DFX_BACK_INFO_SIZE);
	if (!stats) {
		HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
			   "[err]malloc memory failed");
		return UNF_RETURN_ERROR;
	}
	memset(stats, 0, sizeof(unsigned int) * HIFC_DFX_BACK_INFO_SIZE);
	port_idx = hba->probe_index;

	/* fixed slots 0..3: link event counters */
	stats[HIFC_LINK_UNKNOW] =
	    (unsigned int)link_event_stat[port_idx][HIFC_LINK_UNKNOW];
	stats[HIFC_LINK_UP] =
	    (unsigned int)link_event_stat[port_idx][HIFC_LINK_UP];
	stats[HIFC_LINK_DOWN] =
	    (unsigned int)link_event_stat[port_idx][HIFC_LINK_DOWN];
	stats[HIFC_FC_DELETE_CMND] =
	    (unsigned int)link_event_stat[port_idx][HIFC_FC_DELETE_CMND];

	/* slots 4..: per-reason link-down counters; zero entries stay zero */
	for (i = 0; i < HIFC_MAX_LINK_REASON_CNT; i++) {
		if (!link_reason_stat[port_idx][i])
			continue;
		slot = HIFC_LINK_DOWN_REASON + i;
		stats[slot] = (unsigned int)link_reason_stat[port_idx][i];
	}

	memcpy(buff_out->unresult.result, stats,
	       sizeof(unsigned int) * HIFC_DFX_BACK_INFO_SIZE);
	vfree(stats);
	return ret;
}
int hifc_dfx_dif_error(void *v_hba, void *v_buff_out, unsigned int v_clear)
{
#define HIFC_MAX_DIF_ERROR_COUNTER 8
int ret = RETURN_OK;
unsigned int index = 0;
unsigned int total = 0;
unsigned long long *counter_info = NULL;
unsigned int probe_index = 0;
struct hifc_adm_dfx_cmd_s *buff_out = NULL;
struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba;
buff_out = (struct hifc_adm_dfx_cmd_s *)v_buff_out;
counter_info =
vmalloc(sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64);
if (!counter_info) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]malloc memory failed");
return UNF_RETURN_ERROR;
}
memset(counter_info, 0,
sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64);
probe_index = hba->probe_index;
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR,
"[info]The clear flag of DIF error counter is %u", v_clear);
if (!v_clear) {
total = HIFC_MAX_DIF_ERROR_COUNTER;
for (index = 1; index < total; index++)
counter_info[index - 1] =
HIFC_DIF_ERR_STAT_READ(probe_index, index);
memcpy(buff_out->unresult.result, counter_info,
sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64);
} else {
memset(dif_err_stat[probe_index], 0, sizeof(dif_err_stat[0]));
}
vfree(counter_info);
return ret;
}
int hifc_set_dfx_mode(void *v_hba, struct unf_hinicam_pkg *v_input)
{
int ret = UNF_RETURN_ERROR;
unsigned int mode;
struct hifc_adm_dfx_cmd_s *buff_in = NULL;
struct hifc_adm_dfx_cmd_s *buff_out = NULL;
HIFC_CHECK(INVALID_VALUE32, v_input, return UNF_RETURN_ERROR);
buff_in = v_input->buff_in;
buff_out = (struct hifc_adm_dfx_cmd_s *)v_input->buff_out;
HIFC_CHECK(INVALID_VALUE32, buff_in, return UNF_RETURN_ERROR);
HIFC_CHECK(INVALID_VALUE32, buff_out, return UNF_RETURN_ERROR);
HIFC_CHECK(INVALID_VALUE32,
v_input->in_size >= sizeof(struct hifc_adm_dfx_cmd_s),
return UNF_RETURN_ERROR);
HIFC_CHECK(INVALID_VALUE32,
*v_input->out_size >= sizeof(struct hifc_adm_dfx_cmd_s),
return UNF_RETURN_ERROR);
buff_out->msg_head.status = HIFC_ADM_MSG_DONE;
mode = buff_in->cmd[0];
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO,
"[info]Enter DFX mode(%u)", mode);
switch (mode) {
/* HBA WQE and SCQE statistic */
case HIFC_TX_RX_STATE_COUNTER:
ret = hifc_dfx_get_rxtx_state(v_hba, (void *)buff_out);
break;
/* TX and RX error counter, HBA counter */
case HIFC_TX_RX_ERROR_STATE_COUNTER:
ret = hifc_dfx_get_rxtx_error_state(v_hba, (void *)buff_out);
break;
/* SCQ, AEQ, uP, common uP error counter */
case HIFC_ERROR_STATE_COUNTER:
ret = hifc_dfx_get_error_state(v_hba, (void *)buff_out);
break;
case HIFC_LINK_STATE_COUNTER:
ret = hifc_dfx_get_link_state(v_hba, (void *)buff_out);
break;
case HIFC_HOST_COUNTER:
case HIFC_SESSION_COUNTER:
UNF_LOWLEVEL_TO_CM_HINICADM(ret,
((struct hifc_hba_s *)v_hba)->lport,
v_input);
break;
case HIFC_DIF_ERROR_COUNTER:
ret = hifc_dfx_dif_error(v_hba, (void *)buff_out,
buff_in->cmd[1]);
break;
default:
break;
}
if (ret != RETURN_OK) {
buff_out->msg_head.status = HIFC_ADM_MSG_FAILED;
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[warn]Get DFX information failed, mode:0x%0x", mode);
}
buff_out->msg_head.size = sizeof(struct hifc_adm_dfx_cmd_s);
*v_input->out_size = sizeof(struct hifc_adm_dfx_cmd_s);
return ret;
}
/* hifcadm FEC command: cmd[0] < HIFC_QUERY_FEC_MODE sets the FEC mode via
 * mailbox; cmd[0] == HIFC_QUERY_FEC_MODE returns the cached mode.
 *
 * Returns RETURN_OK on success; UNF_RETURN_ERROR on bad input or an
 * unrecognized sub-command; the mailbox error code if the set fails.
 */
unsigned int hifc_fec_mode(void *v_hba, struct unf_hinicam_pkg *v_input)
{
	unsigned int ret = UNF_RETURN_ERROR;
	unsigned int fec_mode = 0;
	struct hifc_adm_cmd_s *buff_in = NULL;
	struct hifc_adm_cmd_s *buff_out = NULL;
	struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba;

	HIFC_CHECK(INVALID_VALUE32, NULL != v_input, return UNF_RETURN_ERROR);
	buff_in = v_input->buff_in;
	buff_out = (struct hifc_adm_cmd_s *)v_input->buff_out;
	HIFC_CHECK(INVALID_VALUE32, NULL != buff_in, return UNF_RETURN_ERROR);
	HIFC_CHECK(INVALID_VALUE32, NULL != buff_out, return UNF_RETURN_ERROR);
	HIFC_CHECK(INVALID_VALUE32,
		   v_input->in_size >= sizeof(struct hifc_adm_cmd_s),
		   return UNF_RETURN_ERROR);
	HIFC_CHECK(INVALID_VALUE32,
		   *v_input->out_size >= sizeof(struct hifc_adm_cmd_s),
		   return UNF_RETURN_ERROR);

	buff_out->msg_head.status = HIFC_ADM_MSG_DONE;
	fec_mode = buff_in->cmd[0];
	if (fec_mode < HIFC_QUERY_FEC_MODE) {
		ret = hifc_mbx_set_fec(hba, fec_mode);
		if (ret != RETURN_OK) {
			buff_out->msg_head.status = HIFC_ADM_MSG_FAILED;
			UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT,
				  UNF_ERR,
				  "[err]Set fec mode(0x%x) failed", fec_mode);
			return ret;
		}
		/* Cache the new mode only after the firmware accepted it.
		 * (The original code updated fec_status before checking the
		 * mailbox result, leaving a stale cache on failure.)
		 */
		hba->fec_status = fec_mode;
	} else if (fec_mode == HIFC_QUERY_FEC_MODE) {
		buff_out->cmd[0] = hba->fec_status;
		ret = RETURN_OK;
	}
	buff_out->msg_head.size = sizeof(struct hifc_adm_msg_head_s);
	*v_input->out_size = sizeof(struct hifc_adm_cmd_s);
	return ret;
}
/* Validate and apply a basic port configuration entry (speed, topology,
 * BB_SCN, FEC). The FEC value is pushed to firmware immediately; the other
 * values are cached under the hba lock for the next link bring-up.
 * Returns RETURN_OK, or UNF_RETURN_ERROR on invalid input or mailbox error.
 */
unsigned int hifc_set_hba_base_info(void *v_hba, void *v_para_in)
{
#define HIFC_MML_CLOSE_FEC 0
#define HIFC_MML_OPEN_FEC_VIA_TTS 1
#define HIFC_MML_OPEN_FEC_ONLY 2
	struct unf_port_info_entry_s *entry = 0;
	struct hifc_hba_s *hba = v_hba;
	unsigned long irq_flag = 0;

	HIFC_CHECK(INVALID_VALUE32, NULL != hba, return UNF_RETURN_ERROR);
	HIFC_CHECK(INVALID_VALUE32, NULL != v_para_in, return UNF_RETURN_ERROR);
	entry = (struct unf_port_info_entry_s *)v_para_in;

	/* requested speed must not exceed the adapter's capability */
	if (entry->speed > hba->max_support_speed) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
			  "[warn]Port(0x%x) Speed set(0x%x) exceed max speed(0x%x)",
			  hba->port_cfg.port_id, entry->speed,
			  hba->max_support_speed);
		return UNF_RETURN_ERROR;
	}

	/* loop topology is rejected at 16G and above */
	if ((entry->speed >= HIFC_SPEED_16G) &&
	    (entry->topo == UNF_TOP_LOOP_MASK)) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
			  "[warn]Port(0x%x) Cannot set speed(0x%x) in LOOP mode, check it",
			  hba->port_cfg.port_id, entry->speed);
		return UNF_RETURN_ERROR;
	}

	/* FEC accepts exactly the three MML values defined above */
	if ((entry->fec != HIFC_MML_CLOSE_FEC) &&
	    (entry->fec != HIFC_MML_OPEN_FEC_VIA_TTS) &&
	    (entry->fec != HIFC_MML_OPEN_FEC_ONLY)) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
			  "[err]Port(0x%x) parameter error! please input 0,1 or 2!",
			  hba->port_cfg.port_id);
		return UNF_RETURN_ERROR;
	}

	if (hifc_mbx_set_fec(hba, entry->fec) != RETURN_OK) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
			  "[err]Port(0x%x) set FEC %u failed.\n",
			  hba->port_cfg.port_id,
			  entry->fec);
		return UNF_RETURN_ERROR;
	}

	/* publish the accepted configuration under the hba lock */
	spin_lock_irqsave(&hba->hba_lock, irq_flag);
	hba->port_speed_cfg = entry->speed;
	hba->port_topo_cfg = entry->topo;
	hba->port_bbscn_cfg = entry->bb_scn;
	spin_unlock_irqrestore(&hba->hba_lock, irq_flag);
	return RETURN_OK;
}
/* hifcadm BB_SCN command: cmd[0] == HIFC_SET_BBSCN_VALUE forwards the set
 * to the common module; cmd[0] == HIFC_QUERY_BBSCN_VALUE reports the active
 * and configured values.
 * Returns RETURN_OK on success, an error code otherwise (with msg_head
 * status set to HIFC_ADM_MSG_FAILED).
 */
unsigned int hifc_bbscn_mode(void *v_hba, struct unf_hinicam_pkg *v_input)
{
	unsigned int ret = UNF_RETURN_ERROR;
	unsigned int op;
	struct hifc_adm_cmd_s *buff_in = NULL;
	struct hifc_adm_cmd_s *buff_out = NULL;
	struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba;

	HIFC_CHECK(INVALID_VALUE32, NULL != v_input, return UNF_RETURN_ERROR);
	buff_in = v_input->buff_in;
	buff_out = (struct hifc_adm_cmd_s *)v_input->buff_out;
	HIFC_CHECK(INVALID_VALUE32, buff_in, return UNF_RETURN_ERROR);
	HIFC_CHECK(INVALID_VALUE32, buff_out, return UNF_RETURN_ERROR);
	HIFC_CHECK(INVALID_VALUE32,
		   v_input->in_size >= sizeof(struct hifc_adm_cmd_s),
		   return UNF_RETURN_ERROR);
	HIFC_CHECK(INVALID_VALUE32,
		   *v_input->out_size >= sizeof(struct hifc_adm_cmd_s),
		   return UNF_RETURN_ERROR);

	buff_out->msg_head.status = HIFC_ADM_MSG_DONE;
	op = buff_in->cmd[0];
	if (op == HIFC_SET_BBSCN_VALUE) {
		/* the actual set is performed by the common module */
		UNF_LOWLEVEL_TO_CM_HINICADM(ret, hba->lport, v_input);
	} else if (op == HIFC_QUERY_BBSCN_VALUE) {
		ret = hifc_get_port_info((void *)hba);
		/* active value is only meaningful while the link is up */
		buff_out->cmd[0] = (hba->phy_link == UNF_PORT_LINK_UP) ?
		    hba->active_bb_scn : UNF_FALSE;
		buff_out->cmd[1] = hba->port_bbscn_cfg;
		buff_out->msg_head.size = sizeof(struct hifc_adm_msg_head_s) +
					  sizeof(unsigned int);
	}

	if (ret != RETURN_OK) {
		buff_out->msg_head.status = HIFC_ADM_MSG_FAILED;
		UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
			  "[err]Execute BBSCN mode(0x%x) failed", op);
		return ret;
	}
	*v_input->out_size = sizeof(struct hifc_adm_cmd_s);
	return ret;
}
/* hifcadm fc_portstat: report per-port parent-queue state counters and,
 * for the rport index selected in buff_in->cmd[0], a detailed snapshot of
 * that rport's parent SQ (ids, producer/consumer MSNs, doorbell/WQE/CQE
 * counters and in-flight estimates).
 *
 * Returns RETURN_OK on success, UNF_RETURN_ERROR on invalid input or if
 * the parent queue manager is not set up.
 */
unsigned int hifc_port_stat(void *v_hba, struct unf_hinicam_pkg *v_input)
{
	struct hifc_adm_lsq_info_s *buff_in = NULL;
	struct hifc_adm_lsq_info_s *buff_out = NULL;
	struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba;
	unsigned int rport_start = 0;
	struct hifc_parent_queue_mgr_s *parent_queue_mgr = NULL;
	unsigned int index = 0;
	/* histogram of parent-queue offload states, indexed by state */
	unsigned int queue_state[HIFC_QUEUE_STATE_BUTT] = { 0 };
	struct hifc_parent_sq_info_s *sq = NULL;
	int out_standing_cnt = 0;
	unsigned int in_sq_cnt = 0;

	HIFC_CHECK(INVALID_VALUE32, NULL != v_input, return UNF_RETURN_ERROR);
	HIFC_CHECK(INVALID_VALUE32, NULL != v_hba, return UNF_RETURN_ERROR);
	buff_in = v_input->buff_in;
	buff_out = (struct hifc_adm_lsq_info_s *)v_input->buff_out;
	HIFC_CHECK(INVALID_VALUE32, buff_in, return UNF_RETURN_ERROR);
	HIFC_CHECK(INVALID_VALUE32, buff_out, return UNF_RETURN_ERROR);
	HIFC_CHECK(INVALID_VALUE32,
		   v_input->in_size >= sizeof(struct hifc_adm_lsq_info_s),
		   return UNF_RETURN_ERROR);
	HIFC_CHECK(INVALID_VALUE32,
		   *v_input->out_size >= sizeof(struct hifc_adm_lsq_info_s),
		   return UNF_RETURN_ERROR);

	/* cmd[0] selects which rport's SQ to snapshot below */
	rport_start = buff_in->cmd[0];
	parent_queue_mgr = hba->parent_queue_mgr;
	if (!parent_queue_mgr) {
		HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
			   "Port 0x%x Parent Queue Manager is Empty",
			   hba->port_cfg.port_id);
		return UNF_RETURN_ERROR;
	}

	/* count queues per offload state across all rport slots */
	for (index = 0; index < UNF_HIFC_MAXRPORT_NUM; index++) {
		if (parent_queue_mgr->parent_queues[index].offload_state <
		    HIFC_QUEUE_STATE_BUTT)
			queue_state[parent_queue_mgr->parent_queues[index].offload_state]++;
	}

	buff_out->port_state.port_id = hba->port_cfg.port_id;
	/* every non-FREE slot corresponds to an rport */
	buff_out->port_state.rport_num =
	    (UNF_HIFC_MAXRPORT_NUM - queue_state[HIFC_QUEUE_STATE_FREE]);
	buff_out->port_state.init = queue_state[HIFC_QUEUE_STATE_INITIALIZED];
	buff_out->port_state.offloading =
	    queue_state[HIFC_QUEUE_STATE_OFFLOADING];
	buff_out->port_state.offloaded =
	    queue_state[HIFC_QUEUE_STATE_OFFLOADED];
	buff_out->port_state.destroying =
	    queue_state[HIFC_QUEUE_STATE_DESTROYING];

	index = rport_start;
	/* detailed SQ snapshot only if the selected slot is in use */
	if ((index < UNF_HIFC_MAXRPORT_NUM) &&
	    (parent_queue_mgr->parent_queues[index].offload_state !=
	    HIFC_QUEUE_STATE_FREE)) {
		sq = &parent_queue_mgr->parent_queues[index].parent_sq_info;
		buff_out->sq.sq_id = index;
		buff_out->sq.rport_index = sq->rport_index;
		buff_out->sq.xid = sq->context_id;
		buff_out->sq.cid = sq->cache_id;
		buff_out->sq.sid = sq->local_port_id;
		buff_out->sq.did = sq->remote_port_id;
		buff_out->sq.vpid = parent_queue_mgr->parent_queues[index].parent_sq_info.vport_id;
		buff_out->sq.cmd_local_queue_id = parent_queue_mgr->parent_queues[index].parent_cmd_scq_info.local_queue_id;
		buff_out->sq.cmd_cqm_queue_id = parent_queue_mgr->parent_queues[index].parent_cmd_scq_info.cqm_queue_id;
		buff_out->sq.sts_local_queue_id = parent_queue_mgr->parent_queues[index].parent_sts_scq_info.local_queue_id;
		buff_out->sq.sts_cqm_queue_id = parent_queue_mgr->parent_queues[index].parent_sts_scq_info.cqm_queue_id;
		buff_out->sq.cos =
		    parent_queue_mgr->parent_queues[index].queue_data_cos;
		buff_out->sq.off_load =
		    parent_queue_mgr->parent_queues[index].offload_state;
		/* the counters below are sampled without stopping traffic,
		 * so they are a best-effort, possibly slightly inconsistent
		 * snapshot; the barriers order the two CMSN reads
		 */
		out_standing_cnt = atomic_read(&sq->sqe_minus_cqe_cnt);
		/* read memory barrier */
		rmb();
		in_sq_cnt = HIFC_QUEUE_MSN_OFFSET(HIFC_GET_QUEUE_CMSN(sq),
						  sq->last_pmsn);
		/* read memory barrier */
		rmb();
		buff_out->sq.cmsn = HIFC_GET_QUEUE_CMSN(sq);
		buff_out->sq.pmsn = sq->last_pmsn;
		buff_out->sq.db_cnt = atomic_read(&sq->sq_dbl_cnt);
		buff_out->sq.sqe_cnt = atomic_read(&sq->sq_wqe_cnt);
		buff_out->sq.cqe_cnt = atomic_read(&sq->sq_cqe_cnt);
		buff_out->sq.in_sq_cnt = in_sq_cnt;
		/* outstanding minus still-queued = submitted to the chip */
		buff_out->sq.in_chip_cnt = out_standing_cnt - (int)in_sq_cnt;
		buff_out->mark = UNF_TRUE;
	} else {
		/* mark tells the tool whether the sq member is valid */
		buff_out->mark = UNF_FALSE;
	}
	return RETURN_OK;
}
/* hifcadm all-info query. cmd[0] == HIFC_INQUIRE_PORT_NUM_MODE returns the
 * number of probed ports; otherwise cmd[1] selects the n-th probed port
 * (by probe bitmap) and the request is forwarded to the common module.
 *
 * Returns RETURN_OK / forwarded result, or UNF_RETURN_ERROR on failure.
 */
unsigned int hifc_port_info(struct unf_hinicam_pkg *v_input)
{
#define HIFC_INQUIRE_PORT_NUM_MODE 1
	unsigned int ret = UNF_RETURN_ERROR;
	unsigned int inquire_type;
	unsigned int probe_total_num = 0;
	unsigned int probe_index = 0;
	unsigned int count = 0;
	struct hifc_adm_cmd_s *buff_in = NULL;
	struct hifc_adm_cmd_s *buff_out = NULL;
	struct hifc_hba_s *hba = NULL;

	HIFC_CHECK(INVALID_VALUE32, v_input, return UNF_RETURN_ERROR);
	buff_in = v_input->buff_in;
	buff_out = (struct hifc_adm_cmd_s *)v_input->buff_out;
	HIFC_CHECK(INVALID_VALUE32, buff_in, return UNF_RETURN_ERROR);
	HIFC_CHECK(INVALID_VALUE32, buff_out, return UNF_RETURN_ERROR);
	HIFC_CHECK(INVALID_VALUE32,
		   v_input->in_size >= sizeof(struct hifc_adm_cmd_s),
		   return UNF_RETURN_ERROR);
	HIFC_CHECK(INVALID_VALUE32,
		   *v_input->out_size >= sizeof(struct hifc_adm_cmd_s),
		   return UNF_RETURN_ERROR);

	hifc_get_total_probed_num(&probe_total_num);
	/* First bit is used to obtain total probe number */
	inquire_type = buff_in->cmd[0];
	if (inquire_type == HIFC_INQUIRE_PORT_NUM_MODE) {
		buff_out->cmd[0] = probe_total_num;
		buff_out->msg_head.status = HIFC_ADM_MSG_DONE;
		*v_input->out_size = sizeof(struct hifc_adm_cmd_s);
		return RETURN_OK;
	}

	/* walk the probe bitmap to find the cmd[1]-th probed port.
	 * NOTE(review): the break is taken before the probe bit of the
	 * current index is tested, so for cmd[1] == 0 this selects index 0
	 * even when bit 0 is clear; presumably probe indexes are allocated
	 * densely from 0 - verify against the probe/bitmap allocator.
	 */
	spin_lock(&probe_spin_lock);
	for (probe_index = 0; probe_index < HIFC_MAX_PROBE_PORT_NUM;
	     probe_index++) {
		/* Second bit is used to determine to obtain which port */
		if (buff_in->cmd[1] == count)
			break;
		if (test_bit((int)probe_index,
			     (const unsigned long *)probe_bit_map))
			count++;
	}
	spin_unlock(&probe_spin_lock);

	if (probe_index == HIFC_MAX_PROBE_PORT_NUM) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
			  "[err]Can't find port(0x%x) total port(0x%x)",
			  buff_in->cmd[1], probe_total_num);
		buff_out->msg_head.status = HIFC_ADM_MSG_FAILED;
		return ret;
	}

	hba = hifc_hba[probe_index];
	/* Obtain buffer length applied from user */
	v_input->in_size = buff_in->cmd[2];
	if (!hba)
		return UNF_RETURN_ERROR;
	/* forward to the common module for the selected port */
	UNF_LOWLEVEL_TO_CM_HINICADM(ret, hba->lport, v_input);
	return ret;
}
/* Main entry for hifcadm tool commands targeted at the FC driver.
 * Version and compatibility queries are answered without an HBA; all other
 * message types are dispatched per msg_formate (enum hifc_msg_format_e),
 * defaulting to the common module.
 *
 * @uld_dev:     HBA handle (struct hifc_hba_s *); may be unused for the
 *               version/compat queries
 * @msg_formate: command id from the tool
 * @buffin/@in_size, @buff_out/@out_size: tool request/response buffers;
 *               *out_size is updated with the bytes actually produced
 *
 * Returns RETURN_OK or an error code from the dispatched handler.
 */
int hifc_adm(void *uld_dev, unsigned int msg_formate, void *buffin,
	     unsigned int in_size, void *buff_out, unsigned int *out_size)
{
	int ret = UNF_RETURN_ERROR;
	struct hifc_hba_s *hba = NULL;
	struct unf_hinicam_pkg adm_pkg = { 0 };
	struct hifc_drv_version_s *ver_info;
	char ver_str[HIFC_VER_INFO_SIZE] = { 0 };

	HIFC_CHECK(INVALID_VALUE32, NULL != buff_out, return UNF_RETURN_ERROR);
	HIFC_CHECK(INVALID_VALUE32, NULL != buffin, return UNF_RETURN_ERROR);
	HIFC_CHECK(INVALID_VALUE32, NULL != out_size, return UNF_RETURN_ERROR);

	adm_pkg.msg_format = msg_formate;
	adm_pkg.buff_in = buffin;
	adm_pkg.buff_out = buff_out;
	adm_pkg.in_size = in_size;
	adm_pkg.out_size = out_size;

	if (msg_formate == HIFC_GET_DRIVER_VERSION) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
			  "[info]Enter HIFC_GET_DRIVER_VERSION");
		snprintf(ver_str, sizeof(ver_str), "%s %s", UNF_FC_VERSION,
			 __TIME_STR__);
		ver_info = (struct hifc_drv_version_s *)buff_out;
		HIFC_CHECK(INVALID_VALUE32,
			   *out_size >= sizeof(struct hifc_drv_version_s),
			   return UNF_RETURN_ERROR);
		/* NOTE(review): copies HIFC_VER_INFO_SIZE bytes - assumes
		 * ver_info->ver is at least that large; confirm against
		 * struct hifc_drv_version_s.
		 */
		memcpy(ver_info->ver, ver_str, sizeof(ver_str));
		*(unsigned int *)out_size = sizeof(struct hifc_drv_version_s);
		return RETURN_OK;
	}

	if (msg_formate == HIFC_COMPAT_TEST) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
			  "[info]Enter driver compatibility test");
		/* UNF_TRUE: driver is compatible with hifcadm */
		*(unsigned char *)buff_out = UNF_TRUE;
		*(unsigned int *)out_size = sizeof(unsigned char);
		return RETURN_OK;
	}

	/* all remaining commands require a valid HBA */
	HIFC_CHECK(INVALID_VALUE32, NULL != uld_dev, return UNF_RETURN_ERROR);
	hba = (struct hifc_hba_s *)uld_dev;
	UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO,
		  "[info]Enter hifc_adm, msg_formate(0x%x)", msg_formate);

	switch (msg_formate) {
	case HIFC_DFX:
		ret = hifc_set_dfx_mode((void *)hba, &adm_pkg);
		break;
	case HIFC_FEC_SET:
		ret = (int)hifc_fec_mode((void *)hba, &adm_pkg);
		break;
	case HIFC_BBSCN:
		ret = (int)hifc_bbscn_mode((void *)hba, &adm_pkg);
		break;
	case HIFC_PORTSTAT:
		ret = (int)hifc_port_stat((void *)hba, &adm_pkg);
		break;
	case HIFC_ALL_INFO_OP:
		ret = (int)hifc_port_info(&adm_pkg);
		break;
	default:
		/* anything unrecognized goes to the common module */
		UNF_LOWLEVEL_TO_CM_HINICADM(ret, hba->lport, &adm_pkg);
		break;
	}

	UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO,
		  "[info]Enter hifc_adm 0x%x", *adm_pkg.out_size);
	return ret;
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __HIFC_PORTMNG_H__
#define __HIFC_PORTMNG_H__

#include "unf_common.h"
#include "hifc_module.h"
#include "hifc_hba.h"

/* number of u32 command words carried in each hifcadm request */
#define HIFC_PORT_INFO_SIZE 10
/* DFX result payload capacity, in u32 and u64 units respectively */
#define HIFC_DFX_BACK_INFO_SIZE 406
#define HIFC_DFX_BACK_INFO_SIZE64 203
/* hifc_adm() msg_formate value for a driver-version query */
#define HIFC_GET_DRIVER_VERSION 16
/* BB_SCN sub-commands (buff_in->cmd[0] of hifc_bbscn_mode()) */
#define HIFC_SET_BBSCN_VALUE 0
#define HIFC_QUERY_BBSCN_VALUE 1
/* FEC sub-command: values below this set the mode, this value queries it */
#define HIFC_QUERY_FEC_MODE 2
/* fc_dfx tool protocol sizes and limits */
#define FC_DFX_SEND_INFO_SIZE 5
#define FC_DFX_BACK_INFO_64 203
#define FC_DFX_BACK_INFO_32 406
#define FC_DFX_MAX_IO_RETURN_VALUE 0x12
#define FC_DFX_MAX_SCSI_CMD 0xFF
#define FC_DFX_SCSI_CMD_FIRST_GET 100
/* two 4-bit session states packed into one byte for the DFX session dump */
struct unf_adm_dfx_session_state {
	unsigned char session1 : 4;
	unsigned char session2 : 4;
};

/* per-session statistics reported by the HIFC_SESSION_COUNTER DFX mode */
struct session_counter_s {
	u64 target_busy;
	u64 host_busy;
	u64 remote_port_wwpn;
	u64 local_port_wwpn;
	u32 device_alloc;
	u32 device_destroy;
	u32 scsi_state;
	u32 remote_port_nportid;
	u32 remote_port_state;
	u32 remote_port_scsiid;
	u32 remote_port_index;
	u32 local_port_nportid;
	u32 local_port_ini_state;
	u32 local_port_state;
	u32 port_id;
	u32 host_id;
	u32 target_id;
	/* task-management / reset counters and their results */
	u32 abort_io;
	u32 device_reset;
	u32 target_reset;
	u32 bus_reset;
	u32 virtual_reset;
	u32 abort_io_result;
	u32 device_reset_result;
	u32 target_reset_result;
	u32 bus_reset_result;
	u32 virtual_reset_result;
};

/* completion status placed in hifc_adm_msg_head_s.status */
enum hifc_adm_msg_status_e {
	HIFC_ADM_MSG_DONE = 0,
	HIFC_ADM_MSG_INCOMPLETE,
	HIFC_ADM_MSG_FAILED,
	HIFC_ADM_MSG_BUTT
};

/* maps a port-diagnose op-code to its handler function */
struct hifc_port_diag_op_s {
	enum unf_port_diag_op_e op_code;
	unsigned int (*pfn_hifc_operation)(void *v_hba, void *v_para);
};
/* DFX sub-command ids carried in hifc_adm_dfx_cmd_s.cmd[0] */
enum hifc_adm_dfx_mod_e {
	/* HBA WQE and SCQE statistic */
	HIFC_TX_RX_STATE_COUNTER = 0,
	/* TX and RX error counter, HBA counter */
	HIFC_TX_RX_ERROR_STATE_COUNTER,
	/* SCQ, AEQ, uP, common uP error counter */
	HIFC_ERROR_STATE_COUNTER,
	/* Link state counter */
	HIFC_LINK_STATE_COUNTER,
	/* Host counter */
	HIFC_HOST_COUNTER,
	/* session counter */
	HIFC_SESSION_COUNTER,
	/* DIF error counter */
	HIFC_DIF_ERROR_COUNTER,
	HIFC_ALL_DFX_TYPE = 50,
};

/* top-level hifc_adm() message types (values shared with the hifcadm tool) */
enum hifc_msg_format_e {
	HIFC_DFX = 7,
	HIFC_FEC_SET,
	HIFC_BBSCN,
	HIFC_PORTSTAT = 24,
	HIFC_ALL_INFO_OP = 25,
	/* compatibility probe: driver answers UNF_TRUE */
	HIFC_COMPAT_TEST = 0xFF
};
/* common response header: payload size and enum hifc_adm_msg_status_e */
struct hifc_adm_msg_head_s {
	unsigned int size;
	unsigned short status;
	unsigned short rsvd;
};

/* port state for fc_portstat */
struct hifc_adm_port_state {
	unsigned int port_id;
	unsigned int rport_num;
	/* queue counts per offload state */
	unsigned int init;
	unsigned int offloading;
	unsigned int offloaded;
	unsigned int destroying;
};

/* SQ & IoStat for fc_portstat */
struct hifc_adm_sq {
	unsigned int sq_id;
	unsigned int rport_index;
	unsigned int xid;
	unsigned int cid;
	unsigned int sid;
	unsigned int did;
	unsigned int vpid;
	unsigned int cmd_local_queue_id;
	unsigned int cmd_cqm_queue_id;
	unsigned int sts_local_queue_id;
	unsigned int sts_cqm_queue_id;
	unsigned int cos;
	unsigned int off_load;
	/* consumer/producer message sequence numbers */
	unsigned int cmsn;
	unsigned int pmsn;
	unsigned int db_cnt;
	unsigned int sqe_cnt;
	unsigned int cqe_cnt;
	unsigned int in_sq_cnt;
	unsigned int in_chip_cnt;
};

/* hifcadm fc_portstat struct,that is used to show ListSqinfo from mml */
struct hifc_adm_lsq_info_s {
	struct hifc_adm_msg_head_s msg_head;
	unsigned int cmd[HIFC_PORT_INFO_SIZE];
	struct hifc_adm_port_state port_state;
	struct hifc_adm_sq sq;
	/* UNF_TRUE when the sq member above holds valid data */
	unsigned int mark;
};
/* per-host statistics reported by the HIFC_HOST_COUNTER DFX mode */
struct unf_adm_dfx_host_counter_s {
	unsigned int host_num;
	unsigned int port_id;
	unsigned int scsi_session_add_success;
	unsigned int scsi_session_add_failed;
	unsigned int scsi_session_del_success;
	unsigned int scsi_session_del_failed;
	unsigned int device_alloc;
	unsigned int device_destroy;
	unsigned int session_loss_tmo;
	unsigned int alloc_scsi_id;
	unsigned int reuse_scsi_id;
	unsigned int resume_scsi_id;
	unsigned int add_start_work_failed;
	unsigned int add_closing_work_failed;
	/* task-management / reset counters and their results */
	unsigned int abort_io;
	unsigned int device_reset;
	unsigned int target_reset;
	unsigned int bus_reset;
	unsigned int virtual_reset;
	unsigned int abort_io_result;
	unsigned int device_reset_result;
	unsigned int target_reset_result;
	unsigned int bus_reset_result;
	unsigned int virtual_reset_result;
	/* packed 4-bit state pairs, see unf_adm_dfx_session_state */
	struct unf_adm_dfx_session_state session_state[1024];
};

/* hifcadm fc_port struct */
struct hifc_adm_cmd_s {
	struct hifc_adm_msg_head_s msg_head;
	unsigned int cmd[HIFC_PORT_INFO_SIZE];
};

/* hifcadm fc_dfx struct */
struct hifc_adm_dfx_cmd_s {
	struct hifc_adm_msg_head_s msg_head;
	unsigned int cmd[HIFC_PORT_INFO_SIZE];
	/* response payload; which member is valid depends on cmd[0] */
	union {
		unsigned long long result[HIFC_DFX_BACK_INFO_SIZE64];
		struct unf_adm_dfx_host_counter_s host_cnt;
		struct session_counter_s session_cnt;
		unsigned long long scsi_cmd_in;
		unsigned long long scsi_cmd_done;
		unsigned long long target_busy;
		unsigned long long host_busy;
	} unresult;
};
/* port diagnose / configuration entry points exported by the port manager */
unsigned int hifc_port_diagnose(void *v_hba, enum unf_port_diag_op_e op_code,
				void *v_para);
unsigned int hifc_set_port_speed(void *v_hba, void *v_para_in);
unsigned int hifc_set_port_bbscn(void *v_hba, void *v_para_in);
unsigned int hifc_set_port_state(void *v_hba, void *v_para_in);
unsigned int hifc_set_port_topo(void *v_hba, void *v_para_in);
unsigned int hifc_set_port_fcp_conf(void *v_hba, void *v_para_in);
unsigned int hifc_set_loop_role(void *v_hba, void *v_para_in);
unsigned int hifc_set_max_support_speed(void *v_hba, void *v_para_in);
unsigned int hifc_show_fc_port_detail(void *v_hba, void *v_para);
/* hifcadm tool-channel entry points */
int hifc_adm(void *uld_dev, unsigned int msg_formate, void *buffin,
	     unsigned int in_size, void *buff_out, unsigned int *out_size);
unsigned int hifc_fec_mode(void *v_hba, struct unf_hinicam_pkg *v_input);
int hifc_set_dfx_mode(void *v_hba, struct unf_hinicam_pkg *v_input);
int hifc_dfx_get_link_state(void *v_hba, void *v_buff_out);
int hifc_dfx_get_error_state(void *v_hba, void *v_buff_out);
int hifc_dfx_get_rxtx_state(void *v_hba, void *v_buff_out);
unsigned int hifc_bbscn_mode(void *v_hba, struct unf_hinicam_pkg *v_input);
unsigned int hifc_port_stat(void *v_hba, struct unf_hinicam_pkg *v_input);
int hifc_dfx_dif_error(void *v_hba, void *v_buff_out, unsigned int v_clear);
unsigned int hifc_set_hba_base_info(void *v_hba, void *v_para_in);
#endif /* __HIFC_PORTMNG_H__ */
// SPDX-License-Identifier: GPL-2.0
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <net/sock.h>
#include "hifc_knl_adp.h"
#include "hifc_hw.h"
#include "hifc_hwif.h"
#include "hifc_api_cmd.h"
#include "hifc_mgmt.h"
#include "hifc_lld.h"
#include "hifc_dbgtool_knl.h"
#include "hifc_tool.h"
#include "hifc_portmng.h"
/* character device exposed to the hifcadm user-space tool */
#define HIADM_DEV_PATH "/dev/hifc_dev"
#define HIADM_DEV_CLASS "hifc_class"
#define HIADM_DEV_NAME "hifc_dev"
#define MAJOR_DEV_NUM 921
/* upper bounds for tool message buffers: 2KB via cmdq, 2MB otherwise */
#define HIFC_CMDQ_BUF_MAX_SIZE 2048U
#define MSG_MAX_IN_SIZE (2048 * 1024)
#define MSG_MAX_OUT_SIZE (2048 * 1024)

/* tool chardev state; init flag and ref count guard repeated init/uninit */
static dev_t g_dev_id = {0};
static struct class *g_nictool_class;
static struct cdev g_nictool_cdev;
static int g_nictool_init_flag;
static int g_nictool_ref_cnt;
/* Release a buffer obtained from alloc_buff_in(); the release path must
 * mirror the allocation path (cmdq buffer for SEND_TO_UCODE, kzalloc
 * otherwise).
 */
static void free_buff_in(void *hwdev, struct msg_module *nt_msg, void *buf_in)
{
	if (!buf_in)
		return;

	if (nt_msg->module != SEND_TO_UCODE) {
		kfree(buf_in);
		return;
	}

	hifc_free_cmd_buf(hwdev, buf_in);
}
/* Allocate the kernel-side input buffer for a tool message and fill it
 * from user space.
 *
 * For SEND_TO_UCODE a cmdq command buffer is used (*buf_in points to the
 * struct hifc_cmd_buf wrapper, limited to HIFC_CMDQ_BUF_MAX_SIZE); for
 * everything else a plain kzalloc (limited to MSG_MAX_IN_SIZE).
 * On success the caller owns *buf_in and must release it with
 * free_buff_in(). Returns 0, -ENOMEM, or -EFAULT on a failed user copy.
 */
static int alloc_buff_in(void *hwdev, struct msg_module *nt_msg,
			 u32 in_size, void **buf_in)
{
	void *msg_buf;

	/* nothing to allocate for zero-length input */
	if (!in_size)
		return 0;

	if (nt_msg->module == SEND_TO_UCODE) {
		struct hifc_cmd_buf *cmd_buf;

		if (in_size > HIFC_CMDQ_BUF_MAX_SIZE) {
			pr_err("Cmdq in size(%u) more than 2KB\n", in_size);
			return -ENOMEM;
		}
		cmd_buf = hifc_alloc_cmd_buf(hwdev);
		if (!cmd_buf) {
			pr_err("Alloc cmdq cmd buffer failed in %s\n",
			       __func__);
			return -ENOMEM;
		}
		/* user data lands in the wrapped buffer, caller gets the
		 * wrapper
		 */
		msg_buf = cmd_buf->buf;
		*buf_in = (void *)cmd_buf;
		cmd_buf->size = (u16)in_size;
	} else {
		if (in_size > MSG_MAX_IN_SIZE) {
			pr_err("In size(%u) more than 2M\n", in_size);
			return -ENOMEM;
		}
		msg_buf = kzalloc(in_size, GFP_KERNEL);
		*buf_in = msg_buf;
	}
	if (!(*buf_in)) {
		pr_err("Alloc buffer in failed\n");
		return -ENOMEM;
	}

	if (copy_from_user(msg_buf, nt_msg->in_buff, in_size)) {
		pr_err("%s:%d: Copy from user failed\n",
		       __func__, __LINE__);
		/* release whichever buffer type was just allocated */
		free_buff_in(hwdev, nt_msg, *buf_in);
		return -EFAULT;
	}

	return 0;
}
static void free_buff_out(void *hwdev, struct msg_module *nt_msg,
void *buf_out)
{
if (!buf_out)
return;
if (nt_msg->module == SEND_TO_UCODE &&
!nt_msg->ucode_cmd.ucode_db.ucode_imm)
hifc_free_cmd_buf(hwdev, buf_out);
else
kfree(buf_out);
}
/* Allocate the kernel-side output buffer for a tool message: a cmdq
 * command buffer for non-immediate SEND_TO_UCODE messages (2KB limit),
 * otherwise a zeroed kzalloc (2MB limit). Caller frees via
 * free_buff_out(). Returns 0 or -ENOMEM.
 */
static int alloc_buff_out(void *hwdev, struct msg_module *nt_msg,
			  u32 out_size, void **buf_out)
{
	bool use_cmdq;

	if (!out_size)
		return 0;

	use_cmdq = (nt_msg->module == SEND_TO_UCODE) &&
		   !nt_msg->ucode_cmd.ucode_db.ucode_imm;
	if (use_cmdq) {
		if (out_size > HIFC_CMDQ_BUF_MAX_SIZE) {
			pr_err("Cmdq out size(%u) more than 2KB\n", out_size);
			return -ENOMEM;
		}
		*buf_out = (void *)hifc_alloc_cmd_buf(hwdev);
	} else {
		if (out_size > MSG_MAX_OUT_SIZE) {
			pr_err("out size(%u) more than 2M\n", out_size);
			return -ENOMEM;
		}
		*buf_out = kzalloc(out_size, GFP_KERNEL);
	}

	if (!(*buf_out)) {
		pr_err("Alloc buffer out failed\n");
		return -ENOMEM;
	}
	return 0;
}
/* Copy the response back to user space; for non-immediate cmdq messages
 * the payload lives inside the hifc_cmd_buf wrapper.
 * Returns 0, or -EFAULT if the user copy fails.
 */
static int copy_buf_out_to_user(struct msg_module *nt_msg,
				u32 out_size, void *buf_out)
{
	void *msg_src;
	bool cmdq_payload;

	cmdq_payload = (nt_msg->module == SEND_TO_UCODE) &&
		       !nt_msg->ucode_cmd.ucode_db.ucode_imm;
	msg_src = cmdq_payload ?
	    ((struct hifc_cmd_buf *)buf_out)->buf : buf_out;

	return copy_to_user(nt_msg->out_buf, msg_src, out_size) ? -EFAULT : 0;
}
/* Lazily allocate the per-card api-chain memory that user space maps
 * (2^10 pages, zeroed, pages marked reserved so they can be mmapped).
 * The cached virtual/physical addresses live in the g_card_* globals and
 * the whole sequence is serialized by g_addr_lock.
 * Returns 0 on success, -EFAULT on allocation / phys-lookup failure.
 */
static int __get_card_usr_api_chain_mem(int card_idx)
{
#define DBGTOOL_PAGE_ORDER 10
	unsigned char *tmp;
	int i;

	mutex_lock(&g_addr_lock);
	/* remember the most recently requested card for the tool */
	card_id = card_idx;
	if (!g_card_vir_addr[card_idx]) {
		g_card_vir_addr[card_idx] =
		    (void *)__get_free_pages(GFP_KERNEL,
					     DBGTOOL_PAGE_ORDER);
		if (!g_card_vir_addr[card_idx]) {
			pr_err("Alloc api chain memory fail for card %d.\n",
			       card_idx);
			mutex_unlock(&g_addr_lock);
			return -EFAULT;
		}
		memset(g_card_vir_addr[card_idx], 0,
		       PAGE_SIZE * (1 << DBGTOOL_PAGE_ORDER));
		g_card_phy_addr[card_idx] =
		    virt_to_phys(g_card_vir_addr[card_idx]);
		if (!g_card_phy_addr[card_idx]) {
			pr_err("phy addr for card %d is 0.\n", card_idx);
			/* roll back the allocation on phys-lookup failure */
			free_pages((unsigned long)g_card_vir_addr[card_idx],
				   DBGTOOL_PAGE_ORDER);
			g_card_vir_addr[card_idx] = NULL;
			mutex_unlock(&g_addr_lock);
			return -EFAULT;
		}
		/* reserve each page so user space may map this region */
		tmp = g_card_vir_addr[card_idx];
		for (i = 0; i < (1 << DBGTOOL_PAGE_ORDER); i++) {
			SetPageReserved(virt_to_page(tmp));
			tmp += PAGE_SIZE;
		}
	}
	mutex_unlock(&g_addr_lock);
	return 0;
}
/* hifcadm query: return per-card function information plus the physical
 * address of the user-space api-chain region for the named chip.
 * dev_name must look like "<HIFC_CHIP_NAME><id>".
 * Returns 0 on success, a negative errno (or the memcmp residue, as
 * before) on failure.
 */
static int get_card_func_info(char *dev_name, struct msg_module *nt_msg)
{
	struct hifc_card_func_info card_func_info = {0};
	int id, err;

	if (nt_msg->len_info.out_buff_len != sizeof(card_func_info) ||
	    nt_msg->len_info.in_buff_len != sizeof(card_func_info)) {
		pr_err("Invalid out_buf_size %d or Invalid in_buf_size %d, expect %lu\n",
		       nt_msg->len_info.out_buff_len,
		       nt_msg->len_info.in_buff_len,
		       sizeof(card_func_info));
		return -EINVAL;
	}

	err = memcmp(dev_name, HIFC_CHIP_NAME, strlen(HIFC_CHIP_NAME));
	if (err) {
		pr_err("Invalid chip name %s\n", dev_name);
		return err;
	}

	/* sscanf() returns the number of converted items. The original
	 * "err < 0" check let a zero-conversion result (no digits after the
	 * chip name) fall through and then read the uninitialized 'id'.
	 */
	err = sscanf(dev_name, HIFC_CHIP_NAME "%d", &id);
	if (err != 1) {
		pr_err("Failed to get hifc id\n");
		return -EINVAL;
	}
	/* also reject a parsed negative id, which would index the
	 * g_card_* arrays out of bounds
	 */
	if (id < 0 || id >= MAX_CARD_NUM) {
		pr_err("chip id %d exceed limit[0-%d]\n", id, MAX_CARD_NUM - 1);
		return -EINVAL;
	}

	hifc_get_card_func_info_by_card_name(dev_name, &card_func_info);
	if (!card_func_info.num_pf) {
		pr_err("None function found for %s\n", dev_name);
		return -EFAULT;
	}

	err = __get_card_usr_api_chain_mem(id);
	if (err) {
		pr_err("Faile to get api chain memory for userspace %s\n",
		       dev_name);
		return -EFAULT;
	}

	card_func_info.usr_api_phy_addr = g_card_phy_addr[id];
	/* Copy the dev_info to user mode */
	if (copy_to_user(nt_msg->out_buf, &card_func_info,
			 sizeof(card_func_info))) {
		pr_err("Copy dev_info to user fail\n");
		return -EFAULT;
	}

	return 0;
}
/* Check whether a management-plane command can be serviced: CLP, MGMT or
 * MBOX must be initialized depending on function capability and API type.
 * (The 'mod' parameter is accepted for signature symmetry but not used.)
 */
static bool is_mgmt_cmd_support(void *hwdev, unsigned int mod, u32 up_api_type)
{
	if (!FUNC_SUPPORT_MGMT(hwdev)) {
		/* functions without management support go through the mbox */
		if (!hifc_is_hwdev_mod_inited
		    (hwdev, HIFC_HWDEV_MBOX_INITED)) {
			pr_err("MBOX have not initialized\n");
			return false;
		}
		return true;
	}

	if (up_api_type == API_CLP) {
		if (!hifc_is_hwdev_mod_inited
		    (hwdev, HIFC_HWDEV_CLP_INITED)) {
			pr_err("CLP have not initialized\n");
			return false;
		}
		return true;
	}

	if (!hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_MGMT_INITED)) {
		pr_err("MGMT have not initialized\n");
		return false;
	}
	return true;
}
/* Check whether the hwdev behind ifname can service a command of type mod:
 * UP/SM commands need management support, UCODE commands need the cmdq.
 */
static bool is_hwdev_cmd_support(unsigned int mod,
				 char *ifname, u32 up_api_type)
{
	void *hwdev = hifc_get_hwdev_by_ifname(ifname);

	if (!hwdev) {
		pr_err("Can not get the device %s correctly\n", ifname);
		return false;
	}

	if (mod == SEND_TO_UP || mod == SEND_TO_SM)
		return is_mgmt_cmd_support(hwdev, mod, up_api_type);

	if (mod == SEND_TO_UCODE) {
		if (!hifc_is_hwdev_mod_inited(hwdev,
					      HIFC_HWDEV_CMDQ_INITED)) {
			pr_err("CMDQ have not initialized\n");
			return false;
		}
		return true;
	}

	/* any other module id is not a hwdev command */
	return false;
}
/* Gate a tool command on the device's init progress: FC-driver commands
 * require full init, hwdev-range commands defer to is_hwdev_cmd_support(),
 * HW-driver commands require at least hwdev init.
 */
static bool nictool_k_is_cmd_support(unsigned int mod,
				     char *ifname, u32 up_api_type)
{
	enum hifc_init_state state =
	    hifc_get_init_state_by_ifname(ifname);

	if (state == HIFC_INIT_STATE_NONE)
		return false;

	if (mod == HIFCADM_FC_DRIVER) {
		if (state >= HIFC_INIT_STATE_ALL_INITED)
			return true;
		pr_err("HIFC driver have not initialized\n");
		return false;
	}

	if (mod >= SEND_TO_UCODE && mod <= SEND_TO_SM)
		return is_hwdev_cmd_support(mod, ifname, up_api_type);

	if (mod == SEND_TO_HW_DRIVER) {
		if (state >= HIFC_INIT_STATE_HWDEV_INITED)
			return true;
		pr_err("Hwdev have not initialized\n");
		return false;
	}

	return false;
}
/* Allocate the kernel-side in/out bounce buffers for one ioctl.
 * On failure nothing is left allocated.
 */
static int alloc_tmp_buf(void *hwdev, struct msg_module *nt_msg, u32 in_size,
			 void **buf_in, u32 out_size, void **buf_out)
{
	int err = alloc_buff_in(hwdev, nt_msg, in_size, buf_in);

	if (err) {
		pr_err("Alloc tool cmd buff in failed\n");
		return err;
	}

	err = alloc_buff_out(hwdev, nt_msg, out_size, buf_out);
	if (!err)
		return 0;

	pr_err("Alloc tool cmd buff out failed\n");
	free_buff_in(hwdev, nt_msg, *buf_in);
	return err;
}
/* Release the bounce buffers allocated by alloc_tmp_buf();
 * freed in reverse allocation order.
 */
static void free_tmp_buf(void *hwdev, struct msg_module *nt_msg,
			 void *buf_in, void *buf_out)
{
	free_buff_out(hwdev, nt_msg, buf_out);
	free_buff_in(hwdev, nt_msg, buf_in);
}
static int get_all_chip_id_cmd(struct msg_module *nt_msg)
{
struct nic_card_id card_id;
hifc_get_all_chip_id((void *)&card_id);
if (copy_to_user(nt_msg->out_buf, &card_id, sizeof(card_id))) {
pr_err("Copy chip id to user failed\n");
return -EFAULT;
}
return 0;
}
/* A pcidev matches when its uld device name equals @ifname, or when
 * @ifname is empty and the function has an uld device at all.
 */
static bool __is_pcidev_match_dev_name(const char *ifname,
				       struct hifc_pcidev *dev)
{
	if (dev->uld_dev && strlen(ifname) == 0)
		return true;

	return strncmp(dev->uld_dev_name, ifname, IFNAMSIZ) == 0;
}
/* Find a function by uld (FC/netdev) device name across all cards,
 * skipping functions that are being removed. Returns NULL if absent.
 */
struct hifc_pcidev *hifc_get_pcidev_by_dev_name(char *ifname)
{
	struct card_node *chip;
	struct hifc_pcidev *func;
	struct hifc_pcidev *found = NULL;

	lld_dev_hold();
	list_for_each_entry(chip, &g_hinic_chip_list, node) {
		list_for_each_entry(func, &chip->func_list, node) {
			if (test_bit(HIFC_FUNC_IN_REMOVE, &func->flag) ||
			    !__is_pcidev_match_dev_name(ifname, func))
				continue;
			found = func;
			goto out;
		}
	}
out:
	lld_dev_put();
	return found;
}
/* Resolve the uld (FC) device instance for the request's device name,
 * or NULL if no such function exists.
 */
static void *get_support_uld_dev(struct msg_module *nt_msg)
{
	struct hifc_pcidev *pcidev =
		hifc_get_pcidev_by_dev_name(nt_msg->device_name);

	return pcidev ? pcidev->uld_dev : NULL;
}
/* GET_DRV_VERSION for the FC service: run the adm command and copy the
 * version string back to user space itself (bypasses the generic
 * copy-out in do_nictool_ioctl_cmd()).
 */
static int get_service_drv_version(void *hwdev, struct msg_module *nt_msg,
				   void *buf_in, u32 in_size, void *buf_out,
				   u32 *out_size)
{
	enum hifc_service_type type;
	int ret;

	type = nt_msg->module - SEND_TO_SM;
	if (type != SERVICE_T_FC) {
		pr_err("err cmd type: %d\n", type);
		/* Used to return 0 here, reporting silent success to the
		 * tool without copying any data back; fail explicitly.
		 */
		return -EINVAL;
	}

	*out_size = sizeof(struct drv_version_info);
	ret = hifc_adm(NULL, nt_msg->msg_formate, buf_in, in_size,
		       buf_out, out_size);
	if (ret)
		return ret;

	if (copy_to_user(nt_msg->out_buf, buf_out, *out_size))
		return -EFAULT;

	return 0;
}
/* Forward a service-level command to the FC uld driver; only the FC
 * service type is supported by this driver.
 */
int send_to_service_driver(struct msg_module *nt_msg, void *buf_in,
			   u32 in_size, void *buf_out, u32 *out_size)
{
	enum hifc_service_type type = nt_msg->module - SEND_TO_SM;
	void *uld_dev;

	if (type != SERVICE_T_FC) {
		pr_err("Ioctl input module id: %d is incorrectly\n",
		       nt_msg->module);
		return -EINVAL;
	}

	uld_dev = get_support_uld_dev(nt_msg);
	if (!uld_dev)
		return -EINVAL;

	return hifc_adm(uld_dev, nt_msg->msg_formate, buf_in, in_size,
			buf_out, out_size);
}
/* Dispatch one validated ioctl to its submodule handler. */
static int nictool_exec_cmd(void *hwdev, struct msg_module *nt_msg,
			    void *buf_in, u32 in_size, void *buf_out,
			    u32 *out_size)
{
	switch (nt_msg->module) {
	case SEND_TO_HW_DRIVER:
		return send_to_hw_driver(hwdev, nt_msg, buf_in,
					 in_size, buf_out, out_size);
	case SEND_TO_UP:
		return send_to_up(hwdev, nt_msg, buf_in,
				  in_size, buf_out, out_size);
	case SEND_TO_UCODE:
		return send_to_ucode(hwdev, nt_msg, buf_in,
				     in_size, buf_out, out_size);
	case SEND_TO_SM:
		return send_to_sm(hwdev, nt_msg, buf_in,
				  in_size, buf_out, out_size);
	default:
		/* everything else goes to the service (FC) driver */
		return send_to_service_driver(nt_msg, buf_in, in_size,
					      buf_out, out_size);
	}
}
/* Serve commands that need no hwdev (chip enumeration). Returns true
 * when the command was handled here, with its status in *ret.
 */
static bool hifc_is_special_handling_cmd(struct msg_module *nt_msg, int *ret)
{
	if (nt_msg->module != SEND_TO_HW_DRIVER)
		return false;

	if (nt_msg->msg_formate == GET_CHIP_ID) {
		*ret = get_all_chip_id_cmd(nt_msg);
		return true;
	}

	if (nt_msg->msg_formate == GET_CHIP_INFO) {
		*ret = get_card_func_info(nt_msg->device_name, nt_msg);
		return true;
	}

	return false;
}
/* Allocate kernel bounce buffers, run the ioctl in the proper
 * submodule and copy the result back to user space.
 *
 * GET_DRV_VERSION for HIFCADM_FC_DRIVER performs its own copy_to_user,
 * hence the early goto past the generic copy-out below.
 */
static int do_nictool_ioctl_cmd(void *hwdev, struct msg_module *nt_msg)
{
	void *buf_out = NULL;
	void *buf_in = NULL;
	u32 out_size_expect;
	u32 out_size, in_size;
	int ret = 0;

	out_size_expect = nt_msg->len_info.out_buff_len;
	in_size = nt_msg->len_info.in_buff_len;
	ret = alloc_tmp_buf(hwdev, nt_msg, in_size,
			    &buf_in, out_size_expect, &buf_out);
	if (ret) {
		pr_err("Alloc tmp buff failed\n");
		return ret;
	}

	out_size = out_size_expect;
	if ((nt_msg->msg_formate == GET_DRV_VERSION) &&
	    (nt_msg->module == HIFCADM_FC_DRIVER)) {
		/* copies to user itself, skip the generic copy-out */
		ret = get_service_drv_version(hwdev, nt_msg, buf_in,
					      in_size, buf_out, &out_size);
		goto out_free_buf;
	}

	ret = nictool_exec_cmd(hwdev, nt_msg, buf_in,
			       in_size, buf_out, &out_size);
	if (ret) {
		pr_err("nictool_exec_cmd failed, mod:%d msg_formate:%d\n",
		       nt_msg->module, nt_msg->msg_formate);
		goto out_free_buf;
	}

	/* generic copy-out: hand the submodule's reply to user space */
	if (out_size_expect && buf_out) {
		ret = copy_buf_out_to_user(nt_msg, out_size_expect, buf_out);
		if (ret)
			pr_err("Copy information to user failed\n");
	}

out_free_buf:
	free_tmp_buf(hwdev, nt_msg, buf_in, buf_out);
	return ret;
}
/* Main ioctl entry of the nictool char device.
 *
 * Copies the request header from user space, serves hwdev-less special
 * commands, resolves the target device by name, validates that the
 * required channel is initialized, then executes the command. The tool
 * reference count brackets the whole call.
 */
static long nictool_k_unlocked_ioctl(struct file *pfile,
				     unsigned int cmd, unsigned long arg)
{
	void *hwdev;
	struct msg_module nt_msg;
	int ret = 0;

	memset(&nt_msg, 0, sizeof(nt_msg));
	if (copy_from_user(&nt_msg, (void *)arg, sizeof(nt_msg))) {
		pr_err("Copy information from user failed\n");
		return -EFAULT;
	}

	/* end with '\0' */
	nt_msg.device_name[IFNAMSIZ - 1] = '\0';
	hifc_tool_cnt_inc();
	/* GET_CHIP_ID / GET_CHIP_INFO need no hwdev */
	if (hifc_is_special_handling_cmd(&nt_msg, &ret))
		goto out_free_lock;

	/* FC GET_CHIP_ID: substitute the FC device name for the lookup */
	if (nt_msg.module == HIFCADM_FC_DRIVER &&
	    nt_msg.msg_formate == GET_CHIP_ID)
		get_fc_devname(nt_msg.device_name);

	if (!nictool_k_is_cmd_support(nt_msg.module, nt_msg.device_name,
				      nt_msg.up_cmd.up_db.up_api_type)) {
		ret = -EFAULT;
		goto out_free_lock;
	}

	/* get the netdevice */
	hwdev = hifc_get_hwdev_by_ifname(nt_msg.device_name);
	if (!hwdev) {
		pr_err("Can not get the device %s correctly\n",
		       nt_msg.device_name);
		ret = -ENODEV;
		goto out_free_lock;
	}

	ret = do_nictool_ioctl_cmd(hwdev, &nt_msg);

out_free_lock:
	hifc_tool_cnt_dec();
	return (long)ret;
}
/* open() is a no-op; all state is per-ioctl */
static int nictool_k_open(struct inode *pnode, struct file *pfile)
{
	return 0;
}

/* read()/write() are not supported; the device is ioctl/mmap only */
static ssize_t nictool_k_read(struct file *pfile, char __user *ubuf,
			      size_t size, loff_t *ppos)
{
	return 0;
}

static ssize_t nictool_k_write(struct file *pfile, const char __user *ubuf,
			       size_t size, loff_t *ppos)
{
	return 0;
}
/* File operations of the nictool char device; mmap exposes the api
 * chain buffer to the user-space tool.
 */
static const struct file_operations fifo_operations = {
	.owner = THIS_MODULE,
	.open = nictool_k_open,
	.read = nictool_k_read,
	.write = nictool_k_write,
	.unlocked_ioctl = nictool_k_unlocked_ioctl,
	.mmap = hifc_mem_mmap,
};
/* Return 1 when another driver already created the nictool device
 * node (probed by opening its /dev path), 0 otherwise.
 */
int if_nictool_exist(void)
{
	struct file *fp;

	fp = filp_open(HIADM_DEV_PATH, O_RDONLY, 0);
	if (IS_ERR(fp))
		return 0;

	(void)filp_close(fp, NULL);
	return 1;
}
/**
 * nictool_k_init - initialize the hw interface
 *
 * Registers the nictool character device (chrdev region, class, cdev,
 * device node) exactly once; later calls only bump the reference
 * count. Returns 0 on success or when the node already exists,
 * negative error code otherwise.
 */
int nictool_k_init(void)
{
	int ret;
	struct device *pdevice;

	if (g_nictool_init_flag) {
		g_nictool_ref_cnt++;
		/* already initialized */
		return 0;
	}

	/* NOTE(review): if another driver owns the node we return 0
	 * without setting g_nictool_init_flag, so this instance never
	 * tears it down — confirm that is intended.
	 */
	if (if_nictool_exist()) {
		pr_err("Nictool device exists\n");
		return 0;
	}

	/* Device ID: primary device ID (12bit) |
	 * secondary device number (20bit)
	 */
	g_dev_id = MKDEV(MAJOR_DEV_NUM, 0);

	/* Static device registration number */
	ret = register_chrdev_region(g_dev_id, 1, HIADM_DEV_NAME);
	if (ret < 0) {
		/* fixed major taken: fall back to a dynamic one */
		ret = alloc_chrdev_region(&g_dev_id, 0, 1, HIADM_DEV_NAME);
		if (ret < 0) {
			pr_err("Register nictool_dev fail(0x%x)\n", ret);
			return ret;
		}
	}

	/* Create equipment */
	/*lint -save -e160*/
	g_nictool_class = class_create(THIS_MODULE, HIADM_DEV_CLASS);
	/*lint -restore*/
	if (IS_ERR(g_nictool_class)) {
		pr_err("Create nictool_class fail\n");
		ret = -EFAULT;
		goto class_create_err;
	}

	/* Initializing the character device */
	cdev_init(&g_nictool_cdev, &fifo_operations);

	/* Add devices to the operating system */
	ret = cdev_add(&g_nictool_cdev, g_dev_id, 1);
	if (ret < 0) {
		pr_err("Add nictool_dev to operating system fail(0x%x)\n", ret);
		goto cdev_add_err;
	}

	/* Export device information to user space
	 * (/sys/class/class name/device name)
	 */
	pdevice = device_create(g_nictool_class, NULL,
				g_dev_id, NULL, HIADM_DEV_NAME);
	if (IS_ERR(pdevice)) {
		pr_err("Export nictool device information to user space fail\n");
		ret = -EFAULT;
		goto device_create_err;
	}

	g_nictool_init_flag = 1;
	g_nictool_ref_cnt = 1;
	pr_info("Register nictool_dev to system succeed\n");

	return 0;

	/* unwind in reverse creation order */
device_create_err:
	cdev_del(&g_nictool_cdev);

cdev_add_err:
	class_destroy(g_nictool_class);

class_create_err:
	g_nictool_class = NULL;
	unregister_chrdev_region(g_dev_id, 1);

	return ret;
}
/* Drop one reference to the nictool device and tear it down when the
 * last user goes away; tear-down is the reverse of creation.
 */
void nictool_k_uninit(void)
{
	if (g_nictool_init_flag) {
		if ((--g_nictool_ref_cnt))
			return;
	}

	g_nictool_init_flag = 0;

	/* nothing to destroy when init failed or was never ours */
	if (!g_nictool_class || IS_ERR(g_nictool_class))
		return;

	cdev_del(&g_nictool_cdev);
	device_destroy(g_nictool_class, g_dev_id);
	class_destroy(g_nictool_class);
	g_nictool_class = NULL;
	unregister_chrdev_region(g_dev_id, 1);
	pr_info("Unregister nictool_dev succeed\n");
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef HIFC_NICTOOL_H_
#define HIFC_NICTOOL_H_
/* IFNAMSIZ normally comes from <linux/if.h>; fallback for standalone use */
#ifndef IFNAMSIZ
#define IFNAMSIZ 16
#endif
/* completion timeout interval, unit is jiffies*/
#define UP_COMP_TIME_OUT_VAL 10000U
/* Input of one SM (state-machine counter) read request */
struct sm_in_st {
	int node;     /* destination node id */
	int id;       /* counter id */
	int instance; /* counter instance */
};
/* Output of an SM read: one or two 64-bit counter values */
struct sm_out_st {
	u64 val1;
	u64 val2;
};
/* Parameters of an up-log / CSR read request */
struct up_log_msg_st {
	u32 rd_len; /* number of bytes to read */
	u32 addr;   /* starting CSR address */
};
/* Parameters of a CSR write request */
struct csr_write_st {
	u32 rd_len; /* number of bytes to write (must be multiple of 4) */
	u32 addr;   /* starting CSR address */
	u8 *data;   /* user-space buffer holding the values */
};
struct ipsurx_stats_info {
	u32 addr;
	u32 rd_cnt;
};
/* Doorbell describing a command sent to the microcode via the cmdq */
struct ucode_cmd_st {
	union {
		struct {
			u32 comm_mod_type:8;  /* destination module */
			u32 ucode_cmd_type:4; /* command within the module */
			u32 cmdq_ack_type:3;
			u32 ucode_imm:1;      /* immediate (direct) response */
			u32 len:16;
		} ucode_db;
		u32 value;
	};
};
/* Doorbell describing a command sent to the management cpu */
struct up_cmd_st {
	union {
		struct {
			u32 comm_mod_type:8; /* destination node/module */
			u32 chipif_cmd:8;    /* command id */
			u32 up_api_type:16;  /* API_CMD/API_CHAIN/API_CLP */
		} up_db;
		u32 value;
	};
};
/* DCB control word */
struct _dcb_data {
	u8 wr_flag; /* nonzero: write request, else read */
	u8 dcb_en;
	u8 err;
	u8 rsvd;
};
union _dcb_ctl {
	struct _dcb_data dcb_data;
	u32 data;
};
/* PFC control word */
struct _pfc_data {
	u8 pfc_en;
	u8 pfc_priority;
	u8 num_of_tc;
	u8 err;
};
union _pfc {
	struct _pfc_data pfc_data;
	u32 data;
};
/* Bit flags selecting which of the ETS fields below are valid */
union _flag_com {
	struct _ets_flag {
		u8 flag_ets_enable:1;
		u8 flag_ets_percent:1;
		u8 flag_ets_cos:1;
		u8 flag_ets_strict:1;
		u8 rev:4;
	} ets_flag;
	u8 data;
};
/* ETS (enhanced transmission selection) configuration */
struct _ets {
	u8 ets_en;
	u8 err;
	u8 strict;
	u8 tc[8];
	u8 ets_percent[8];
	union _flag_com flag_com;
};
/* up_api_type values: flavors of the management channel */
#define API_CMD 0x1
#define API_CHAIN 0x2
#define API_CLP 0x3
/* Ioctl request header exchanged with the user-space hifcadm tool */
struct msg_module {
	char device_name[IFNAMSIZ]; /* chip, netdev or FC device name */
	unsigned int module;        /* enum module_name: target submodule */
	union {
		u32 msg_formate;    /* command id within the module */
		struct ucode_cmd_st ucode_cmd;
		struct up_cmd_st up_cmd;
	};
	struct {
		u32 in_buff_len;    /* bytes valid in in_buff */
		u32 out_buff_len;   /* bytes available in out_buf */
	} len_info;
	u32 res;
	void *in_buff; /* user-space input buffer */
	void *out_buf; /* user-space output buffer */
};
#define MAX_VER_INFO_LEN 128
/* Driver version string returned by GET_DRV_VERSION */
struct drv_version_info {
	char ver[MAX_VER_INFO_LEN];
};
/* in: offset to read from; out: chip fault log snapshot */
struct chip_fault_stats {
	int offset;
	u8 chip_faults[MAX_DRV_BUF_SIZE];
};
/* Identifies one WQE of a queue for debug dump */
struct hifc_wqe_info {
	int q_id;
	void *slq_handle;
	unsigned int wqe_id;
};
/* Physical/mapped address pair of one hardware page */
struct hifc_tx_hw_page {
	u64 phy_addr;
	u64 *map_addr;
};
/* Debug snapshot of a send queue */
struct hifc_dbg_sq_info {
	u16 q_id;
	u16 pi;
	u16 ci;/* sw_ci */
	u16 fi;/* hw_ci */
	u32 q_depth;
	u16 pi_reverse;
	u16 weqbb_size;
	u8 priority;
	u16 *ci_addr;
	u64 cla_addr;
	void *slq_handle;
	struct hifc_tx_hw_page direct_wqe;
	struct hifc_tx_hw_page db_addr;
	u32 pg_idx;
	u32 glb_sq_id;
};
/* Debug snapshot of a receive queue */
struct hifc_dbg_rq_info {
	u16 q_id;
	u16 glb_rq_id;
	u16 hw_pi;
	u16 ci; /* sw_ci */
	u16 sw_pi;
	u16 wqebb_size;
	u16 q_depth;
	u16 buf_len;
	void *slq_handle;
	u64 ci_wqe_page_addr;
	u64 ci_cla_tbl_addr;
	u16 msix_idx;
	u32 msix_vector;
};
#ifndef BUSINFO_LEN
#define BUSINFO_LEN (32)
#endif
/* Identity of one physical function as reported to the tool */
struct pf_info {
	char name[IFNAMSIZ];
	char bus_info[BUSINFO_LEN];
	u32 pf_type;
};
#ifndef MAX_SIZE
#define MAX_SIZE (16)
#endif
/* All PFs of one card (GET_SINGLE_CARD_INFO reply) */
struct card_info {
	struct pf_info pf[MAX_SIZE];
	u32 pf_num;
};
/* Numeric ids of every card in the system (GET_CHIP_ID reply) */
struct nic_card_id {
	u32 id[MAX_SIZE];
	u32 num;
};
/* BAR0 location of one function's pci device */
struct func_pdev_info {
	u64 bar0_phy_addr;
	u64 bar0_size;
	u64 rsvd1[4];
};
/* GET_CHIP_INFO reply: per-card function layout */
struct hifc_card_func_info {
	u32 num_pf;
	u32 rsvd0;
	u64 usr_api_phy_addr; /* api chain buffer for user-space mmap */
	struct func_pdev_info pdev_info[MAX_SIZE];
};
#ifndef NIC_UP_CMD_UPDATE_FW
#define NIC_UP_CMD_UPDATE_FW (114)
#endif
#ifndef MAX_CARD_NUM
#define MAX_CARD_NUM (64)
#endif
/* Per-card bookkeeping shared with the dbgtool module */
extern void *g_card_node_array[MAX_CARD_NUM];
extern void *g_card_vir_addr[MAX_CARD_NUM];
extern u64 g_card_phy_addr[MAX_CARD_NUM];
/* lock for g_card_vir_addr */
extern struct mutex g_addr_lock;
extern int card_id;
struct hifc_nic_loop_mode {
	u32 loop_mode;
	u32 loop_ctrl;
};
struct hifc_nic_poll_weight {
	int poll_weight;
};
enum hifc_homologues_state {
	HIFC_HOMOLOGUES_OFF = 0,
	HIFC_HOMOLOGUES_ON = 1,
};
struct hifc_homologues {
	enum hifc_homologues_state homo_state;
};
/* GET_PF_ID reply */
struct hifc_pf_info {
	u32 isvalid;
	u32 pf_id;
};
/* Target submodule carried in struct msg_module::module */
enum module_name {
	SEND_TO_NIC_DRIVER = 1,
	SEND_TO_HW_DRIVER,
	SEND_TO_UCODE,
	SEND_TO_UP,
	SEND_TO_SM,
	HIFCADM_FC_DRIVER = 10,
};
/* msg_formate values understood by SEND_TO_HW_DRIVER */
enum driver_cmd_type {
	FUNC_TYPE = 12,
	GET_FUNC_IDX,
	GET_DRV_VERSION = 16,
	GET_HW_STATS = 18,
	CLEAR_HW_STATS,
	GET_CHIP_FAULT_STATS = 21,
	GET_CHIP_ID = 25,
	GET_SINGLE_CARD_INFO,
	GET_FIRMWARE_ACTIVE_STATUS,
	GET_DEVICE_ID = 29,
	IS_DRV_IN_VM = 44,
	GET_CHIP_INFO = 48,
	GET_PF_ID = 52,
	PORT_ID = 0x42
};
/* chipif_cmd values of the API_CHAIN channel */
enum api_chain_cmd_type {
	API_CSR_READ,
	API_CSR_WRITE
};
/* SEND_TO_SM counter read widths */
enum sm_cmd_type {
	SM_CTR_RD32 = 1,
	SM_CTR_RD64_PAIR,
	SM_CTR_RD64
};
/* char device life cycle */
int nictool_k_init(void);
void nictool_k_uninit(void);
/* per-submodule dispatch entries */
int send_to_hw_driver(void *hwdev, struct msg_module *nt_msg,
		      void *buf_in, u32 in_size, void *buf_out, u32 *out_size);
int send_to_sm(void *hwdev, struct msg_module *nt_msg, void *buf_in,
	       u32 in_size, void *buf_out, u32 *out_size);
int send_to_up(void *hwdev, struct msg_module *nt_msg, void *buf_in,
	       u32 in_size, void *buf_out, u32 *out_size);
int send_to_ucode(void *hwdev, struct msg_module *nt_msg, void *buf_in,
		  u32 in_size, void *buf_out, u32 *out_size);
/* device lookup and card enumeration helpers */
void get_fc_devname(char *devname);
void *hifc_get_hwdev_by_ifname(char *ifname);
enum hifc_init_state hifc_get_init_state_by_ifname(char *ifname);
void hifc_get_all_chip_id(void *id_info);
/* tool user reference counting */
void hifc_tool_cnt_dec(void);
void hifc_tool_cnt_inc(void);
int hifc_get_device_id(void *hwdev, u16 *dev_id);
int hifc_get_pf_id(void *hwdev, u32 port_id, u32 *pf_id, u32 *isvalid);
bool hifc_is_valid_bar_addr(u64 offset);
void hifc_get_card_info(void *hwdev, void *bufin);
struct hifc_pcidev *hifc_get_pcidev_by_dev_name(char *ifname);
void hifc_get_card_func_info_by_card_name(
	const char *chip_name, struct hifc_card_func_info *card_func);
#endif
// SPDX-License-Identifier: GPL-2.0
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <net/sock.h>
#include "hifc_knl_adp.h"
#include "hifc_hw.h"
#include "hifc_hwdev.h"
#include "hifc_hwif.h"
#include "hifc_api_cmd.h"
#include "hifc_mgmt.h"
#include "hifc_cfg.h"
#include "hifc_lld.h"
#include "hifc_sml.h"
#include "hifc_tool.h"
/* number of tool users currently inside an ioctl */
static atomic_t tool_used_cnt;

/* handler signature for SEND_TO_HW_DRIVER commands */
typedef int (*hw_driv_module)(void *hwdev, void *buf_in, u32 in_size,
			      void *buf_out, u32 *out_size);
/* maps one driver_cmd_type to its handler */
struct hw_drv_module_handle {
	enum driver_cmd_type driv_cmd_name;
	hw_driv_module driv_func;
};
/* Return the physical port id of @hwdev, or 0 when hwdev is NULL. */
u8 hifc_physical_port_id(void *hwdev)
{
	struct hifc_hwdev *dev = hwdev;

	if (dev)
		return dev->cfg_mgmt->svc_cap.port_id;

	pr_err("Hwdev pointer is NULL for getting physical port id\n");
	return 0;
}
/* Send a command to the management cpu over the CLP channel; rejects
 * calls while the chip is absent or CLP is not yet initialized.
 */
int hifc_clp_to_mgmt(void *hwdev, enum hifc_mod_type mod, u8 cmd,
		     void *buf_in, u16 in_size,
		     void *buf_out, u16 *out_size)
{
	struct hifc_hwdev *dev = hwdev;

	if (!dev)
		return -EINVAL;

	if (!dev->chip_present_flag ||
	    !hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_CLP_INITED))
		return -EPERM;

	return hifc_pf_clp_to_mgmt(dev, mod, cmd, buf_in,
				   in_size, buf_out, out_size);
}
/* FUNC_TYPE: return the function type (PF/PPF/...) as a u16. */
static int get_func_type(void *hwdev, void *buf_in, u32 in_size,
			 void *buf_out, u32 *out_size)
{
	u16 type = hifc_func_type(hwdev);

	if (!buf_out || *out_size != sizeof(u16)) {
		pr_err("Unexpect out buf size from user :%d, expect: %lu\n",
		       *out_size, sizeof(u16));
		return -EFAULT;
	}

	*(u16 *)buf_out = type;
	return 0;
}
/* GET_FUNC_IDX: return the global function index as a u16. */
static int get_func_id(void *hwdev, void *buf_in, u32 in_size,
		       void *buf_out, u32 *out_size)
{
	if (!buf_out || *out_size != sizeof(u16)) {
		pr_err("Unexpect out buf size from user :%d, expect: %lu\n",
		       *out_size, sizeof(u16));
		return -EFAULT;
	}

	*(u16 *)buf_out = hifc_global_func_id_hw(hwdev);
	*out_size = sizeof(u16);
	return 0;
}
/* GET_DRV_VERSION: fill buf_out with "version build-time". The whole
 * MAX_VER_INFO_LEN staging buffer is copied so the tail of the reply
 * is zero-padded and never leaks stack data.
 */
static int get_drv_version(void *hwdev, void *buf_in, u32 in_size,
			   void *buf_out, u32 *out_size)
{
	struct drv_version_info *ver_info;
	char ver_str[MAX_VER_INFO_LEN] = {0};

	if (*out_size != sizeof(*ver_info)) {
		pr_err("Unexpect out buf size from user :%d, expect: %lu\n",
		       *out_size, sizeof(*ver_info));
		return -EFAULT;
	}

	snprintf(ver_str, sizeof(ver_str), "%s %s",
		 HIFC_DRV_VERSION, __TIME_STR__);

	ver_info = (struct drv_version_info *)buf_out;
	memcpy(ver_info->ver, ver_str, sizeof(ver_str));

	return 0;
}
/* CLEAR_HW_STATS: accepted but currently a no-op for this driver */
static int clear_hw_stats(void *hwdev, void *buf_in, u32 in_size,
			  void *buf_out, u32 *out_size)
{
	return 0;
}

/* GET_HW_STATS: accepted but currently a no-op for this driver */
static int get_hw_stats(void *hwdev, void *buf_in, u32 in_size,
			void *buf_out, u32 *out_size)
{
	return 0;
}
/* Copy up to MAX_DRV_BUF_SIZE bytes of the accumulated chip fault log,
 * starting at @offset, into @chip_fault_stats.
 *
 * NOTE(review): for the tail chunk the copy length is
 * "offset + MAX_DRV_BUF_SIZE - HIFC_CHIP_FAULT_SIZE" rather than the
 * remaining "HIFC_CHIP_FAULT_SIZE - offset"; these only agree when
 * offset is the last MAX_DRV_BUF_SIZE-aligned chunk start. Confirm the
 * intended chunking scheme before changing it.
 */
static void hifc_get_chip_fault_stats(const void *hwdev,
				      u8 *chip_fault_stats, int offset)
{
	int copy_len = offset + MAX_DRV_BUF_SIZE - HIFC_CHIP_FAULT_SIZE;

	/* reject offsets outside the fault buffer */
	if (offset < 0 || offset > HIFC_CHIP_FAULT_SIZE) {
		pr_err("Invalid chip offset value: %d\n",
		       offset);
		return;
	}

	if (offset + MAX_DRV_BUF_SIZE <= HIFC_CHIP_FAULT_SIZE)
		memcpy(chip_fault_stats,
		       ((struct hifc_hwdev *)hwdev)->chip_fault_stats + offset,
		       MAX_DRV_BUF_SIZE);
	else
		memcpy(chip_fault_stats,
		       ((struct hifc_hwdev *)hwdev)->chip_fault_stats + offset,
		       copy_len);
}
/* GET_CHIP_FAULT_STATS: read a chunk of the fault log starting at the
 * offset supplied in buf_in.
 */
static int get_chip_faults_stats(void *hwdev, void *buf_in, u32 in_size,
				 void *buf_out, u32 *out_size)
{
	struct chip_fault_stats *in_stats = buf_in;
	struct chip_fault_stats *out_stats = buf_out;

	if (!buf_in || !buf_out || *out_size != sizeof(*in_stats) ||
	    in_size != sizeof(*in_stats)) {
		pr_err("Unexpect out buf size from user :%d, expect: %lu\n",
		       *out_size, sizeof(*in_stats));
		return -EFAULT;
	}

	hifc_get_chip_fault_stats(hwdev, out_stats->chip_faults,
				  in_stats->offset);
	return 0;
}
/* GET_CHIP_ID is served by the special-handling path before dispatch;
 * this per-device handler intentionally does nothing.
 */
static int get_chip_id_test(void *hwdev, void *buf_in, u32 in_size,
			    void *buf_out, u32 *out_size)
{
	return 0;
}
/* GET_SINGLE_CARD_INFO: fill buf_out with the per-card PF layout of
 * the function identified by @hwdev. Both buffers must be exactly
 * sizeof(struct card_info).
 */
static int get_single_card_info(void *hwdev, void *buf_in, u32 in_size,
				void *buf_out, u32 *out_size)
{
	if (!buf_in || !buf_out || in_size != sizeof(struct card_info) ||
	    *out_size != sizeof(struct card_info)) {
		pr_err("Unexpect out buf size from user :%d, expect: %lu\n",
		       *out_size, sizeof(struct card_info));
		return -EFAULT;
	}

	hifc_get_card_info(hwdev, buf_out);
	*out_size = in_size;

	return 0;
}
#define GET_FIRMWARE_ACTIVE_STATUS_TIMEOUT 30
/* GET_FIRMWARE_ACTIVE_STATUS: poll the mgmt channel once per second
 * until it reports ready, or give up after the timeout.
 */
static int get_firmware_active_status(void *hwdev, void *buf_in, u32 in_size,
				      void *buf_out, u32 *out_size)
{
	u32 i;

	for (i = 0; i < GET_FIRMWARE_ACTIVE_STATUS_TIMEOUT; i++) {
		if (!hifc_get_mgmt_channel_status(hwdev))
			return 0;
		msleep(1000);
	}

	return -ETIMEDOUT;
}
/* GET_DEVICE_ID: return the PCI device id of the card as a u16. */
static int get_device_id(void *hwdev, void *buf_in, u32 in_size,
			 void *buf_out, u32 *out_size)
{
	u16 dev_id;
	int err;

	if (!buf_out || !buf_in || *out_size != sizeof(u16) ||
	    in_size != sizeof(u16)) {
		pr_err("Unexpect out buf size from user :%d, expect: %lu\n",
		       *out_size, sizeof(u16));
		return -EFAULT;
	}

	err = hifc_get_device_id(hwdev, &dev_id);
	if (err)
		return err;

	/* buf_out is validated above to hold exactly sizeof(u16) bytes;
	 * the old code stored through a u32 pointer and wrote two bytes
	 * past the buffer the caller sized.
	 */
	*((u16 *)buf_out) = dev_id;
	*out_size = in_size;

	return 0;
}
/* True when any function on any card has progressed past bare PCI
 * init, i.e. the driver is running in the host rather than a VM.
 */
bool hifc_is_in_host(void)
{
	struct card_node *chip;
	struct hifc_pcidev *func;
	bool in_host = false;

	lld_dev_hold();
	list_for_each_entry(chip, &g_hinic_chip_list, node) {
		list_for_each_entry(func, &chip->func_list, node) {
			if (test_bit(HIFC_FUNC_IN_REMOVE, &func->flag))
				continue;
			if (func->init_state > HIFC_INIT_STATE_PCI_INITED) {
				in_host = true;
				goto out;
			}
		}
	}
out:
	lld_dev_put();
	return in_host;
}
/* IS_DRV_IN_VM: write 1 to buf_out when running inside a VM, 0 when
 * in the host.
 */
static int is_driver_in_vm(void *hwdev, void *buf_in, u32 in_size,
			   void *buf_out, u32 *out_size)
{
	if (!buf_out || *out_size != sizeof(u8))
		return -EINVAL;

	*((u8 *)buf_out) = hifc_is_in_host() ? 0 : 1;
	return 0;
}
/* GET_PF_ID: map a physical port id (u32 in buf_in) to its PF id. */
static int get_pf_id(void *hwdev, void *buf_in, u32 in_size,
		     void *buf_out, u32 *out_size)
{
	struct hifc_pf_info *info = buf_out;
	int err;

	if (!buf_out || *out_size != sizeof(*info) ||
	    !buf_in || in_size != sizeof(u32))
		return -EINVAL;

	err = hifc_get_pf_id(hwdev, *((u32 *)buf_in), &info->pf_id,
			     &info->isvalid);
	if (err)
		return err;

	*out_size = sizeof(*info);
	return 0;
}
/* Dispatch table mapping SEND_TO_HW_DRIVER command ids to handlers;
 * scanned linearly by send_to_hw_driver().
 */
static struct hw_drv_module_handle hw_driv_module_cmd_handle[] = {
	{FUNC_TYPE, get_func_type},
	{GET_FUNC_IDX, get_func_id},
	{GET_DRV_VERSION, get_drv_version},
	{GET_HW_STATS, get_hw_stats},
	{CLEAR_HW_STATS, clear_hw_stats},
	{GET_CHIP_FAULT_STATS, get_chip_faults_stats},
	{GET_CHIP_ID, get_chip_id_test},
	{GET_SINGLE_CARD_INFO, get_single_card_info},
	{GET_FIRMWARE_ACTIVE_STATUS, get_firmware_active_status},
	{GET_DEVICE_ID, get_device_id},
	{IS_DRV_IN_VM, is_driver_in_vm},
	{GET_PF_ID, get_pf_id},
};
/* Look up the SEND_TO_HW_DRIVER handler for the requested command and
 * run it; -EINVAL when the command id is unknown.
 */
int send_to_hw_driver(void *hwdev, struct msg_module *nt_msg,
		      void *buf_in, u32 in_size, void *buf_out, u32 *out_size)
{
	int num_cmds = sizeof(hw_driv_module_cmd_handle) /
		       sizeof(hw_driv_module_cmd_handle[0]);
	enum driver_cmd_type cmd;
	int i;

	if (!nt_msg) {
		pr_err("Input param invalid!\n");
		return -EINVAL;
	}

	cmd = (enum driver_cmd_type)(nt_msg->msg_formate);
	for (i = 0; i < num_cmds; i++) {
		if (hw_driv_module_cmd_handle[i].driv_cmd_name != cmd)
			continue;
		return hw_driv_module_cmd_handle[i].driv_func(hwdev, buf_in,
							      in_size, buf_out,
							      out_size);
	}

	return -EINVAL;
}
/* handler signature for SEND_TO_SM counter reads */
typedef int (*sm_module)(void *hwdev, u32 id, u8 instance,
			 u8 node, struct sm_out_st *buf_out);
/* Read one 32-bit SM counter; val1 is all-ones on failure. */
static int sm_rd32(void *hwdev, u32 id, u8 instance,
		   u8 node, struct sm_out_st *buf_out)
{
	u32 val = 0;
	int err;

	err = hifc_sm_ctr_rd32(hwdev, node, instance, id, &val);
	if (err) {
		pr_err("Get sm ctr information (32 bits)failed!\n");
		val = 0xffffffff;
	}

	buf_out->val1 = val;
	return err;
}
/* Read a 64-bit SM counter pair; val1 is all-ones on failure while
 * val2 keeps its (zero-initialized) value, as before.
 */
static int sm_rd64_pair(void *hwdev, u32 id, u8 instance,
			u8 node, struct sm_out_st *buf_out)
{
	u64 first = 0, second = 0;
	int err;

	err = hifc_sm_ctr_rd64_pair(hwdev, node, instance, id,
				    &first, &second);
	if (err) {
		pr_err("Get sm ctr information (64 bits pair)failed!\n");
		first = 0xffffffff;
	}

	buf_out->val1 = first;
	buf_out->val2 = second;
	return err;
}
/* Read one 64-bit SM counter; val1 is all-ones on failure. */
static int sm_rd64(void *hwdev, u32 id, u8 instance,
		   u8 node, struct sm_out_st *buf_out)
{
	u64 val = 0;
	int err;

	err = hifc_sm_ctr_rd64(hwdev, node, instance, id, &val);
	if (err) {
		pr_err("Get sm ctr information (64 bits)failed!\n");
		val = 0xffffffff;
	}

	buf_out->val1 = val;
	return err;
}
/* Maps one sm_cmd_type to its counter reader */
struct sm_module_handle {
	enum sm_cmd_name sm_cmd_name;
	sm_module sm_func;
};

/* NOTE(review): not static/const — left as-is in case another
 * translation unit references this symbol.
 */
struct sm_module_handle sm_module_cmd_handle[] = {
	{SM_CTR_RD32, sm_rd32},
	{SM_CTR_RD64_PAIR, sm_rd64_pair},
	{SM_CTR_RD64, sm_rd64}
};
/* SEND_TO_SM: read a state-machine counter of the width selected by
 * msg_formate into buf_out.
 */
int send_to_sm(void *hwdev, struct msg_module *nt_msg, void *buf_in,
	       u32 in_size, void *buf_out, u32 *out_size)
{
	int num_cmds = sizeof(sm_module_cmd_handle) /
		       sizeof(sm_module_cmd_handle[0]);
	struct sm_in_st *in = buf_in;
	struct sm_out_st *out = buf_out;
	int i, err = 0;

	if (!nt_msg || !buf_in || !buf_out ||
	    in_size != sizeof(*in) || *out_size != sizeof(*out)) {
		pr_err("Input param invalid!\n");
		return -EINVAL;
	}

	for (i = 0; i < num_cmds; i++) {
		if (nt_msg->msg_formate != sm_module_cmd_handle[i].sm_cmd_name)
			continue;
		err = sm_module_cmd_handle[i].sm_func(hwdev, (u32)in->id,
						      (u8)in->instance,
						      (u8)in->node, out);
	}

	if (err)
		pr_err("Get sm information fail!\n");

	*out_size = sizeof(struct sm_out_st);
	return err;
}
/* Completion timeout for a mgmt command; firmware update takes much
 * longer than ordinary commands.
 */
static u32 get_up_timeout_val(enum hifc_mod_type mod, u8 cmd)
{
#define UP_UPDATEFW_TIME_OUT_VAL 20000U
	return (mod == HIFC_MOD_L2NIC && cmd == NIC_UP_CMD_UPDATE_FW) ?
	       UP_UPDATEFW_TIME_OUT_VAL : UP_COMP_TIME_OUT_VAL;
}
/* API_CHAIN CSR write: copy the user-supplied values and write them
 * one 32-bit register at a time starting at csr_write_msg->addr.
 */
static int api_csr_write(void *hwdev, struct msg_module *nt_msg,
			 void *buf_in, u32 in_size, void *buf_out,
			 u32 *out_size)
{
	struct csr_write_st *csr_write_msg = (struct csr_write_st *)buf_in;
	int ret = 0;
	u32 rd_len;
	u32 rd_addr;
	u32 rd_cnt;
	u32 offset = 0;
	u8 node_id;
	u32 i;
	u8 *data;

	if (!buf_in || in_size != sizeof(*csr_write_msg))
		return -EINVAL;

	rd_len = csr_write_msg->rd_len;
	rd_addr = csr_write_msg->addr;
	node_id = (u8)nt_msg->up_cmd.up_db.comm_mod_type;

	/* writes are performed as whole 32-bit registers */
	if (rd_len % 4) {
		pr_err("Csr length must be a multiple of 4\n");
		return -EFAULT;
	}

	rd_cnt = rd_len / 4;
	data = kzalloc(rd_len, GFP_KERNEL);
	if (!data) {
		pr_err("No more memory\n");
		return -EFAULT;
	}

	if (copy_from_user(data, (void *)csr_write_msg->data, rd_len)) {
		pr_err("Copy information from user failed\n");
		ret = -EFAULT;
		goto out_free;
	}

	for (i = 0; i < rd_cnt; i++) {
		ret = hifc_api_csr_wr32(hwdev, node_id,
					rd_addr + offset,
					*((u32 *)(data + offset)));
		if (ret) {
			/* Fixed: node_id and csr addr were passed in
			 * swapped order relative to the format string.
			 */
			pr_err("Csr wr fail, ret: %d, node_id: %d, csr addr: 0x%08x\n",
			       ret, node_id, rd_addr + offset);
			goto out_free;
		}
		offset += 4;
	}

	*out_size = 0;
out_free:
	kfree(data);
	return ret;
}
/* API_CHAIN CSR read: fetch rd_len bytes (rounded up to whole 32-bit
 * registers) starting at up_log_msg->addr into buf_out.
 */
static int api_csr_read(void *hwdev, struct msg_module *nt_msg,
			void *buf_in, u32 in_size, void *buf_out, u32 *out_size)
{
	struct up_log_msg_st *req = (struct up_log_msg_st *)buf_in;
	u32 total, base, pos;
	u8 node_id;
	int err = 0;

	if (!buf_in || !buf_out || in_size != sizeof(*req) ||
	    *out_size != req->rd_len)
		return -EINVAL;

	total = req->rd_len;
	base = req->addr;
	node_id = (u8)nt_msg->up_cmd.up_db.comm_mod_type;

	/* one 32-bit register per iteration, covering any trailing
	 * partial word as well
	 */
	for (pos = 0; pos < total; pos += 4) {
		err = hifc_api_csr_rd32(hwdev, node_id, base + pos,
					(u32 *)(((u8 *)buf_out) + pos));
		if (err) {
			pr_err("Csr rd fail, err: %d, node_id: %d, csr addr: 0x%08x\n",
			       err, node_id, base + pos);
			return err;
		}
	}

	*out_size = total;
	return err;
}
/* Route a SEND_TO_UP ioctl to the management cpu.
 *
 * up_api_type selects the channel: API_CMD (synchronous mgmt message)
 * or API_CLP, or API_CHAIN for raw CSR read/write access.
 */
int send_to_up(void *hwdev, struct msg_module *nt_msg, void *buf_in,
	       u32 in_size, void *buf_out, u32 *out_size)
{
	int ret = 0;

	if ((!nt_msg) || (!hwdev) || (!buf_in) || (!buf_out)) {
		pr_err("Input param invalid!\n");
		return -EINVAL;
	}

	if ((nt_msg->up_cmd.up_db.up_api_type == API_CMD) ||
	    (nt_msg->up_cmd.up_db.up_api_type == API_CLP)) {
		enum hifc_mod_type mod;
		u8 cmd;
		u32 timeout;

		mod = (enum hifc_mod_type)nt_msg->up_cmd.up_db.comm_mod_type;
		cmd = nt_msg->up_cmd.up_db.chipif_cmd;
		/* firmware update needs a longer completion timeout */
		timeout = get_up_timeout_val(mod, cmd);

		if (nt_msg->up_cmd.up_db.up_api_type == API_CMD)
			ret = hifc_msg_to_mgmt_sync(hwdev, mod, cmd,
						    buf_in, (u16)in_size,
						    buf_out, (u16 *)out_size,
						    timeout);
		else
			ret = hifc_clp_to_mgmt(hwdev, mod, cmd,
					       buf_in, (u16)in_size,
					       buf_out, (u16 *)out_size);
		if (ret) {
			pr_err("Message to mgmt cpu return fail, mod: %d, cmd: %d\n",
			       mod, cmd);
			return ret;
		}
	} else if (nt_msg->up_cmd.up_db.up_api_type == API_CHAIN) {
		/* API_CHAIN: chipif_cmd selects CSR write vs read */
		if (nt_msg->up_cmd.up_db.chipif_cmd == API_CSR_WRITE) {
			ret = api_csr_write(hwdev, nt_msg, buf_in,
					    in_size, buf_out, out_size);
			return ret;
		}

		ret = api_csr_read(hwdev, nt_msg, buf_in,
				   in_size, buf_out, out_size);
	}

	return ret;
}
/* Forward a SEND_TO_UCODE ioctl to the microcode through the command
 * queue; ucode_imm selects the direct (immediate) response path over
 * the detailed one.
 */
int send_to_ucode(void *hwdev, struct msg_module *nt_msg, void *buf_in,
		  u32 in_size, void *buf_out, u32 *out_size)
{
	int ret = 0;

	if ((!nt_msg) || (!hwdev) || (!buf_in)) {
		pr_err("Input param invalid!\n");
		return -EINVAL;
	}

	if (nt_msg->ucode_cmd.ucode_db.ucode_imm) {
		ret = hifc_cmdq_direct_resp
			(hwdev, nt_msg->ucode_cmd.ucode_db.cmdq_ack_type,
			 nt_msg->ucode_cmd.ucode_db.comm_mod_type,
			 nt_msg->ucode_cmd.ucode_db.ucode_cmd_type,
			 buf_in, buf_out, 0);
		if (ret)
			pr_err("Send direct cmdq err: %d!\n", ret);
	} else {
		ret = hifc_cmdq_detail_resp
			(hwdev, nt_msg->ucode_cmd.ucode_db.cmdq_ack_type,
			 nt_msg->ucode_cmd.ucode_db.comm_mod_type,
			 nt_msg->ucode_cmd.ucode_db.ucode_cmd_type,
			 buf_in, buf_out, 0);
		if (ret)
			pr_err("Send detail cmdq err: %d!\n", ret);
	}

	return ret;
}
/* Bracket tool ioctls so the module can track active users */
void hifc_tool_cnt_inc(void)
{
	atomic_inc(&tool_used_cnt);
}

void hifc_tool_cnt_dec(void)
{
	atomic_dec(&tool_used_cnt);
}
/* A function matches when its card's chip name equals @ifname and the
 * function fits the requested @type (TYPE_UNKNOWN means "still in
 * hardware init stage or later").
 */
static bool __is_pcidev_match_chip_name(const char *ifname,
					struct hifc_pcidev *dev,
					struct card_node *chip_node,
					enum func_type type)
{
	if (strncmp(chip_node->chip_name, ifname, IFNAMSIZ) != 0)
		return false;

	if (type == TYPE_UNKNOWN)
		return dev->init_state >= HIFC_INIT_STATE_HW_PART_INITED;

	if (dev->init_state >= HIFC_INIT_STATE_HW_PART_INITED &&
	    hifc_func_type(dev->hwdev) != type)
		return false;

	return true;
}
/* Find the first function of the card named @ifname that matches
 * @type, skipping functions that are being removed.
 */
static struct hifc_pcidev *_get_pcidev_by_chip_name(char *ifname,
						    enum func_type type)
{
	struct card_node *chip;
	struct hifc_pcidev *func;
	struct hifc_pcidev *found = NULL;

	lld_dev_hold();
	list_for_each_entry(chip, &g_hinic_chip_list, node) {
		list_for_each_entry(func, &chip->func_list, node) {
			if (test_bit(HIFC_FUNC_IN_REMOVE, &func->flag))
				continue;
			if (__is_pcidev_match_chip_name(ifname, func, chip,
							type)) {
				found = func;
				goto out;
			}
		}
	}
out:
	lld_dev_put();
	return found;
}
static struct hifc_pcidev *hifc_get_pcidev_by_chip_name(char *ifname)
{
struct hifc_pcidev *dev, *dev_hw_init;
/* find hw init device first */
dev_hw_init = _get_pcidev_by_chip_name(ifname, TYPE_UNKNOWN);
if (dev_hw_init) {
if (hifc_func_type(dev_hw_init->hwdev) == TYPE_PPF)
return dev_hw_init;
}
dev = _get_pcidev_by_chip_name(ifname, TYPE_PPF);
if (dev) {
if (dev_hw_init && (dev_hw_init->init_state >= dev->init_state))
return dev_hw_init;
return dev;
}
dev = _get_pcidev_by_chip_name(ifname, TYPE_PF);
if (dev) {
if (dev_hw_init && (dev_hw_init->init_state >= dev->init_state))
return dev_hw_init;
return dev;
}
return NULL;
}
/* Resolve @ifname to a function; it may be a chip name, an FC device
 * name or a netdevice name — chip name takes precedence.
 */
static struct hifc_pcidev *hifc_get_pcidev_by_ifname(char *ifname)
{
	struct hifc_pcidev *dev = hifc_get_pcidev_by_chip_name(ifname);

	return dev ? dev : hifc_get_pcidev_by_dev_name(ifname);
}
/* Return the hwdev of the function named @ifname, or NULL. */
void *hifc_get_hwdev_by_ifname(char *ifname)
{
	struct hifc_pcidev *pcidev;

	if (!ifname) {
		pr_err("Input param invalid!\n");
		return NULL;
	}

	pcidev = hifc_get_pcidev_by_ifname(ifname);
	return pcidev ? pcidev->hwdev : NULL;
}
/* Return the init state of the function named @ifname, or
 * HIFC_INIT_STATE_NONE when it cannot be found.
 */
enum hifc_init_state hifc_get_init_state_by_ifname(char *ifname)
{
	struct hifc_pcidev *pcidev;

	if (!ifname) {
		pr_err("Input param invalid!\n");
		return HIFC_INIT_STATE_NONE;
	}

	pcidev = hifc_get_pcidev_by_ifname(ifname);
	if (!pcidev) {
		pr_err("Can not get device %s\n", ifname);
		return HIFC_INIT_STATE_NONE;
	}

	return pcidev->init_state;
}
/* Copy the name of the first fully-initialized FC uld device into
 * @devname; leaves it untouched when none exists.
 */
void get_fc_devname(char *devname)
{
	struct card_node *chip;
	struct hifc_pcidev *func;

	if (!devname) {
		pr_err("Input param invalid!\n");
		return;
	}

	lld_dev_hold();
	list_for_each_entry(chip, &g_hinic_chip_list, node) {
		list_for_each_entry(func, &chip->func_list, node) {
			if (test_bit(HIFC_FUNC_IN_REMOVE, &func->flag) ||
			    func->init_state < HIFC_INIT_STATE_ALL_INITED ||
			    !func->uld_dev)
				continue;
			strlcpy(devname, (char *)func->uld_dev_name,
				IFNAMSIZ);
			goto out;
		}
	}
out:
	lld_dev_put();
}
/* Fill a struct nic_card_id with the numeric id parsed from every
 * card's chip name ("hifc<N>").
 */
void hifc_get_all_chip_id(void *id_info)
{
	/* Renamed from "card_id": the old local shadowed the global
	 * "int card_id" declared in hifc_tool.h.
	 */
	struct nic_card_id *card_ids = (struct nic_card_id *)id_info;
	struct card_node *chip_node;
	int i = 0;
	int id;

	if (!card_ids) {
		pr_err("Input param invalid!\n");
		return;
	}

	lld_dev_hold();
	list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
		/* sscanf() returns the number of converted fields; the
		 * old "err < 0" test could never detect a parse failure
		 * and left "id" uninitialized when the name didn't match.
		 */
		id = 0;
		if (sscanf(chip_node->chip_name,
			   HIFC_CHIP_NAME "%d", &id) != 1)
			pr_err("Failed to get hifc id\n");
		card_ids->id[i] = id;
		i++;
	}
	lld_dev_put();
	card_ids->num = i;
}
/* Find the card that owns @hwdev, skipping functions under removal;
 * NULL when the hwdev belongs to no known card.
 */
static struct card_node *hifc_get_chip_node_by_hwdev(const void *hwdev)
{
	struct card_node *candidate;
	struct hifc_pcidev *func;
	struct card_node *owner = NULL;

	if (!hwdev)
		return NULL;

	lld_dev_hold();
	list_for_each_entry(candidate, &g_hinic_chip_list, node) {
		list_for_each_entry(func, &candidate->func_list, node) {
			if (test_bit(HIFC_FUNC_IN_REMOVE, &func->flag))
				continue;
			if (func->hwdev == hwdev) {
				owner = candidate;
				goto out;
			}
		}
	}
out:
	lld_dev_put();
	return owner;
}
/* Report the PCI device id of the card owning @hwdev; 0 when no
 * function with the expected vendor id is found.
 */
int hifc_get_device_id(void *hwdev, u16 *dev_id)
{
	struct card_node *chip_node;
	struct hifc_pcidev *func;
	u16 vendor = 0;
	u16 device = 0;

	if (!dev_id || !hwdev) {
		pr_err("Input param invalid!\n");
		return -ENODEV;
	}

	chip_node = hifc_get_chip_node_by_hwdev(hwdev);
	if (!chip_node)
		return -ENODEV;

	lld_dev_hold();
	/* config-space words at offsets 0/2 are the vendor/device ids */
	list_for_each_entry(func, &chip_node->func_list, node) {
		if (test_bit(HIFC_FUNC_IN_REMOVE, &func->flag))
			continue;
		pci_read_config_word(func->pcidev, 0, &vendor);
		if (vendor == HIFC_PCI_VENDOR_ID) {
			pci_read_config_word(func->pcidev, 2, &device);
			break;
		}
	}
	lld_dev_put();

	*dev_id = device;
	return 0;
}
/* Translate a physical port id into the global PF function id on the
 * card that owns @hwdev. *isvalid is set to 1 only on a match; the
 * caller's initial value is preserved otherwise.
 */
int hifc_get_pf_id(void *hwdev, u32 port_id, u32 *pf_id, u32 *isvalid)
{
	struct card_node *chip_node = NULL;
	struct hifc_pcidev *func = NULL;

	if (!isvalid || !pf_id || !hwdev) {
		pr_err("Input param invalid!\n");
		return -ENODEV;
	}

	chip_node = hifc_get_chip_node_by_hwdev(hwdev);
	if (!chip_node)
		return -ENODEV;

	lld_dev_hold();
	list_for_each_entry(func, &chip_node->func_list, node) {
		if (hifc_physical_port_id(func->hwdev) != port_id)
			continue;

		*pf_id = hifc_global_func_id(func->hwdev);
		*isvalid = 1;
		break;
	}
	lld_dev_put();

	return 0;
}
/* True when @offset equals the BAR0 base address of any function that
 * is not currently being removed.
 */
bool hifc_is_valid_bar_addr(u64 offset)
{
	struct card_node *chip = NULL;
	struct hifc_pcidev *func = NULL;
	bool valid = false;

	lld_dev_hold();
	list_for_each_entry(chip, &g_hinic_chip_list, node) {
		list_for_each_entry(func, &chip->func_list, node) {
			if (test_bit(HIFC_FUNC_IN_REMOVE, &func->flag))
				continue;

			if (offset == pci_resource_start(func->pcidev, 0)) {
				valid = true;
				goto out;
			}
		}
	}
out:
	lld_dev_put();
	return valid;
}
/* Collect BAR0 size/base of every live PF on the card whose name
 * matches @chip_name into @card_func, bounded by MAX_SIZE entries.
 */
void hifc_get_card_func_info_by_card_name(
	const char *chip_name, struct hifc_card_func_info *card_func)
{
	struct card_node *chip_node = NULL;
	struct hifc_pcidev *dev;
	struct func_pdev_info *pdev_info;

	if ((!card_func) || (!chip_name)) {
		pr_err("Input param invalid!\n");
		return;
	}

	card_func->num_pf = 0;

	lld_dev_hold();
	list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
		if (strncmp(chip_node->chip_name, chip_name, IFNAMSIZ))
			continue;

		list_for_each_entry(dev, &chip_node->func_list, node) {
			if (hifc_func_type(dev->hwdev) == TYPE_VF)
				continue;

			if (test_bit(HIFC_FUNC_IN_REMOVE, &dev->flag))
				continue;

			/* Bound-check BEFORE writing: the original only
			 * checked after the increment and only broke the
			 * inner loop, so a later matching chip node could
			 * write pdev_info[MAX_SIZE] out of bounds.
			 */
			if (card_func->num_pf >= MAX_SIZE)
				goto out;

			pdev_info = &card_func->pdev_info[card_func->num_pf];
			pdev_info->bar0_size = pci_resource_len(dev->pcidev, 0);
			pdev_info->bar0_phy_addr =
					pci_resource_start(dev->pcidev, 0);
			card_func->num_pf++;
		}
	}
out:
	lld_dev_put();
}
/* A function is usable once its hwdev is initialized and it is not in
 * the middle of removal.
 */
static bool __is_func_valid(struct hifc_pcidev *dev)
{
	return !test_bit(HIFC_FUNC_IN_REMOVE, &dev->flag) &&
	       dev->init_state >= HIFC_INIT_STATE_HWDEV_INITED;
}
/* Fill @bufin (a struct card_info) with the ULD name, service type and
 * PCI bus info of every valid PF on the card owning @hwdev.
 */
void hifc_get_card_info(void *hwdev, void *bufin)
{
	struct card_info *info = (struct card_info *)bufin;
	struct card_node *chip_node = NULL;
	struct hifc_pcidev *func = NULL;
	u32 idx = 0;

	if (!bufin || !hwdev) {
		pr_err("Input param invalid!\n");
		return;
	}

	info->pf_num = 0;

	chip_node = hifc_get_chip_node_by_hwdev(hwdev);
	if (!chip_node)
		return;

	lld_dev_hold();
	list_for_each_entry(func, &chip_node->func_list, node) {
		if (!__is_func_valid(func))
			continue;

		strlcpy(info->pf[idx].name, func->uld_dev_name, IFNAMSIZ);
		info->pf[idx].pf_type = (u32)BIT(SERVICE_T_FC);
		strlcpy(info->pf[idx].bus_info, pci_name(func->pcidev),
			sizeof(info->pf[idx].bus_info));
		info->pf_num++;
		idx++;
	}
	lld_dev_put();
}
// SPDX-License-Identifier: GPL-2.0
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#include "unf_log.h"
#include "unf_common.h"
#include "unf_event.h"
#include "unf_lport.h"
#include "unf_exchg.h"
#include "unf_portman.h"
#include "unf_rport.h"
#include "unf_io.h"
#include "unf_service.h"
#include "unf_rport.h"
#include "unf_npiv.h"
#include "hifc_portmng.h"
#define UNF_LOOP_STOP_NEED_WAIT 0
#define UNF_LOOP_STOP_NO_NEED_WAIT 1
#define UNF_MAX_SAVE_ENTRY_NUM 60
#define UNF_CHECK_CONFIG_SPEED_BY_SFSSPEED(sfs_speed, cfg_speed) \
((sfs_speed) < (cfg_speed) || (sfs_speed) == UNF_PORT_SFP_SPEED_ERR)
#define UNF_LPORT_CHIP_ERROR(lport) \
((lport)->pcie_error_cnt.pcie_error_count[UNF_PCIE_FATALERRORDETECTED])
struct unf_global_lport_s global_lport_mgr;
static unsigned int unf_port_link_up(struct unf_lport_s *v_lport,
void *v_in_put);
static unsigned int unf_port_link_down(struct unf_lport_s *v_lport,
void *v_in_put);
static unsigned int unf_port_abnormal_reset(struct unf_lport_s *v_lport,
void *v_in_put);
static unsigned int unf_port_reset_start(struct unf_lport_s *v_lport,
void *v_in_put);
static unsigned int unf_port_reset_end(struct unf_lport_s *v_lport,
void *v_in_put);
static unsigned int unf_port_nop(struct unf_lport_s *v_lport, void *v_in_put);
static unsigned int unf_port_clean_done(struct unf_lport_s *v_lport,
void *v_in_put);
static unsigned int unf_port_begin_remove(struct unf_lport_s *v_lport,
void *v_in_put);
static unsigned int unf_port_release_rport_index(struct unf_lport_s *v_lport,
void *v_in_put);
static int unf_cm_port_info_get(struct unf_lport_s *v_lport,
struct unf_hinicam_pkg *v_in_put);
static int unf_cm_port_speed_set(struct unf_lport_s *v_lport,
struct unf_hinicam_pkg *v_in_put);
static int unf_cm_topo_set(struct unf_lport_s *v_lport,
struct unf_hinicam_pkg *v_in_put);
static int unf_cm_port_set(struct unf_lport_s *v_lport,
struct unf_hinicam_pkg *v_in_put);
static int unf_get_port_sfp_info(struct unf_lport_s *v_lport,
struct unf_hinicam_pkg *v_in_put);
static int unf_cm_get_all_port_info(struct unf_lport_s *v_lport,
struct unf_hinicam_pkg *v_in_put);
static int unf_cm_clear_error_code_sum(struct unf_lport_s *v_lport,
struct unf_hinicam_pkg *v_in_put);
static int unf_cm_bbscn_set(struct unf_lport_s *v_lport,
struct unf_hinicam_pkg *v_in_put);
static int unf_get_io_dfx_statistics(struct unf_lport_s *v_pstLPort,
struct unf_hinicam_pkg *v_input);
static int unf_cm_set_vport(struct unf_lport_s *v_lport,
struct unf_hinicam_pkg *v_input);
static int unf_cm_link_delay_get(struct unf_lport_s *v_lport,
struct unf_hinicam_pkg *v_in_put);
static int unf_cm_save_data_mode(struct unf_lport_s *v_lport,
struct unf_hinicam_pkg *v_in_put);
static int unf_cm_set_dif(struct unf_lport_s *v_lport,
struct unf_hinicam_pkg *v_in_put);
static int unf_cm_select_dif_mode(struct unf_lport_s *v_lport,
struct unf_hinicam_pkg *v_in_put);
static int unf_cm_adm_show_xchg(struct unf_lport_s *v_lport,
struct unf_hinicam_pkg *v_in_put);
static int unf_cm_adm_link_time_out_opt(struct unf_lport_s *v_lport,
struct unf_hinicam_pkg *v_in_put);
static int unf_cm_adm_log_level_opt(struct unf_lport_s *v_lport,
struct unf_hinicam_pkg *v_in_put);
/* Dispatch table mapping port link events to their handlers;
 * scanned linearly by unf_fc_port_link_event().
 */
static struct unf_port_action_s lport_action[] = {
	{ UNF_PORT_LINK_UP, unf_port_link_up },
	{ UNF_PORT_LINK_DOWN, unf_port_link_down },
	{ UNF_PORT_RESET_START, unf_port_reset_start },
	{ UNF_PORT_RESET_END, unf_port_reset_end },
	{ UNF_PORT_NOP, unf_port_nop },
	{ UNF_PORT_CLEAN_DONE, unf_port_clean_done },
	{ UNF_PORT_BEGIN_REMOVE, unf_port_begin_remove },
	{ UNF_PORT_RELEASE_RPORT_INDEX, unf_port_release_rport_index },
	{ UNF_PORT_ABNORMAL_RESET, unf_port_abnormal_reset },
};
/* Dispatch table mapping hifcadm tool opcodes to their command
 * handlers.
 */
static struct unf_hifcadm_action_s unf_hifcadm_action[] = {
	{ UNF_PORT_SET_OP, unf_cm_port_set },
	{ UNF_TOPO_SET_OP, unf_cm_topo_set },
	{ UNF_SPEED_SET_OP, unf_cm_port_speed_set },
	{ UNF_INFO_GET_OP, unf_cm_port_info_get },
	{ UNF_INFO_CLEAR_OP, unf_cm_clear_error_code_sum },
	{ UNF_SFP_INFO_OP, unf_get_port_sfp_info },
	{ UNF_ALL_INFO_OP, unf_cm_get_all_port_info },
	{ UNF_BBSCN, unf_cm_bbscn_set },
	{ UNF_DFX, unf_get_io_dfx_statistics },
	{ UNF_VPORT, unf_cm_set_vport },
	{ UNF_LINK_DELAY, unf_cm_link_delay_get },
	{ UNF_SAVA_DATA, unf_cm_save_data_mode },
	{ UNF_DIF, unf_cm_set_dif },
	{ UNF_DIF_CONFIG, unf_cm_select_dif_mode },
	{ UNF_SHOW_XCHG, unf_cm_adm_show_xchg },
	{ FC_LINK_TMO_OPT, unf_cm_adm_link_time_out_opt },
	{ FC_DRV_LOG_OPT, unf_cm_adm_log_level_opt },
};
/* Report (and, unless @v_show_only, free) an L_Port's dirty R_Port
 * pool memory and RPI bitmap.
 */
static void unf_destroy_dirty_rport(struct unf_lport_s *v_lport,
				    int v_show_only)
{
	unsigned int dirty_rport = 0;

	UNF_REFERNCE_VAR(dirty_rport);

	/* Only L_Ports flagged with a dirty R_Port pool need handling */
	if (!(v_lport->dirty_flag & UNF_LPORT_DIRTY_FLAG_RPORT_POOL_DIRTY))
		return;

	dirty_rport = v_lport->rport_pool.rport_pool_count;
	UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
		  "[info]Port(0x%x) has %u dirty RPort(s)",
		  v_lport->port_id, dirty_rport);

	/* free R_Port pool memory & bitmap */
	if (v_show_only == UNF_FALSE) {
		vfree(v_lport->rport_pool.rport_pool_add);
		v_lport->rport_pool.rport_pool_add = NULL;
		vfree(v_lport->rport_pool.pul_rpi_bitmap);
		v_lport->rport_pool.pul_rpi_bitmap = NULL;
	}

	UNF_REFERNCE_VAR(dirty_rport);
}
/* Walk the global dirty L_Port list: report each port's dirty state,
 * release its dirty exchanges and R_Ports, and (when @v_show_only is
 * false) unlink and free the port itself. *v_ditry_port_num receives
 * the number of dirty ports visited.
 */
void unf_show_dirty_port(int v_show_only, unsigned int *v_ditry_port_num)
{
	struct list_head *entry = NULL;
	struct list_head *entry_tmp = NULL;
	struct unf_lport_s *lport = NULL;
	unsigned long flags = 0;
	unsigned int count = 0;

	UNF_CHECK_VALID(0x2200, UNF_TRUE, NULL != v_ditry_port_num, return);

	spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags);
	list_for_each_safe(entry, entry_tmp,
			   &global_lport_mgr.list_dirty_head) {
		lport = list_entry(entry, struct unf_lport_s, entry_lport);

		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
			  "[info]Port(0x%x) has dirty data(0x%x)",
			  lport->port_id, lport->dirty_flag);

		/* Destroy dirty L_Port's exchange(s) & R_Port(s) */
		unf_destroy_dirty_xchg(lport, v_show_only);
		unf_destroy_dirty_rport(lport, v_show_only);

		/* Delete (dirty L_Port) list entry if necessary */
		if (v_show_only == UNF_FALSE) {
			list_del_init(entry);
			vfree(lport);
		}
		count++;
	}
	spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock,
			       flags);

	*v_ditry_port_num = count;
}
/**
 * unf_send_event() - post an event to an L_Port's event manager.
 * @port_id:  ID of the target L_Port
 * @syn_flag: event mode flag stored in event_asy_flag; non-zero makes
 *            the call synchronous (waits for the handler's completion
 *            and returns its result)
 * @argc_in:  opaque input argument handed to @p_func
 * @argc_out: opaque output argument handed to @p_func
 * @p_func:   handler the event thread will run
 *
 * Holds a reference on the L_Port for the whole call so it cannot be
 * destroyed while the event is queued.
 * Return: handler result (sync), RETURN_OK (async), or
 * UNF_RETURN_ERROR on any setup failure.
 */
int unf_send_event(unsigned int port_id,
		   unsigned int syn_flag,
		   void *argc_in,
		   void *argc_out,
		   int (*p_func)(void *argc_in, void *argc_out))
{
	struct unf_lport_s *lport = NULL;
	struct unf_cm_event_report *event = NULL;
	int ret = 0;

	lport = unf_find_lport_by_port_id(port_id);
	if (!lport) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_INFO,
			  "Cannot find LPort(0x%x).", port_id);
		return UNF_RETURN_ERROR;
	}
	/* Pin the port; fails when the port is already being removed */
	if (unf_lport_refinc(lport) != RETURN_OK) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
			  "LPort(0x%x) is removing, no need process.",
			  lport->port_id);
		return UNF_RETURN_ERROR;
	}
	/* All three event-manager hooks must exist before queueing */
	if (unlikely((!lport->event_mgr.pfn_unf_get_free_event) ||
		     (!lport->event_mgr.pfn_unf_post_event) ||
		     (!lport->event_mgr.pfn_unf_release_event))) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR,
			  "Event function is NULL.");
		unf_lport_ref_dec_to_destroy(lport);
		return UNF_RETURN_ERROR;
	}
	if (lport->b_port_removing == UNF_TRUE) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
			  "LPort(0x%x) is removing, no need process.",
			  lport->port_id);
		unf_lport_ref_dec_to_destroy(lport);
		return UNF_RETURN_ERROR;
	}
	event = lport->event_mgr.pfn_unf_get_free_event((void *)lport);
	if (!event) {
		unf_lport_ref_dec_to_destroy(lport);
		return UNF_RETURN_ERROR;
	}
	init_completion(&event->event_comp);
	event->lport = lport;
	event->event_asy_flag = syn_flag;
	event->pfn_unf_event_task = p_func;
	event->para_in = argc_in;
	event->para_out = argc_out;
	lport->event_mgr.pfn_unf_post_event(lport, event);
	if (event->event_asy_flag) {
		/* You must wait for the other party to return. Otherwise,
		 *the linked list may be in disorder.
		 */
		wait_for_completion(&event->event_comp);
		ret = (int)event->result;
		/* Synchronous path releases the event back to the pool;
		 * on the async path the event thread owns it.
		 */
		lport->event_mgr.pfn_unf_release_event(lport, event);
	} else {
		ret = RETURN_OK;
	}
	unf_lport_ref_dec_to_destroy(lport);
	return ret;
}
/* Update the L_Port's active topology under the port state lock,
 * rejecting values outside [UNF_ACT_TOP_PUBLIC_LOOP,
 * UNF_ACT_TOP_UNKNOWN].
 */
void unf_lport_update_topo(struct unf_lport_s *v_lport,
			   enum unf_act_topo_e v_enactive_topo)
{
	unsigned long flag = 0;

	UNF_CHECK_VALID(0x2210, UNF_TRUE, NULL != v_lport, return);

	if ((v_enactive_topo < UNF_ACT_TOP_PUBLIC_LOOP) ||
	    (v_enactive_topo > UNF_ACT_TOP_UNKNOWN)) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
			  "[err]Port(0x%x) set invalid topology(0x%x) with current value(0x%x)",
			  v_lport->nport_id, v_enactive_topo,
			  v_lport->en_act_topo);
		return;
	}

	spin_lock_irqsave(&v_lport->lport_state_lock, flag);
	v_lport->en_act_topo = v_enactive_topo;
	spin_unlock_irqrestore(&v_lport->lport_state_lock, flag);
}
/* Mark the L_Port as being removed: detach the low-level FC port
 * handle, raise the removing flag and record destroy step 0.
 * NOTE(review): no lock is taken here — presumably callers serialize
 * removal; verify against the removal path.
 */
void unf_set_lport_removing(struct unf_lport_s *v_lport)
{
	UNF_CHECK_VALID(0x2216, UNF_TRUE, (v_lport), return);
	v_lport->fc_port = NULL;
	v_lport->b_port_removing = UNF_TRUE;
	v_lport->destroy_step = UNF_LPORT_DESTROY_STEP_0_SET_REMOVING;
}
/* Tear down an L_Port: mark it removing, drop the caller's reference
 * and block until the release path signals the on-stack completion,
 * i.e. until the last reference is gone.
 */
unsigned int unf_release_local_port(void *v_lport)
{
	struct unf_lport_s *lport = v_lport;
	/* Lives on this stack frame; safe because we wait below before
	 * returning.
	 */
	struct completion local_port_free_completion =
		COMPLETION_INITIALIZER(local_port_free_completion);

	UNF_CHECK_VALID(0x2217, UNF_TRUE, (lport),
			return UNF_RETURN_ERROR);
	lport->lport_free_completion = &local_port_free_completion;
	unf_set_lport_removing(lport);
	unf_lport_ref_dec(lport);
	wait_for_completion(lport->lport_free_completion);
	/* for dirty case */
	/* A dirty port stays queued for unf_show_dirty_port() to free
	 * later; only a clean port is freed here.
	 */
	if (lport->dirty_flag == 0)
		vfree(lport);
	return RETURN_OK;
}
static void unf_free_all_esgl_pages(struct unf_lport_s *v_lport)
{
struct list_head *node = NULL;
struct list_head *next_node = NULL;
unsigned long flag = 0;
unsigned int alloc_idx;
UNF_CHECK_VALID(0x2218, UNF_TRUE, (v_lport), return);
spin_lock_irqsave(&v_lport->esgl_pool.esgl_pool_lock, flag);
list_for_each_safe(node, next_node,
&v_lport->esgl_pool.list_esgl_pool) {
list_del(node);
}
spin_unlock_irqrestore(&v_lport->esgl_pool.esgl_pool_lock, flag);
if (v_lport->esgl_pool.esgl_buf_list.buflist) {
for (alloc_idx = 0;
alloc_idx < v_lport->esgl_pool.esgl_buf_list.buf_num;
alloc_idx++) {
if (v_lport->esgl_pool.esgl_buf_list.buflist[alloc_idx].vaddr) {
dma_free_coherent(&v_lport->low_level_func.dev->dev,
v_lport->esgl_pool.esgl_buf_list.buf_size,
v_lport->esgl_pool.esgl_buf_list.buflist[alloc_idx].vaddr,
v_lport->esgl_pool.esgl_buf_list.buflist[alloc_idx].paddr);
v_lport->esgl_pool.esgl_buf_list.buflist[alloc_idx].vaddr = NULL;
}
}
kfree(v_lport->esgl_pool.esgl_buf_list.buflist);
v_lport->esgl_pool.esgl_buf_list.buflist = NULL;
}
}
/* Build the extended-SGL pool: one descriptor per outstanding I/O,
 * each owning one PAGE_SIZE DMA page carved out of a smaller number
 * of large DMA-coherent buffers.
 * Return: RETURN_OK, or UNF_RETURN_ERROR with everything released.
 */
static unsigned int unf_init_esgl_pool(struct unf_lport_s *v_lport)
{
	struct unf_esgl_s *esgl = NULL;
	unsigned int ret = RETURN_OK;
	unsigned int index = 0;
	unsigned int buf_total_size;
	unsigned int buf_num;
	unsigned int alloc_idx;
	unsigned int cur_buf_idx = 0;
	unsigned int cur_buf_offset = 0;
	unsigned int buf_cnt_perhugebuf;

	UNF_CHECK_VALID(0x2219, UNF_TRUE, NULL != v_lport,
			return UNF_RETURN_ERROR);

	/* One ESGL descriptor per supported outstanding I/O */
	v_lport->esgl_pool.esgl_pool_count =
		v_lport->low_level_func.lport_cfg_items.max_io;
	spin_lock_init(&v_lport->esgl_pool.esgl_pool_lock);
	INIT_LIST_HEAD(&v_lport->esgl_pool.list_esgl_pool);

	/* vzalloc replaces the original vmalloc + memset pair */
	v_lport->esgl_pool.esgl_pool_addr =
		vzalloc((size_t)((v_lport->esgl_pool.esgl_pool_count) *
			sizeof(struct unf_esgl_s)));
	if (!v_lport->esgl_pool.esgl_pool_addr) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_ERR,
			  "LPort(0x%x) cannot allocate ESGL Pool.",
			  v_lport->port_id);
		return UNF_RETURN_ERROR;
	}
	esgl = (struct unf_esgl_s *)v_lport->esgl_pool.esgl_pool_addr;

	/* Total page bytes needed, split into huge buffers of at most
	 * BUF_LIST_PAGE_SIZE each; round the buffer count up so a
	 * partial huge buffer is still allocated.
	 */
	buf_total_size =
		(unsigned int)(PAGE_SIZE * v_lport->esgl_pool.esgl_pool_count);
	v_lport->esgl_pool.esgl_buf_list.buf_size =
		buf_total_size > BUF_LIST_PAGE_SIZE ? BUF_LIST_PAGE_SIZE :
		buf_total_size;
	buf_cnt_perhugebuf =
		v_lport->esgl_pool.esgl_buf_list.buf_size / PAGE_SIZE;
	buf_num = v_lport->esgl_pool.esgl_pool_count %
		buf_cnt_perhugebuf ? v_lport->esgl_pool.esgl_pool_count /
		buf_cnt_perhugebuf + 1 : v_lport->esgl_pool.esgl_pool_count /
		buf_cnt_perhugebuf;
	/* kcalloc zeroes the array and checks the multiplication for
	 * overflow (original used kmalloc + memset).
	 */
	v_lport->esgl_pool.esgl_buf_list.buflist =
		kcalloc(buf_num, sizeof(struct buff_list_s), GFP_KERNEL);
	v_lport->esgl_pool.esgl_buf_list.buf_num = buf_num;
	if (!v_lport->esgl_pool.esgl_buf_list.buflist) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
			  "[err]Allocate Esgl pool buf list failed out of memory");
		goto free_buff;
	}

	for (alloc_idx = 0; alloc_idx < buf_num; alloc_idx++) {
		v_lport->esgl_pool.esgl_buf_list.buflist[alloc_idx].vaddr =
			dma_alloc_coherent(
			&v_lport->low_level_func.dev->dev,
			v_lport->esgl_pool.esgl_buf_list.buf_size,
			&v_lport->esgl_pool.esgl_buf_list.buflist[alloc_idx].paddr,
			GFP_KERNEL);
		if (!v_lport->esgl_pool.esgl_buf_list.buflist[alloc_idx].vaddr)
			goto free_buff;
		memset(v_lport->esgl_pool.esgl_buf_list.buflist[alloc_idx].vaddr,
		       0, v_lport->esgl_pool.esgl_buf_list.buf_size);
	}

	/* Hand each descriptor its page (CPU + DMA address) and link it
	 * into the free list.
	 */
	for (index = 0; index < v_lport->esgl_pool.esgl_pool_count; index++) {
		if ((index != 0) && !(index % buf_cnt_perhugebuf))
			cur_buf_idx++;
		cur_buf_offset =
			(unsigned int)
			(PAGE_SIZE * (index % buf_cnt_perhugebuf));
		esgl->page.page_address =
			(unsigned long long)v_lport->esgl_pool.esgl_buf_list.buflist[cur_buf_idx].vaddr +
			cur_buf_offset;
		esgl->page.page_size = PAGE_SIZE;
		esgl->page.esgl_phyaddr =
			v_lport->esgl_pool.esgl_buf_list.buflist[cur_buf_idx].paddr +
			cur_buf_offset;
		list_add_tail(&esgl->entry_esgl,
			      &v_lport->esgl_pool.list_esgl_pool);
		esgl++;
	}

	UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO,
		  "[EVENT]Allocate bufnum:%u, buf_total_size:%u", buf_num,
		  buf_total_size);
	return ret;

free_buff:
	unf_free_all_esgl_pages(v_lport);
	vfree(v_lport->esgl_pool.esgl_pool_addr);
	/* Original left this pointer dangling, so a later
	 * unf_free_esgl_pool() could vfree() it a second time.
	 */
	v_lport->esgl_pool.esgl_pool_addr = NULL;
	return UNF_RETURN_ERROR;
}
/* Release the ESGL pool: DMA pages and huge buffers first, then the
 * descriptor array; finally record destroy step 5.
 */
static void unf_free_esgl_pool(struct unf_lport_s *v_lport)
{
	UNF_CHECK_VALID(0x2220, UNF_TRUE, (v_lport), return);

	unf_free_all_esgl_pages(v_lport);
	v_lport->esgl_pool.esgl_pool_count = 0;

	/* vfree(NULL) is a no-op, so the original NULL guard was
	 * redundant.
	 */
	vfree(v_lport->esgl_pool.esgl_pool_addr);
	v_lport->esgl_pool.esgl_pool_addr = NULL;

	v_lport->destroy_step = UNF_LPORT_DESTROY_STEP_5_DESTROY_ESGL_POOL;
}
/* Find a (possibly virtual) L_Port by its encoded port id.  The low
 * bits select the physical port; the PORTID_VPINDEX_MASK bits select
 * a vport index resolved via unf_cm_lookup_vport_by_vp_index().
 * Searches both the normal list and the in-integration list.
 * NOTE(review): the vport lookup runs after the global list lock is
 * dropped — presumably safe because the L_Port was not removing;
 * verify against the removal path.
 */
struct unf_lport_s *unf_find_lport_by_port_id(unsigned int v_port_id)
{
	struct unf_lport_s *lport = NULL;
	struct list_head *node = NULL;
	struct list_head *next_node = NULL;
	unsigned long flags = 0;
	unsigned int port_id = v_port_id & (~PORTID_VPINDEX_MASK);
	unsigned short vport_index = (v_port_id & PORTID_VPINDEX_MASK) >>
		PORTID_VPINDEX_SHIT;

	spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags);
	list_for_each_safe(node, next_node,
			   &global_lport_mgr.list_lport_list_head) {
		lport = list_entry(node, struct unf_lport_s, entry_lport);
		if ((port_id == lport->port_id) &&
		    (lport->b_port_removing != UNF_TRUE)) {
			spin_unlock_irqrestore(
				&global_lport_mgr.global_lport_list_lock,
				flags);
			return unf_cm_lookup_vport_by_vp_index(lport,
							       vport_index);
		}
	}
	list_for_each_safe(node, next_node,
			   &global_lport_mgr.list_intergrad_head) {
		lport = list_entry(node, struct unf_lport_s, entry_lport);
		if ((port_id == lport->port_id) &&
		    (lport->b_port_removing != UNF_TRUE)) {
			spin_unlock_irqrestore(
				&global_lport_mgr.global_lport_list_lock,
				flags);
			return unf_cm_lookup_vport_by_vp_index(lport,
							       vport_index);
		}
	}
	spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags);
	return NULL;
}
/* Check whether @v_vport is a live (non-removing) vport of @v_lport,
 * scanning both the active vport list and the in-integration list
 * under the vport pool lock.
 * Return: RETURN_OK when found, UNF_RETURN_ERROR otherwise.
 */
unsigned int unf_is_vport_valid(struct unf_lport_s *v_lport,
				struct unf_lport_s *v_vport)
{
	struct unf_lport_s *lport = NULL;
	struct unf_vport_pool_s *vport_pool = NULL;
	struct unf_lport_s *vport = NULL;
	struct list_head *node = NULL;
	struct list_head *next_node = NULL;
	unsigned long flag = 0;

	UNF_CHECK_VALID(0x1977, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
	UNF_CHECK_VALID(0x1977, UNF_TRUE, v_vport, return UNF_RETURN_ERROR);
	lport = v_lport;
	vport_pool = lport->vport_pool;
	if (unlikely(!vport_pool)) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_IO_ATT, UNF_ERR,
			  "[err]Port(0x%x) vport pool is NULL",
			  lport->port_id);
		return UNF_RETURN_ERROR;
	}
	spin_lock_irqsave(&vport_pool->vport_pool_lock, flag);
	list_for_each_safe(node, next_node, &lport->list_vports_head) {
		vport = list_entry(node, struct unf_lport_s, entry_vport);
		if (vport == v_vport && vport->b_port_removing != UNF_TRUE) {
			spin_unlock_irqrestore(&vport_pool->vport_pool_lock,
					       flag);
			return RETURN_OK;
		}
	}
	/* Also accept vports that are still being integrated */
	list_for_each_safe(node, next_node, &lport->list_intergrad_vports) {
		vport = list_entry(node, struct unf_lport_s, entry_vport);
		if (vport == v_vport && vport->b_port_removing != UNF_TRUE) {
			spin_unlock_irqrestore(&vport_pool->vport_pool_lock,
					       flag);
			return RETURN_OK;
		}
	}
	spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag);
	return UNF_RETURN_ERROR;
}
/* Check whether @v_lport is a live L_Port or vport: scans the normal,
 * in-integration and destroy lists, and for each entry also checks the
 * entry's vports via unf_is_vport_valid().
 * Note: unf_is_vport_valid() acquires the vport pool lock while the
 * global L_Port list lock is held here (nested lock order: global
 * list lock -> vport pool lock).
 * Return: RETURN_OK when valid, UNF_RETURN_ERROR otherwise.
 */
unsigned int unf_is_lport_valid(struct unf_lport_s *v_lport)
{
	struct unf_lport_s *lport = NULL;
	struct list_head *node = NULL;
	struct list_head *next_node = NULL;
	unsigned long flags = 0;

	spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags);
	list_for_each_safe(node, next_node,
			   &global_lport_mgr.list_lport_list_head) {
		lport = list_entry(node, struct unf_lport_s, entry_lport);
		if ((v_lport == lport) &&
		    (lport->b_port_removing != UNF_TRUE)) {
			spin_unlock_irqrestore(
				&global_lport_mgr.global_lport_list_lock,
				flags);
			return RETURN_OK;
		}
		if (unf_is_vport_valid(lport, v_lport) == RETURN_OK) {
			spin_unlock_irqrestore(
				&global_lport_mgr.global_lport_list_lock,
				flags);
			return RETURN_OK;
		}
	}
	list_for_each_safe(node, next_node,
			   &global_lport_mgr.list_intergrad_head) {
		lport = list_entry(node, struct unf_lport_s, entry_lport);
		if ((v_lport == lport) &&
		    (lport->b_port_removing != UNF_TRUE)) {
			spin_unlock_irqrestore(
				&global_lport_mgr.global_lport_list_lock,
				flags);
			return RETURN_OK;
		}
		if (unf_is_vport_valid(lport, v_lport) == RETURN_OK) {
			spin_unlock_irqrestore(
				&global_lport_mgr.global_lport_list_lock,
				flags);
			return RETURN_OK;
		}
	}
	list_for_each_safe(node, next_node,
			   &global_lport_mgr.list_destroy_head) {
		lport = list_entry(node, struct unf_lport_s, entry_lport);
		if ((v_lport == lport) &&
		    (lport->b_port_removing != UNF_TRUE)) {
			spin_unlock_irqrestore(
				&global_lport_mgr.global_lport_list_lock,
				flags);
			return RETURN_OK;
		}
		if (unf_is_vport_valid(lport, v_lport) == RETURN_OK) {
			spin_unlock_irqrestore(
				&global_lport_mgr.global_lport_list_lock,
				flags);
			return RETURN_OK;
		}
	}
	spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock,
			       flags);
	return UNF_RETURN_ERROR;
}
/* On link down, abort all outstanding exchanges of the port (both INI
 * and SFS types) via the exchange manager's abort-all hook.
 */
static void unf_clean_link_down_io(struct unf_lport_s *v_lport,
				   int v_clean_flag)
{
	UNF_CHECK_VALID(0x2225, UNF_TRUE, v_lport, return);
	UNF_CHECK_VALID(0x2685, UNF_TRUE,
			v_lport->xchg_mgr_temp.pfn_unf_xchg_abort_all_io,
			return);

	v_lport->xchg_mgr_temp.pfn_unf_xchg_abort_all_io(
		v_lport, UNF_XCHG_TYPE_INI, v_clean_flag);
	v_lport->xchg_mgr_temp.pfn_unf_xchg_abort_all_io(
		v_lport, UNF_XCHG_TYPE_SFS, v_clean_flag);
}
/* Dispatch a port link event to its handler via the lport_action
 * table, holding a port reference for the duration of the call.
 * Unknown events are logged and the refinc result is returned.
 */
unsigned int unf_fc_port_link_event(void *v_lport, unsigned int v_events,
				    void *v_input)
{
	struct unf_lport_s *lport = NULL;
	unsigned int ret = UNF_RETURN_ERROR;
	unsigned int index;

	if (unlikely(!v_lport))
		return UNF_RETURN_ERROR;
	lport = (struct unf_lport_s *)v_lport;

	ret = unf_lport_refinc(lport);
	if (ret != RETURN_OK) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
			  "[info]Port(0x%x) is removing and do nothing",
			  lport->port_id);
		return RETURN_OK;
	}

	/* process port event */
	for (index = 0;
	     index < sizeof(lport_action) / sizeof(struct unf_port_action_s);
	     index++) {
		if (v_events != lport_action[index].action)
			continue;

		ret = lport_action[index].fn_unf_action(lport, v_input);
		unf_lport_ref_dec_to_destroy(lport);
		return ret;
	}

	UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
		  "[warn]Port(0x%x) receive unknown event(0x%x)",
		  lport->port_id, v_events);
	unf_lport_ref_dec_to_destroy(lport);

	return ret;
}
/* Zero the global L_Port manager, initialize its four port lists and
 * their lock, then mark the manager as started.
 */
void unf_port_mgmt_init(void)
{
	memset(&global_lport_mgr, 0, sizeof(global_lport_mgr));

	INIT_LIST_HEAD(&global_lport_mgr.list_lport_list_head);
	INIT_LIST_HEAD(&global_lport_mgr.list_intergrad_head);
	INIT_LIST_HEAD(&global_lport_mgr.list_destroy_head);
	INIT_LIST_HEAD(&global_lport_mgr.list_dirty_head);
	spin_lock_init(&global_lport_mgr.global_lport_list_lock);

	UNF_SET_NOMAL_MODE(global_lport_mgr.dft_mode);
	global_lport_mgr.b_start_work = UNF_TRUE;
}
/* Shut the global L_Port manager down, warning first if any ports
 * were leaked (non-zero lport_sum).
 */
void unf_port_mgmt_deinit(void)
{
	if (global_lport_mgr.lport_sum != 0)
		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
			  "[warn]There are %u port pool memory giveaway",
			  global_lport_mgr.lport_sum);

	memset(&global_lport_mgr, 0, sizeof(global_lport_mgr));

	UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
		  "[info]Common port manager exit succeed");
}
/* Append the L_Port to the global port list and bump the port count,
 * under the global list lock.
 */
static void unf_port_register(struct unf_lport_s *v_lport)
{
	unsigned long flag = 0;

	UNF_CHECK_VALID(0x2230, UNF_TRUE, (v_lport), return);

	UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO,
		  "Register LPort(0x%p), port ID(0x%x).",
		  v_lport, v_lport->port_id);

	spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flag);
	list_add_tail(&v_lport->entry_lport,
		      &global_lport_mgr.list_lport_list_head);
	global_lport_mgr.lport_sum++;
	spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flag);
}
/* Unlink the L_Port from the global port list and drop the port
 * count, under the global list lock.
 */
static void unf_port_unregister(struct unf_lport_s *v_lport)
{
	unsigned long flag = 0;

	UNF_CHECK_VALID(0x2703, UNF_TRUE, (v_lport), return);

	UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO,
		  "Unregister LPort(0x%p), port ID(0x%x).",
		  v_lport, v_lport->port_id);

	spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flag);
	list_del(&v_lport->entry_lport);
	global_lport_mgr.lport_sum--;
	spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flag);
}
/* Turn the port on or off through the low-level config-set hook and
 * remember the state that was successfully applied.
 * Return: RETURN_OK, or UNF_RETURN_ERROR when the hook is missing or
 * fails.
 */
static int unf_port_switch(struct unf_lport_s *v_lport,
			   unsigned int v_switch_flag)
{
	struct unf_lport_s *lport = v_lport;
	int ret = UNF_RETURN_ERROR;
	int cfg_on = v_switch_flag ? UNF_TRUE : UNF_FALSE;

	UNF_CHECK_VALID(0x2261, UNF_TRUE, lport, return UNF_RETURN_ERROR);

	if (!lport->low_level_func.port_mgr_op.pfn_ll_port_config_set) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_WARN,
			  "[warn]Port(0x%x)'s config(switch) function is NULL",
			  lport->port_id);
		return UNF_RETURN_ERROR;
	}

	ret = (int)lport->low_level_func.port_mgr_op.pfn_ll_port_config_set(
			lport->fc_port,
			UNF_PORT_CFG_SET_PORT_SWITCH, (void *)&cfg_on);
	if (ret != RETURN_OK) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_WARN,
			  "[warn]Port(0x%x) switch %s failed",
			  lport->port_id,
			  v_switch_flag ? "On" : "Off");
		return UNF_RETURN_ERROR;
	}

	lport->b_switch_state = (enum int_e)cfg_on;
	return RETURN_OK;
}
/* Bring the port into working state exactly once: under the state
 * lock, transition UNF_START_WORK_STOP -> UNF_START_WORK_COMPLETE (any
 * other state returns RETURN_OK immediately), query the firmware
 * version, fetch saved info and switch the port (SFP) on.
 */
int unf_port_start_work(struct unf_lport_s *v_lport)
{
	unsigned long flag = 0;
	struct unf_fw_version_s fw_version = { 0 };
	unsigned int ret = UNF_RETURN_ERROR;

	UNF_CHECK_VALID(0x2231, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
	/* The state check and update are one atomic step so concurrent
	 * callers cannot both start the port.
	 */
	spin_lock_irqsave(&v_lport->lport_state_lock, flag);
	if (v_lport->en_start_work_state != UNF_START_WORK_STOP) {
		spin_unlock_irqrestore(&v_lport->lport_state_lock, flag);
		return RETURN_OK;
	}
	v_lport->en_start_work_state = UNF_START_WORK_COMPLETE;
	spin_unlock_irqrestore(&v_lport->lport_state_lock, flag);
	if (!v_lport->low_level_func.port_mgr_op.pfn_ll_port_diagnose) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR,
			  "Port(0x%x)'s corresponding function is NULL.",
			  v_lport->port_id);
		return UNF_RETURN_ERROR;
	}
	fw_version.message_type = UNF_DEBUG_TYPE_MESSAGE;
	ret = v_lport->low_level_func.port_mgr_op.pfn_ll_port_diagnose(
			(void *)v_lport->fc_port,
			UNF_PORT_DIAG_PORT_DETAIL, &fw_version);
	/* An unreadable firmware version is not fatal; record an empty
	 * string and continue.
	 */
	if (ret != RETURN_OK)
		v_lport->fw_version[0] = '\0';
	else
		memcpy(v_lport->fw_version, fw_version.fw_version,
		       HIFC_VER_LEN);
	unf_cm_get_save_info(v_lport);
	/* switch sfp to start work */
	(void)unf_port_switch(v_lport, UNF_TRUE);
	return RETURN_OK;
}
/* Cache the identity fields the common layer reads directly (port id,
 * WWPN/WWNN, mode) and keep a full private copy of the low-level
 * function-op table on the L_Port.
 */
static unsigned int unf_lport_init_lw_fun_op(
			struct unf_lport_s *v_lport,
			struct unf_low_level_function_op_s *low_level_op)
{
	UNF_CHECK_VALID(0x2235, UNF_TRUE, (v_lport), return UNF_RETURN_ERROR);
	UNF_CHECK_VALID(0x2236, UNF_TRUE, (low_level_op),
			return UNF_RETURN_ERROR);

	v_lport->port_id = low_level_op->lport_cfg_items.port_id;
	v_lport->port_name = low_level_op->sys_port_name;
	v_lport->node_name = low_level_op->sys_node_name;
	v_lport->options = low_level_op->lport_cfg_items.port_mode;
	v_lport->en_act_topo = UNF_ACT_TOP_UNKNOWN;

	memcpy(&v_lport->low_level_func, low_level_op,
	       sizeof(v_lport->low_level_func));

	return RETURN_OK;
}
/* Drop the cached low-level function-op table and record destroy
 * step 13.
 */
void unf_lport_release_lw_fun_op(struct unf_lport_s *v_lport)
{
	UNF_CHECK_VALID(0x2237, UNF_TRUE, v_lport, return);

	memset(&v_lport->low_level_func, 0, sizeof(v_lport->low_level_func));
	v_lport->destroy_step = UNF_LPORT_DESTROY_STEP_13_DESTROY_LW_INTERFACE;
}
/* Map a SCSI host number back to its L_Port or (NPIV) vport.  Scans
 * the normal and in-integration L_Port lists; for each L_Port with a
 * vport pool it also scans that port's vports under the nested pool
 * lock (lock order: global list lock -> vport pool lock).
 * Return: the matching port, or NULL (logged) when none is found.
 */
struct unf_lport_s *unf_find_lport_by_scsi_host_id(unsigned int scsi_host_id)
{
	struct list_head *node = NULL, *next_node = NULL;
	struct list_head *vp_node = NULL, *next_vp_node = NULL;
	struct unf_lport_s *lport = NULL;
	struct unf_lport_s *vport = NULL;
	unsigned long flags = 0;
	unsigned long vpool_flags = 0;

	spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags);
	list_for_each_safe(node, next_node,
			   &global_lport_mgr.list_lport_list_head) {
		lport = list_entry(node, struct unf_lport_s, entry_lport);
		if (scsi_host_id ==
		    UNF_GET_SCSI_HOST_ID((lport->host_info.p_scsi_host))) {
			spin_unlock_irqrestore(
				&global_lport_mgr.global_lport_list_lock,
				flags);
			return lport;
		}
		/* support NPIV */
		if (lport->vport_pool) {
			spin_lock_irqsave(&lport->vport_pool->vport_pool_lock,
					  vpool_flags);
			list_for_each_safe(vp_node, next_vp_node,
					   &lport->list_vports_head) {
				vport = list_entry(vp_node, struct unf_lport_s,
						   entry_vport);
				if (scsi_host_id ==
				    UNF_GET_SCSI_HOST_ID(vport->host_info.p_scsi_host)) {
					spin_unlock_irqrestore(
						&lport->vport_pool->vport_pool_lock,
						vpool_flags);
					spin_unlock_irqrestore(
						&global_lport_mgr.global_lport_list_lock,
						flags);
					return vport;
				}
			}
			spin_unlock_irqrestore(
				&lport->vport_pool->vport_pool_lock, vpool_flags);
		}
	}
	list_for_each_safe(node, next_node,
			   &global_lport_mgr.list_intergrad_head) {
		lport = list_entry(node, struct unf_lport_s, entry_lport);
		if (scsi_host_id ==
		    UNF_GET_SCSI_HOST_ID(lport->host_info.p_scsi_host)) {
			spin_unlock_irqrestore(
				&global_lport_mgr.global_lport_list_lock,
				flags);
			return lport;
		}
		/* support NPIV */
		if (lport->vport_pool) {
			spin_lock_irqsave(&lport->vport_pool->vport_pool_lock,
					  vpool_flags);
			list_for_each_safe(vp_node, next_vp_node,
					   &lport->list_vports_head) {
				vport = list_entry(vp_node, struct unf_lport_s,
						   entry_vport);
				if (scsi_host_id ==
				    UNF_GET_SCSI_HOST_ID(vport->host_info.p_scsi_host)) {
					spin_unlock_irqrestore(
						&lport->vport_pool->vport_pool_lock,
						vpool_flags);
					spin_unlock_irqrestore(
						&global_lport_mgr.global_lport_list_lock,
						flags);
					return vport;
				}
			}
			spin_unlock_irqrestore(
				&lport->vport_pool->vport_pool_lock, vpool_flags);
		}
	}
	spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags);
	UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_IO_ATT, UNF_WARN,
		  "[warn]Can not find port by scsi_host_id(0x%x), may be removing",
		  scsi_host_id);
	return NULL;
}
/* Allocate and initialize the per-port SCSI-ID -> WWPN R_Port image
 * table (UNF_MAX_SCSI_ID entries), including each entry's
 * session-loss delayed work, LUN list and back-pointer to the port.
 * Return: RETURN_OK, or UNF_RETURN_ERROR on allocation failure.
 */
unsigned int unf_init_scsi_id_table(struct unf_lport_s *v_lport)
{
	struct unf_rport_scsi_id_image_s *rport_scsi_id_image = NULL;
	struct unf_wwpn_rport_info_s *wwpn_port_info = NULL;
	unsigned int idx;

	UNF_CHECK_VALID(0x2238, UNF_TRUE, (v_lport),
			return UNF_RETURN_ERROR);
	rport_scsi_id_image = &v_lport->rport_scsi_table;
	rport_scsi_id_image->max_scsi_id = UNF_MAX_SCSI_ID;
	/* If the number of remote connections supported by the L_Port is 0,
	 * an exception occurs
	 */
	if (rport_scsi_id_image->max_scsi_id == 0) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
			  "[err]Port(0x%x), supported maximum login is zero.",
			  v_lport->port_id);
		return UNF_RETURN_ERROR;
	}
	rport_scsi_id_image->wwn_rport_info_table =
		vmalloc(rport_scsi_id_image->max_scsi_id *
			sizeof(struct unf_wwpn_rport_info_s));
	if (!rport_scsi_id_image->wwn_rport_info_table) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
			  "[err]Port(0x%x) can't allocate SCSI ID Table(0x%x).",
			  v_lport->port_id, rport_scsi_id_image->max_scsi_id);
		return UNF_RETURN_ERROR;
	}
	memset(rport_scsi_id_image->wwn_rport_info_table, 0,
	       rport_scsi_id_image->max_scsi_id *
	       sizeof(struct unf_wwpn_rport_info_s));
	wwpn_port_info = rport_scsi_id_image->wwn_rport_info_table;
	for (idx = 0; idx < rport_scsi_id_image->max_scsi_id; idx++) {
		INIT_DELAYED_WORK(&wwpn_port_info->loss_tmo_work,
				  unf_sesion_loss_timeout);
		INIT_LIST_HEAD(&wwpn_port_info->fc_lun_list);
		wwpn_port_info->lport = v_lport;
		/* No target bound yet */
		wwpn_port_info->target_id = INVALID_VALUE32;
		wwpn_port_info++;
	}
	spin_lock_init(&rport_scsi_id_image->scsi_image_table_lock);
	UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO,
		  "[info]Port(0x%x) supported maximum login is %d.",
		  v_lport->port_id, rport_scsi_id_image->max_scsi_id);
	return RETURN_OK;
}
/*
 * Tear down the L_Port SCSI-ID image table created by
 * unf_init_scsi_id_table(): cancel every entry's session-loss delayed work,
 * free per-entry DFX counters, then release the table itself.
 *
 * @v_lport: L_Port whose table is destroyed (must be non-NULL).
 *
 * Safe to call when the table was never allocated; records the destroy
 * step for debugging either way.
 */
void unf_destroy_scsi_id_table(struct unf_lport_s *v_lport)
{
	struct unf_rport_scsi_id_image_s *scsi_table = NULL;
	struct unf_wwpn_rport_info_s *entry = NULL;
	unsigned int i = 0;
	unsigned int ret = UNF_RETURN_ERROR;

	UNF_CHECK_VALID(0x2239, UNF_TRUE, (v_lport), return);

	scsi_table = &v_lport->rport_scsi_table;
	if (scsi_table->wwn_rport_info_table) {
		for (i = 0; i < UNF_MAX_SCSI_ID; i++) {
			entry = &scsi_table->wwn_rport_info_table[i];
			/* Synchronously cancel the loss-timeout worker so it
			 * cannot run against a freed entry.
			 */
			UNF_DELAYED_WORK_SYNC(ret, v_lport->port_id,
					      &entry->loss_tmo_work,
					      "loss tmo Timer work");
			if (entry->dfx_counter)
				vfree(entry->dfx_counter);
		}
		/* just for pc_lint */
		if (ret)
			UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT,
				  UNF_INFO,
				  "Port(0x%x) cancel loss tmo work success",
				  v_lport->port_id);
		vfree(scsi_table->wwn_rport_info_table);
		scsi_table->wwn_rport_info_table = NULL;
	}

	scsi_table->max_scsi_id = 0;
	v_lport->destroy_step = UNF_LPORT_DESTROY_STEP_10_DESTROY_SCSI_TABLE;
}
static unsigned int unf_lport_init(
struct unf_lport_s *v_lport,
void *private_data,
struct unf_low_level_function_op_s *low_level_op)
{
unsigned int ret = RETURN_OK;
int ret_value = RETURN_ERROR_S32;
char work_queue_name[16];
unf_init_portparms(v_lport);
/* Associating LPort with FCPort */
v_lport->fc_port = private_data;
/* VpIndx=0 is reserved for Lport, and rootLport points to its own */
v_lport->vp_index = 0;
v_lport->root_lport = v_lport;
v_lport->chip_info = NULL;
/* Initialize the units related to L_Port and lw func */
ret = unf_lport_init_lw_fun_op(v_lport, low_level_op);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"LPort(0x%x) initialize lowlevel function unsuccessful.",
v_lport->port_id);
return ret;
}
/* Init Linkevent workqueue */
ret_value = snprintf(work_queue_name, sizeof(work_queue_name),
"%x_lkq", (unsigned int)v_lport->port_id);
UNF_FUNCTION_RETURN_CHECK(ret_value, (int)sizeof(work_queue_name));
v_lport->link_event_wq = create_singlethread_workqueue(work_queue_name);
if (!v_lport->link_event_wq) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR,
"[err]Port(0x%x) creat link event work queue failed",
v_lport->port_id);
return UNF_RETURN_ERROR;
}
ret_value = snprintf(work_queue_name, sizeof(work_queue_name),
"%x_xchgwq", (unsigned int)v_lport->port_id);
UNF_FUNCTION_RETURN_CHECK(ret_value, (int)sizeof(work_queue_name));
v_lport->xchg_wq = create_workqueue(work_queue_name);
if (!v_lport->xchg_wq) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR,
"[err]Port(0x%x) creat Exchg work queue failed",
v_lport->port_id);
flush_workqueue(v_lport->link_event_wq);
destroy_workqueue(v_lport->link_event_wq);
v_lport->link_event_wq = NULL;
return UNF_RETURN_ERROR;
}
/* scsi table (R_Port) required for initializing INI
* Initialize the scsi id Table table to manage the
* mapping between SCSI ID, WWN, and Rport.
*/
ret = unf_init_scsi_id_table(v_lport);
if (ret != RETURN_OK) {
flush_workqueue(v_lport->link_event_wq);
destroy_workqueue(v_lport->link_event_wq);
v_lport->link_event_wq = NULL;
flush_workqueue(v_lport->xchg_wq);
destroy_workqueue(v_lport->xchg_wq);
v_lport->xchg_wq = NULL;
return ret;
}
/* Initialize the EXCH resource */
ret = unf_alloc_xchg_resource(v_lport);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"LPort(0x%x) can't allocate exchange resource.",
v_lport->port_id);
flush_workqueue(v_lport->link_event_wq);
destroy_workqueue(v_lport->link_event_wq);
v_lport->link_event_wq = NULL;
flush_workqueue(v_lport->xchg_wq);
destroy_workqueue(v_lport->xchg_wq);
v_lport->xchg_wq = NULL;
unf_destroy_scsi_id_table(v_lport);
return ret;
}
/* Initialize the ESGL resource pool used by Lport */
ret = unf_init_esgl_pool(v_lport);
if (ret != RETURN_OK) {
flush_workqueue(v_lport->link_event_wq);
destroy_workqueue(v_lport->link_event_wq);
v_lport->link_event_wq = NULL;
flush_workqueue(v_lport->xchg_wq);
destroy_workqueue(v_lport->xchg_wq);
v_lport->xchg_wq = NULL;
unf_free_all_xchg_mgr(v_lport);
unf_destroy_scsi_id_table(v_lport);
return ret;
}
/* Initialize the disc manager under Lport */
ret = unf_init_disc_mgr(v_lport);
if (ret != RETURN_OK) {
flush_workqueue(v_lport->link_event_wq);
destroy_workqueue(v_lport->link_event_wq);
v_lport->link_event_wq = NULL;
flush_workqueue(v_lport->xchg_wq);
destroy_workqueue(v_lport->xchg_wq);
v_lport->xchg_wq = NULL;
unf_free_esgl_pool(v_lport);
unf_free_all_xchg_mgr(v_lport);
unf_destroy_scsi_id_table(v_lport);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"LPort(0x%x) initialize discover manager unsuccessful.",
v_lport->port_id);
return ret;
}
/* Initialize the LPort manager */
ret = unf_init_lport_mgr_temp(v_lport);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"LPort(0x%x) initialize RPort manager unsuccessful.",
v_lport->port_id);
goto RELEASE_LPORT;
}
/* Initialize the EXCH manager */
ret = unf_init_xchg_mgr_temp(v_lport);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"LPort(0x%x) initialize exchange manager unsuccessful.",
v_lport->port_id);
goto RELEASE_LPORT;
}
/* Initialize the resources required by the event processing center */
ret = unf_init_event_center(v_lport);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"LPort(0x%x) initialize event center unsuccessful.",
v_lport->port_id);
goto RELEASE_LPORT;
}
/* Initialize the initialization status of Lport */
unf_set_lport_state(v_lport, UNF_LPORT_ST_INITIAL);
/* Initialize the Lport route test case */
ret = unf_init_lport_route(v_lport);
if (ret != RETURN_OK) {
flush_workqueue(v_lport->link_event_wq);
destroy_workqueue(v_lport->link_event_wq);
v_lport->link_event_wq = NULL;
flush_workqueue(v_lport->xchg_wq);
destroy_workqueue(v_lport->xchg_wq);
v_lport->xchg_wq = NULL;
(void)unf_event_center_destroy(v_lport);
unf_disc_mgr_destroy(v_lport);
unf_free_esgl_pool(v_lport);
unf_free_all_xchg_mgr(v_lport);
unf_destroy_scsi_id_table(v_lport);
return ret;
}
/* Thesupports the initialization stepof the NPIV */
ret = unf_init_vport_pool(v_lport);
if (ret != RETURN_OK) {
flush_workqueue(v_lport->link_event_wq);
destroy_workqueue(v_lport->link_event_wq);
v_lport->link_event_wq = NULL;
flush_workqueue(v_lport->xchg_wq);
destroy_workqueue(v_lport->xchg_wq);
v_lport->xchg_wq = NULL;
unf_destroy_lport_route(v_lport);
(void)unf_event_center_destroy(v_lport);
unf_disc_mgr_destroy(v_lport);
unf_free_esgl_pool(v_lport);
unf_free_all_xchg_mgr(v_lport);
unf_destroy_scsi_id_table(v_lport);
return ret;
}
/* qualifier rport callback */
v_lport->pfn_unf_qualify_rport = unf_rport_set_qualifier_key_reuse;
v_lport->pfn_unf_tmf_abnormal_recovery =
unf_tmf_timeout_recovery_special;
return RETURN_OK;
RELEASE_LPORT:
flush_workqueue(v_lport->link_event_wq);
destroy_workqueue(v_lport->link_event_wq);
v_lport->link_event_wq = NULL;
flush_workqueue(v_lport->xchg_wq);
destroy_workqueue(v_lport->xchg_wq);
v_lport->xchg_wq = NULL;
unf_disc_mgr_destroy(v_lport);
unf_free_esgl_pool(v_lport);
unf_free_all_xchg_mgr(v_lport);
unf_destroy_scsi_id_table(v_lport);
return ret;
}
/*
 * Detach this L_Port from its per-chip event thread.
 *
 * Removes every event still queued on the chip's list that belongs to this
 * L_Port (failing pending synchronous waiters with UNF_RETURN_ERROR and
 * returning the nodes to the port's free-event pool), then drops one
 * reference on the chip thread; the last L_Port to leave stops the kthread
 * and frees the chip management structure.
 *
 * Lock order: chip_event_list_lock is taken first, port_event_lock nested
 * inside it — callers elsewhere must not invert this.
 */
static void unf_destroy_card_thread(struct unf_lport_s *v_lport)
{
	struct unf_event_mgr *event_mgr = NULL;
	struct unf_chip_manage_info_s *chip_info = NULL;
	struct list_head *list = NULL;
	struct list_head *list_tmp = NULL;
	struct unf_cm_event_report *event_node = NULL;
	unsigned long event_lock_flag = 0;
	unsigned long flag = 0;

	UNF_CHECK_VALID(0x2249, UNF_TRUE, (v_lport), return);

	/* Nothing to do when the port was never attached to a chip thread */
	chip_info = v_lport->chip_info;
	if (!chip_info) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
			  "Port(0x%x) has no event thread.", v_lport->port_id);
		return;
	}
	event_mgr = &v_lport->event_mgr;

	/* Purge this port's pending events from the shared chip list */
	spin_lock_irqsave(&chip_info->chip_event_list_lock, flag);
	if (!list_empty(&chip_info->list_head)) {
		list_for_each_safe(list, list_tmp, &chip_info->list_head) {
			event_node = list_entry(list,
						struct unf_cm_event_report,
						list_entry);
			/* Only reclaim events owned by this L_Port */
			if (v_lport == event_node->lport) {
				list_del_init(&event_node->list_entry);
				/* Wake synchronous callers with an error so
				 * they do not block forever on a dead queue.
				 */
				if (event_node->event_asy_flag ==
				    UNF_EVENT_SYN) {
					event_node->result = UNF_RETURN_ERROR;
					complete(&event_node->event_comp);
				}
				spin_lock_irqsave(&event_mgr->port_event_lock,
						  event_lock_flag);
				event_mgr->free_event_count++;
				list_add_tail(&event_node->list_entry,
					      &event_mgr->list_free_event);
				spin_unlock_irqrestore(
					&event_mgr->port_event_lock,
					event_lock_flag);
			}
		}
	}
	spin_unlock_irqrestore(&chip_info->chip_event_list_lock, flag);

	/* If the number of events introduced by the event thread is 0,
	 * it indicates that no interface is used. In this case, thread
	 * resources need to be consumed
	 */
	if (atomic_dec_and_test(&chip_info->ref_cnt)) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
			  "Port(0x%x) destroy slot(%u) chip(0x%x) event thread succeed.",
			  v_lport->port_id, chip_info->slot_id,
			  chip_info->chip_id);
		/* Flag lets the thread's main loop exit promptly once woken */
		chip_info->b_thread_exit = UNF_TRUE;
		wake_up_process(chip_info->data_thread);
		kthread_stop(chip_info->data_thread);
		chip_info->data_thread = NULL;

		/* Unlink from the global card-thread list before freeing */
		spin_lock_irqsave(&card_thread_mgr.global_card_list_lock, flag);
		list_del_init(&chip_info->list_chip_thread_entry);
		card_thread_mgr.card_sum--;
		spin_unlock_irqrestore(&card_thread_mgr.global_card_list_lock,
				       flag);
		vfree(chip_info);
	}
	v_lport->chip_info = NULL;
}
/*
 * Fully tear down an initialised L_Port, releasing every resource acquired
 * by unf_lport_init() and by SCSI host registration.
 *
 * The teardown order is deliberate (route -> card thread -> link event wq ->
 * event center -> vport pool -> xchg -> esgl -> disc -> templates -> scsi
 * table -> xchg wq -> scsi host -> lw ops) and must not be rearranged;
 * see the inline notes for the order-sensitive steps.
 *
 * Return: always RETURN_OK once v_lport passed validation.
 */
unsigned int unf_lport_deinit(struct unf_lport_s *v_lport)
{
	UNF_CHECK_VALID(0x2246, UNF_TRUE, (v_lport), return UNF_RETURN_ERROR);

	/* If the card is unloaded normally, the thread is stopped once.
	 * The problem does not occur if you stop the thread again.
	 */
	unf_destroy_lport_route(v_lport);

	/* minus the reference count of the card event;
	 * the last port deletes the card thread
	 */
	unf_destroy_card_thread(v_lport);

	/* Flush before destroy so queued link events finish first */
	flush_workqueue(v_lport->link_event_wq);
	destroy_workqueue(v_lport->link_event_wq);
	v_lport->link_event_wq = NULL;

	/* Release Event Processing Center */
	(void)unf_event_center_destroy(v_lport);

	/* Release the Vport resource pool */
	unf_free_vport_pool(v_lport);

	/* Destroying the Xchg Manager */
	unf_xchg_mgr_destroy(v_lport);

	/* Release Esgl pool */
	unf_free_esgl_pool(v_lport);

	/* reliability review :Disc should release after Xchg.
	 * Destroy the disc manager
	 */
	unf_disc_mgr_destroy(v_lport);

	/* Release Xchg Mg template */
	unf_release_xchg_mgr_temp(v_lport);

	/* Release the Lport Mg template */
	unf_release_lport_mgr_temp(v_lport);

	/* Destroy the ScsiId Table */
	unf_destroy_scsi_id_table(v_lport);

	/* Exchange work queue is drained only after all xchg state is gone */
	flush_workqueue(v_lport->xchg_wq);
	destroy_workqueue(v_lport->xchg_wq);
	v_lport->xchg_wq = NULL;

	/* Deregister SCSI Host */
	unf_unregister_scsi_host(v_lport);

	/* Releasing the lw Interface Template */
	unf_lport_release_lw_fun_op(v_lport);
	v_lport->fc_port = NULL;
	return RETURN_OK;
}
/*
 * Per-chip event kthread main loop.
 *
 * Drains struct unf_cm_event_report nodes from the chip's shared event list
 * and dispatches each through unf_handle_event(). When the list is empty the
 * thread sleeps interruptibly for up to 1s; it exits when kthread_stop() is
 * called or the owner sets b_thread_exit (see unf_destroy_card_thread()).
 *
 * @v_arg: struct unf_chip_manage_info_s * for the chip this thread serves.
 *
 * Return: always RETURN_OK (kthread exit status).
 */
static int unf_card_event_process(void *v_arg)
{
	struct list_head *node = NULL;
	struct unf_cm_event_report *event_node = NULL;
	unsigned long flags = 0;
	struct unf_chip_manage_info_s *chip_info =
		(struct unf_chip_manage_info_s *)v_arg;

	UNF_REFERNCE_VAR(v_arg);
	/* Slightly below default priority: event handling is not latency
	 * critical relative to the I/O path.
	 */
	set_user_nice(current, 4);

	UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO,
		  "Slot(%u) chip(0x%x) enter event thread.",
		  chip_info->slot_id, chip_info->chip_id);

	while (!kthread_should_stop()) {
		if (chip_info->b_thread_exit == UNF_TRUE)
			break;

		spin_lock_irqsave(&chip_info->chip_event_list_lock, flags);
		if (list_empty(&chip_info->list_head) == UNF_TRUE) {
			spin_unlock_irqrestore(&chip_info->chip_event_list_lock,
					       flags);
			/* Nothing queued: doze up to 1s; wake_up_process()
			 * from the enqueuer or destroyer cuts this short.
			 */
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout((long)msecs_to_jiffies(1000));
		} else {
			/* Pop the head node under the lock, dispatch outside
			 * it so handlers may sleep or re-queue events.
			 */
			node = (&chip_info->list_head)->next;
			list_del_init(node);
			chip_info->list_num--;
			event_node = list_entry(node,
						struct unf_cm_event_report,
						list_entry);
			spin_unlock_irqrestore(&chip_info->chip_event_list_lock,
					       flags);
			unf_handle_event(event_node);
		}
	}

	UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EVENT, UNF_MAJOR,
		  "Slot(%u) chip(0x%x) exit event thread.",
		  chip_info->slot_id, chip_info->chip_id);
	return RETURN_OK;
}
static unsigned int unf_creat_chip_thread(struct unf_lport_s *v_lport)
{
unsigned long flag = 0;
struct unf_chip_manage_info_s *chip_info = NULL;
UNF_CHECK_VALID(0x2250, UNF_TRUE, (v_lport), return UNF_RETURN_ERROR);
/* If the thread cannot be found, apply for a new thread. */
chip_info = (struct unf_chip_manage_info_s *)vmalloc(
sizeof(struct unf_chip_manage_info_s));
if (!chip_info) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"Port(0x%x) cannot allocate thread memory.",
v_lport->port_id);
return UNF_RETURN_ERROR;
}
memset(chip_info, 0, sizeof(struct unf_chip_manage_info_s));
memcpy(&chip_info->chip_info, &v_lport->low_level_func.chip_info,
sizeof(struct unf_chip_info_s));
chip_info->slot_id =
UNF_GET_BOARD_TYPE_AND_SLOT_ID_BY_PORTID(v_lport->port_id);
chip_info->chip_id = v_lport->low_level_func.chip_id;
chip_info->list_num = 0;
chip_info->sfp_9545_fault = UNF_FALSE;
chip_info->sfp_power_fault = UNF_FALSE;
atomic_set(&chip_info->ref_cnt, 1);
atomic_set(&chip_info->card_loop_test_flag, UNF_FALSE);
spin_lock_init(&chip_info->card_loop_back_state_lock);
INIT_LIST_HEAD(&chip_info->list_head);
spin_lock_init(&chip_info->chip_event_list_lock);
chip_info->b_thread_exit = UNF_FALSE;
chip_info->data_thread =
kthread_create(unf_card_event_process, chip_info,
"%x_et", v_lport->port_id);
if (IS_ERR(chip_info->data_thread) ||
(!chip_info->data_thread)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"Port(0x%x) creat event thread(0x%p) unsuccessful.",
v_lport->port_id, chip_info->data_thread);
vfree(chip_info);
return UNF_RETURN_ERROR;
}
v_lport->chip_info = chip_info;
wake_up_process(chip_info->data_thread);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO,
"Port(0x%x) creat slot(%u) chip(0x%x) event thread succeed.",
v_lport->port_id, chip_info->slot_id, chip_info->chip_id);
spin_lock_irqsave(&card_thread_mgr.global_card_list_lock, flag);
list_add_tail(&chip_info->list_chip_thread_entry,
&card_thread_mgr.list_card_list_head);
card_thread_mgr.card_sum++;
spin_unlock_irqrestore(&card_thread_mgr.global_card_list_lock, flag);
return RETURN_OK;
}
/*
 * Attach the L_Port to the event thread of its chip: reuse an existing
 * thread (matching chip_id + slot_id) from the global card-thread list,
 * taking a reference, or create a new one when none exists.
 *
 * @v_lport: L_Port to attach; chip_info is set on success.
 *
 * Return: RETURN_OK on success, UNF_RETURN_ERROR when a new thread could
 *         not be created.
 *
 * Cleanup: dropped the redundant `else { return RETURN_OK; }` after the
 * error return — straight-line fall-through is the idiomatic form.
 */
static unsigned int unf_find_chip_thread(struct unf_lport_s *v_lport)
{
	unsigned long flag = 0;
	struct list_head *node = NULL;
	struct list_head *next_node = NULL;
	struct unf_chip_manage_info_s *chip_info = NULL;
	unsigned int ret = UNF_RETURN_ERROR;

	/* Reuse an existing thread for the same chip/slot if one is live */
	spin_lock_irqsave(&card_thread_mgr.global_card_list_lock, flag);
	list_for_each_safe(node, next_node,
			   &card_thread_mgr.list_card_list_head) {
		chip_info = list_entry(node, struct unf_chip_manage_info_s,
				       list_chip_thread_entry);
		if ((chip_info->chip_id == v_lport->low_level_func.chip_id) &&
		    (chip_info->slot_id == UNF_GET_BOARD_TYPE_AND_SLOT_ID_BY_PORTID(v_lport->port_id))) {
			atomic_inc(&chip_info->ref_cnt);
			v_lport->chip_info = chip_info;
			UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT,
				  UNF_MAJOR,
				  "Port(0x%x) find card(%u) chip(0x%x) event thread succeed.",
				  v_lport->port_id, chip_info->slot_id,
				  chip_info->chip_id);
			spin_unlock_irqrestore(
				&card_thread_mgr.global_card_list_lock, flag);
			return RETURN_OK;
		}
	}
	spin_unlock_irqrestore(&card_thread_mgr.global_card_list_lock, flag);

	/* No existing thread for this chip: spawn one */
	ret = unf_creat_chip_thread(v_lport);
	if (ret != RETURN_OK) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
			  "LPort(0x%x) creat event thread unsuccessful. Destroy LPort.",
			  v_lport->port_id);
		return UNF_RETURN_ERROR;
	}
	return RETURN_OK;
}
/*
 * Event callback: query the low-level port for its MAC address / chip info.
 *
 * @argc_in:  struct unf_lport_s * of the port to query.
 * @argc_out: struct unf_get_chip_info_argout * receiving the result.
 *
 * Return: RETURN_OK on success, UNF_RETURN_ERROR when either argument is
 *         NULL, the low-level config-get hook is missing, or the query
 *         fails.
 *
 * Fixes: removed the dead `if (!lport)` re-check (argc_in is already
 * validated by UNF_CHECK_VALID above) and completed the truncated failure
 * log message ("Port(0x%x) get .").
 */
static int unf_cm_get_mac_adr(void *argc_in, void *argc_out)
{
	struct unf_lport_s *lport = NULL;
	struct unf_get_chip_info_argout *chp_info = NULL;

	UNF_CHECK_VALID(0x2398, UNF_TRUE, argc_in, return UNF_RETURN_ERROR);
	UNF_CHECK_VALID(0x2398, UNF_TRUE, argc_out, return UNF_RETURN_ERROR);

	lport = (struct unf_lport_s *)argc_in;
	chp_info = (struct unf_get_chip_info_argout *)argc_out;

	if (!lport->low_level_func.port_mgr_op.pfn_ll_port_config_get) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
			  "Port(0x%x)'s corresponding function is NULL.",
			  lport->port_id);
		return UNF_RETURN_ERROR;
	}

	if (lport->low_level_func.port_mgr_op.pfn_ll_port_config_get(
		lport->fc_port,
		UNF_PORT_CFG_GET_MAC_ADDR, chp_info) != RETURN_OK) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
			  "Port(0x%x) get mac address failed.",
			  lport->port_id);
		return UNF_RETURN_ERROR;
	}
	return RETURN_OK;
}
/*
 * Query the low-level port (via a synchronous event) for its chip/board
 * information, then record the card type and derive the port's maximum
 * supported speed from the board type.
 *
 * @v_lport: L_Port to update (card_type, fc_ser_max_speed).
 *
 * Return: RETURN_OK on success, UNF_RETURN_ERROR when the synchronous
 *         query event fails.
 */
static unsigned int unf_build_lport_wwn(struct unf_lport_s *v_lport)
{
	struct unf_get_chip_info_argout v_wwn = { 0 };
	unsigned int ret = UNF_RETURN_ERROR;

	UNF_CHECK_VALID(0x2403, UNF_TRUE, (v_lport), return UNF_RETURN_ERROR);

	ret = (unsigned int)unf_send_event(v_lport->port_id,
					   UNF_EVENT_SYN,
					   (void *)v_lport,
					   (void *)&v_wwn,
					   unf_cm_get_mac_adr);
	if (ret != RETURN_OK) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
			  "UNF_BuildSysWwn SendEvent(UNF_PortGetMacAdr) fail.");
		return UNF_RETURN_ERROR;
	}

	/* save card mode: UNF_FC_SERVER_BOARD_32_G(6):32G;
	 * UNF_FC_SERVER_BOARD_16_G(7):16G MODE
	 */
	v_lport->card_type = v_wwn.board_type;

	/* update port max speed (unknown boards default to 32G) */
	switch (v_wwn.board_type) {
	case UNF_FC_SERVER_BOARD_16_G:
		v_lport->low_level_func.fc_ser_max_speed = UNF_PORT_SPEED_16_G;
		break;
	case UNF_FC_SERVER_BOARD_8_G:
		v_lport->low_level_func.fc_ser_max_speed = UNF_PORT_SPEED_8_G;
		break;
	case UNF_FC_SERVER_BOARD_32_G:
	default:
		v_lport->low_level_func.fc_ser_max_speed = UNF_PORT_SPEED_32_G;
		break;
	}
	return RETURN_OK;
}
/*
 * Allocate, initialise and register a new L_Port.
 *
 * Sequence: vmalloc + zero the L_Port, unf_lport_init(), attach to the
 * per-chip event thread, register in the global port list, query board
 * info/WWN, init link-loss timeout, register the SCSI host, and (when the
 * framework has started work) start the port.
 *
 * @private_data: low-level FC port handle passed through to unf_lport_init.
 * @low_level_op: low-level function table.
 *
 * Return: the new struct unf_lport_s * on success, NULL on any failure
 *         (everything acquired so far is rolled back).
 *
 * Cleanup: the three identical unregister/deinit/vfree failure sequences
 * are deduplicated into a single goto ladder; each failure path releases
 * exactly what the original did, in the same order.
 */
void *unf_lport_create_and_init(
	void *private_data,
	struct unf_low_level_function_op_s *low_level_op)
{
	struct unf_lport_s *lport = NULL;
	unsigned int ret = UNF_RETURN_ERROR;

	if (!private_data) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
			  "Private Data is NULL");
		return NULL;
	}
	if (!low_level_op) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
			  "LowLevel port(0x%p) function is NULL", private_data);
		return NULL;
	}

	/* 1. vmalloc & Memset L_Port */
	lport = vmalloc(sizeof(struct unf_lport_s));
	if (!lport) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
			  "Alloc LPort memory failed.");
		return NULL;
	}
	memset(lport, 0, sizeof(struct unf_lport_s));

	/* 2. L_Port Init */
	if (unf_lport_init(lport, private_data, low_level_op) != RETURN_OK) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
			  "LPort initialize unsuccessful.");
		goto err_free_lport;
	}

	/* 4. Get or Create Chip Thread Chip_ID & Slot_ID */
	ret = unf_find_chip_thread(lport);
	if (ret != RETURN_OK) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
			  "LPort(0x%x) Find Chip thread unsuccessful. Destroy LPort.",
			  lport->port_id);
		goto err_deinit_lport;
	}

	/* 5. Registers with in the port management global linked list */
	unf_port_register(lport);

	/* update WWN / board info */
	if (unf_build_lport_wwn(lport) != RETURN_OK)
		goto err_unregister_port;

	unf_init_link_lose_tmo(lport);

	/* initialize Scsi Host */
	if (unf_register_scsi_host(lport) != RETURN_OK)
		goto err_unregister_port;

	/* 7. Here, start work now */
	if (global_lport_mgr.b_start_work == UNF_TRUE) {
		if (unf_port_start_work(lport) != RETURN_OK) {
			UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT,
				  UNF_WARN,
				  "[warn]Port(0x%x) start work failed",
				  lport->port_id);
			goto err_unregister_port;
		}
	}

	UNF_REFERNCE_VAR(lport);
	return lport;

	/* Roll back in reverse order of acquisition */
err_unregister_port:
	unf_port_unregister(lport);
err_deinit_lport:
	(void)unf_lport_deinit(lport);
err_free_lport:
	vfree(lport);
	return NULL;
}
/*
 * Final destruction of an L_Port whose reference count reached zero
 * (invoked from unf_lport_ref_dec()).
 *
 * Destroys all NPIV vports, runs full deinit, moves the port from the
 * destroy list to the dirty list when it still holds leaked ("dirty")
 * memory, then signals whoever is waiting on lport_free_completion.
 *
 * @v_lport:   struct unf_lport_s * to destroy.
 * @v_arg_out: unused.
 *
 * Return: RETURN_OK, or UNF_RETURN_ERROR when v_lport is NULL.
 */
static int unf_lport_destroy(void *v_lport, void *v_arg_out)
{
	struct unf_lport_s *lport = NULL;
	unsigned long flags = 0;

	if (!v_lport) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
			  "LPort is NULL.");
		return UNF_RETURN_ERROR;
	}
	UNF_REFERNCE_VAR(v_arg_out);

	lport = (struct unf_lport_s *)v_lport;
	UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR,
		  "Destroy LPort(0x%p), ID(0x%x).",
		  lport, lport->port_id);

	/* NPIV Ensure that all Vport are deleted */
	unf_destroy_all_vports(lport);
	lport->destroy_step = UNF_LPORT_DESTROY_STEP_1_REPORT_PORT_OUT;

	(void)unf_lport_deinit(v_lport);

	/* The port is removed from the destroy linked list.
	 * The next step is to release the memory
	 */
	spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags);
	list_del(&lport->entry_lport);

	/* If the port has dirty memory, the port is mounted to the
	 * linked list of dirty ports (kept for DFX instead of being freed)
	 */
	if (lport->dirty_flag)
		list_add_tail(&lport->entry_lport,
			      &global_lport_mgr.list_dirty_head);
	spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock,
			       flags);

	/* Wake the remover waiting for this port to finish dying; a missing
	 * completion indicates a logic error, hence the dump_stack().
	 */
	if (lport->lport_free_completion) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
			  "Complete LPort(0x%p), port ID(0x%x)'s Free Completion.",
			  lport, lport->port_id);
		complete(lport->lport_free_completion);
	} else {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
			  "LPort(0x%p), port ID(0x%x)'s Free Completion is NULL.",
			  lport, lport->port_id);
		dump_stack();
	}
	return RETURN_OK;
}
/*
 * Take a reference on an L_Port, but only while it is still alive
 * (ref count > 0); a port already draining to zero cannot be revived.
 *
 * @v_lport: L_Port to reference.
 *
 * Return: RETURN_OK when the reference was taken, UNF_RETURN_ERROR when
 *         the port is already being destroyed.
 */
unsigned int unf_lport_refinc(struct unf_lport_s *v_lport)
{
	unsigned long flags = 0;

	UNF_CHECK_VALID(0x2208, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);

	spin_lock_irqsave(&v_lport->lport_state_lock, flags);
	/* Refuse to resurrect a port whose last reference is gone */
	if (atomic_read(&v_lport->lport_ref_cnt) <= 0) {
		spin_unlock_irqrestore(&v_lport->lport_state_lock, flags);
		return UNF_RETURN_ERROR;
	}

	atomic_inc(&v_lport->lport_ref_cnt);
	UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO,
		  "[info]Port(0x%p) port_id(0x%x) reference count is %d",
		  v_lport, v_lport->port_id,
		  atomic_read(&v_lport->lport_ref_cnt));
	spin_unlock_irqrestore(&v_lport->lport_state_lock, flags);
	return RETURN_OK;
}
/*
 * Drop a reference on an L_Port; when the count reaches zero, move the
 * port from the active list to the destroy list and destroy it.
 *
 * Lock order: the global port-list lock is taken BEFORE the per-port state
 * lock so that the list move and the final decrement are atomic with
 * respect to unf_lport_refinc() and list walkers — do not invert.
 *
 * @v_lport: L_Port to release.
 */
void unf_lport_ref_dec(struct unf_lport_s *v_lport)
{
	unsigned long flags = 0;
	unsigned long lport_flags = 0;

	UNF_CHECK_VALID(0x2209, UNF_TRUE, v_lport, return);

	UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO,
		  "LPort(0x%p), port ID(0x%x), reference count is %d.",
		  v_lport, v_lport->port_id,
		  atomic_read(&v_lport->lport_ref_cnt));

	spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags);
	spin_lock_irqsave(&v_lport->lport_state_lock, lport_flags);
	if (atomic_dec_and_test(&v_lport->lport_ref_cnt)) {
		/* Last reference: unlink from the active list while still
		 * holding the global lock, then destroy outside all locks.
		 */
		spin_unlock_irqrestore(&v_lport->lport_state_lock, lport_flags);
		list_del(&v_lport->entry_lport);
		global_lport_mgr.lport_sum--;

		/* attaches the lport to the destroy linked list for dfx */
		list_add_tail(&v_lport->entry_lport,
			      &global_lport_mgr.list_destroy_head);
		spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock,
				       flags);
		(void)unf_lport_destroy(v_lport, NULL);
	} else {
		spin_unlock_irqrestore(&v_lport->lport_state_lock, lport_flags);
		spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock,
				       flags);
	}
}
/*
 * Event callback: reset the low-level port identified by the input
 * argument, after clearing cached link attributes (topology, speed,
 * fabric node name).
 *
 * @v_arg_in:  struct unf_reset_port_argin * carrying the port id.
 * @v_arg_out: unused.
 *
 * Return: RETURN_OK on success, UNF_RETURN_ERROR when the port is not
 *         found, the config-set hook is missing, or the reset fails.
 */
static int unf_reset_port(void *v_arg_in, void *v_arg_out)
{
	struct unf_reset_port_argin *arg_in =
		(struct unf_reset_port_argin *)v_arg_in;
	struct unf_lport_s *lport = NULL;
	unsigned int ret = UNF_RETURN_ERROR;
	enum unf_port_config_state_e port_state = UNF_PORT_CONFIG_STATE_RESET;

	UNF_REFERNCE_VAR(v_arg_out);
	UNF_CHECK_VALID(0x2262, UNF_TRUE, arg_in, return UNF_RETURN_ERROR);

	lport = unf_find_lport_by_port_id(arg_in->port_id);
	if (!lport) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR,
			  "Not find LPort(0x%x).", arg_in->port_id);
		return UNF_RETURN_ERROR;
	}

	/* reset port */
	if (!lport->low_level_func.port_mgr_op.pfn_ll_port_config_set) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR,
			  "Port(0x%x)'s corresponding function is NULL.",
			  lport->port_id);
		return UNF_RETURN_ERROR;
	}

	/* Forget stale link attributes; they are re-learned after reset */
	lport->en_act_topo = UNF_ACT_TOP_UNKNOWN;
	lport->speed = UNF_PORT_SPEED_UNKNOWN;
	lport->fabric_node_name = 0;

	ret = lport->low_level_func.port_mgr_op.pfn_ll_port_config_set(
			lport->fc_port,
			UNF_PORT_CFG_SET_PORT_STATE, (void *)&port_state);
	if (ret != RETURN_OK) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR,
			  "Reset port(0x%x) unsuccessful.", lport->port_id);
		return UNF_RETURN_ERROR;
	}
	return RETURN_OK;
}
/*
 * Turn the port's SFP transceiver on or off through the low-level
 * config-set hook, recording the resulting switch state on the L_Port.
 *
 * Commands are silently ignored (RETURN_OK) when the framework or the
 * port itself has not finished starting work.
 *
 * @v_port_id: port to switch.
 * @v_turn_on: UNF_TRUE to power the SFP on, UNF_FALSE to power it off.
 *
 * Return: RETURN_OK on success or when ignored; UNF_RETURN_ERROR when the
 *         port is missing, the hook is missing, or the switch fails.
 */
static int unf_sfp_switch(unsigned int v_port_id, int v_turn_on)
{
	struct unf_lport_s *lport = NULL;
	int turn_on = v_turn_on;
	int ret = UNF_RETURN_ERROR;
	unsigned long flag = 0;

	/* Framework not started yet: accept and ignore the command */
	if (global_lport_mgr.b_start_work == UNF_FALSE) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_WARN,
			  "[warn]Port(0x%x) not start work, ignored command:turn %s.",
			  v_port_id, (v_turn_on == UNF_TRUE) ? "ON" : "OFF");
		return RETURN_OK;
	}

	lport = unf_find_lport_by_port_id(v_port_id);
	if (!lport) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_WARN,
			  "[warn]Not find LPort(0x%x).", v_port_id);
		return UNF_RETURN_ERROR;
	}

	/* Check the port's own start state under its state lock; a port
	 * still starting also ignores the command.
	 */
	spin_lock_irqsave(&lport->lport_state_lock, flag);
	if (lport->en_start_work_state != UNF_START_WORK_COMPLETE) {
		spin_unlock_irqrestore(&lport->lport_state_lock, flag);
		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_WARN,
			  "[warn]LPort(0x%x) not start work, ignored command:turn %s.",
			  v_port_id, (v_turn_on == UNF_TRUE) ? "ON" : "OFF");
		return RETURN_OK;
	}
	spin_unlock_irqrestore(&lport->lport_state_lock, flag);

	if (!lport->low_level_func.port_mgr_op.pfn_ll_port_config_set) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_WARN,
			  "[warn]Port(0x%x)'s corresponding function is NULL.",
			  v_port_id);
		return UNF_RETURN_ERROR;
	}

	ret = (int)lport->low_level_func.port_mgr_op.pfn_ll_port_config_set(
		lport->fc_port,
		UNF_PORT_CFG_SET_SFP_SWITCH,
		(void *)&turn_on);
	if (ret != RETURN_OK) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_WARN,
			  "[warn]Port(0x%x) switch SFP+ %s unsuccessful.",
			  v_port_id, v_turn_on ? "On" : "Off");
		return UNF_RETURN_ERROR;
	}

	/* Remember the commanded state for later topology changes */
	lport->b_switch_state = (enum int_e)turn_on;
	return RETURN_OK;
}
static int unf_sfp_switch_event(void *v_argc_in, void *v_argc_out)
{
struct unf_set_sfp_argin *in = (struct unf_set_sfp_argin *)v_argc_in;
UNF_REFERNCE_VAR(v_argc_out);
UNF_CHECK_VALID(0x2267, UNF_TRUE, v_argc_in, return UNF_RETURN_ERROR);
return unf_sfp_switch(in->port_id, in->turn_on);
}
int unf_cm_sfp_switch(unsigned int v_port_id, int v_bturn_on)
{
struct unf_set_sfp_argin in = { 0 };
in.port_id = v_port_id;
in.turn_on = v_bturn_on;
return unf_send_event(v_port_id, UNF_EVENT_SYN, (void *)&in,
(void *)NULL, unf_sfp_switch_event);
}
/*
 * Event callback: read the port's current (actual) link speed.
 *
 * When the link is down no query is issued and the speed is reported as
 * UNF_PORT_SPEED_UNKNOWN.
 *
 * @v_argc_in:  unsigned int * port id.
 * @v_argc_out: unsigned int * receiving the speed.
 *
 * Return: RETURN_OK / low-level result, or UNF_RETURN_ERROR when the port
 *         or its config-get hook is missing.
 */
static int unf_get_port_speed(void *v_argc_in, void *v_argc_out)
{
	unsigned int *speed = (unsigned int *)v_argc_out;
	struct unf_low_level_port_mgr_op_s *port_mgr = NULL;
	struct unf_lport_s *lport = NULL;
	int ret = 0;
	unsigned int port_id = *(unsigned int *)v_argc_in;

	UNF_CHECK_VALID(0x2268, UNF_TRUE, v_argc_in, return UNF_RETURN_ERROR);
	UNF_CHECK_VALID(0x2269, UNF_TRUE, v_argc_out, return UNF_RETURN_ERROR);

	lport = unf_find_lport_by_port_id(port_id);
	if (!lport) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR,
			  "Cannot Find LPort by (0x%x).", port_id);
		return UNF_RETURN_ERROR;
	}

	port_mgr = &lport->low_level_func.port_mgr_op;
	if (!port_mgr->pfn_ll_port_config_get) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR,
			  "Port(0x%x)'s corresponding function is NULL.",
			  port_id);
		return UNF_RETURN_ERROR;
	}

	/* Only a link-up port has a meaningful actual speed */
	if (lport->link_up == UNF_PORT_LINK_UP) {
		ret = (int)port_mgr->pfn_ll_port_config_get(lport->fc_port,
				UNF_PORT_CFG_GET_SPEED_ACT, (void *)speed);
	} else {
		*speed = UNF_PORT_SPEED_UNKNOWN;
	}
	return ret;
}
/*
 * Public entry: query a port's actual link speed via a synchronous event.
 *
 * @v_port_id: target port.
 * @v_speed:   out parameter receiving the speed.
 *
 * Return: result of unf_send_event() / unf_get_port_speed().
 */
static int unf_cm_get_port_speed(unsigned int v_port_id, unsigned int *v_speed)
{
	UNF_CHECK_VALID(0x2270, UNF_TRUE, v_speed, return UNF_RETURN_ERROR);

	return unf_send_event(v_port_id, UNF_EVENT_SYN, (void *)&v_port_id,
			      (void *)v_speed, unf_get_port_speed);
}
/*
 * Event callback: configure the port's link speed through the low-level
 * config-set hook.
 *
 * @v_argc_in:  struct unf_set_speed_argin * (port id + speed pointer).
 * @v_argc_out: unused.
 *
 * Return: low-level result, or UNF_RETURN_ERROR when the port or its
 *         config-set hook is missing.
 */
static int unf_set_port_speed(void *v_argc_in, void *v_argc_out)
{
	unsigned int ret = RETURN_OK;
	struct unf_set_speed_argin *in = NULL;
	struct unf_lport_s *lport = NULL;

	UNF_REFERNCE_VAR(v_argc_out);
	UNF_CHECK_VALID(0x2271, UNF_TRUE, v_argc_in, return UNF_RETURN_ERROR);

	in = (struct unf_set_speed_argin *)v_argc_in;
	lport = unf_find_lport_by_port_id(in->port_id);
	if (!lport) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR,
			  "Cannot Find LPort by (0x%x).", in->port_id);
		return UNF_RETURN_ERROR;
	}
	if (!lport->low_level_func.port_mgr_op.pfn_ll_port_config_set) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR,
			  "Port(0x%x)'s corresponding function is NULL.",
			  in->port_id);
		return UNF_RETURN_ERROR;
	}

	ret = lport->low_level_func.port_mgr_op.pfn_ll_port_config_set(
			lport->fc_port,
			UNF_PORT_CFG_SET_SPEED, (void *)in->speed);
	return (int)ret;
}
int unf_cm_set_port_speed(unsigned int v_port_id, unsigned int *v_speed)
{
struct unf_set_speed_argin in = { 0 };
in.port_id = v_port_id;
in.speed = v_speed;
return unf_send_event(v_port_id, UNF_EVENT_SYN, (void *)&in,
(void *)NULL, unf_set_port_speed);
}
/*
 * Event callback: read both the active topology (only meaningful when the
 * link is up — otherwise reported as UNF_ACT_TOP_UNKNOWN) and the
 * configured topology of a port.
 *
 * @argc_in:  unsigned int * port id.
 * @argc_out: struct unf_get_topo_argout * receiving active + configured
 *            topology.
 *
 * Return: low-level result, or UNF_RETURN_ERROR when the port or its
 *         config-get hook is missing.
 */
static int unf_get_port_topo(void *argc_in, void *argc_out)
{
	struct unf_lport_s *lport = NULL;
	struct unf_get_topo_argout *out = NULL;
	struct unf_low_level_port_mgr_op_s *port_mgr = NULL;
	int ret = UNF_TRUE;
	unsigned int port_id = 0;

	UNF_CHECK_VALID(0x2283, UNF_TRUE, argc_in, return UNF_RETURN_ERROR);
	UNF_CHECK_VALID(0x2284, UNF_TRUE, argc_out, return UNF_RETURN_ERROR);

	port_id = *(unsigned int *)argc_in;
	out = (struct unf_get_topo_argout *)argc_out;

	lport = unf_find_lport_by_port_id(port_id);
	if (!lport) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR,
			  "Not find LPort(0x%x).", port_id);
		return UNF_RETURN_ERROR;
	}

	port_mgr = &lport->low_level_func.port_mgr_op;
	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE,
			port_mgr->pfn_ll_port_config_get,
			return UNF_RETURN_ERROR);

	/* Active topology exists only while the link is up */
	if (lport->link_up == UNF_PORT_LINK_UP) {
		ret = (int)port_mgr->pfn_ll_port_config_get(lport->fc_port,
				UNF_PORT_CFG_GET_TOPO_ACT,
				(void *)out->en_act_topo);
		if (ret != RETURN_OK)
			return ret;
	} else {
		*out->en_act_topo = UNF_ACT_TOP_UNKNOWN;
	}

	/* Configured topology is always readable */
	ret = (int)port_mgr->pfn_ll_port_config_get(lport->fc_port,
			UNF_PORT_CFG_GET_TOPO_CFG, (void *)out->topo_cfg);
	return ret;
}
/*
 * Public entry: query a port's configured and active topology via a
 * synchronous event.
 *
 * @v_port_id:    target port.
 * @v_topo_cfg:   out parameter for the configured topology.
 * @v_en_act_topo: out parameter for the active topology.
 *
 * Return: result of unf_send_event() / unf_get_port_topo().
 */
int unf_cm_get_port_topo(unsigned int v_port_id, unsigned int *v_topo_cfg,
			 enum unf_act_topo_e *v_en_act_topo)
{
	struct unf_get_topo_argout out = { 0 };

	UNF_CHECK_VALID(0x2286, UNF_TRUE, v_topo_cfg, return UNF_RETURN_ERROR);
	UNF_CHECK_VALID(0x2287, UNF_TRUE, v_en_act_topo,
			return UNF_RETURN_ERROR);

	out.en_act_topo = v_en_act_topo;
	out.topo_cfg = v_topo_cfg;
	return unf_send_event(v_port_id, UNF_EVENT_SYN, (void *)&v_port_id,
			      (void *)&out, unf_get_port_topo);
}
/*
 * Event callback: configure the port topology (loop, point-to-point or
 * auto), cache the new setting on the L_Port, and report the current SFP
 * switch state through the out argument.
 *
 * @argc_in:  struct unf_set_topo_argin * (port id + topology mask).
 * @argc_out: enum int_e * receiving lport->b_switch_state.
 *
 * Return: RETURN_OK on success, UNF_RETURN_ERROR on invalid arguments,
 *         missing port/hook, or low-level failure.
 */
static int unf_set_port_topo(void *argc_in, void *argc_out)
{
	struct unf_lport_s *lport = NULL;
	struct unf_set_topo_argin *in = NULL;
	enum int_e *b_arg_out = (enum int_e *)argc_out;
	unsigned int ret = UNF_RETURN_ERROR;

	UNF_CHECK_VALID(0x2257, UNF_TRUE, argc_out, return UNF_RETURN_ERROR);
	UNF_CHECK_VALID(0x2288, UNF_TRUE, argc_in, return UNF_RETURN_ERROR);

	in = (struct unf_set_topo_argin *)argc_in;
	/* Only the three supported topology masks are accepted */
	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE,
			(in->topo == UNF_TOP_LOOP_MASK) ||
			(in->topo == UNF_TOP_P2P_MASK) ||
			(in->topo == UNF_TOP_AUTO_MASK),
			return UNF_RETURN_ERROR);

	lport = unf_find_lport_by_port_id(in->port_id);
	if (!lport) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR,
			  "Not find LPort(0x%x).", in->port_id);
		return UNF_RETURN_ERROR;
	}
	UNF_CHECK_VALID(
		INVALID_VALUE32, UNF_TRUE,
		lport->low_level_func.port_mgr_op.pfn_ll_port_config_set,
		return UNF_RETURN_ERROR);

	ret = lport->low_level_func.port_mgr_op.pfn_ll_port_config_set(
			lport->fc_port,
			UNF_PORT_CFG_SET_TOPO, (void *)&in->topo);
	if (ret != RETURN_OK) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR,
			  "Can't set port topology.");
		return UNF_RETURN_ERROR;
	}

	/* Persist the accepted configuration and report SFP switch state */
	lport->low_level_func.lport_cfg_items.port_topology = in->topo;
	*b_arg_out = lport->b_switch_state;
	return RETURN_OK;
}
int unf_cm_set_port_topo(unsigned int v_port_id, unsigned int v_topo)
{
struct unf_set_topo_argin in = { 0 };
int ret = UNF_RETURN_ERROR;
enum int_e b_switch_state = UNF_FALSE;
in.port_id = v_port_id;
in.topo = v_topo;
ret = unf_send_event(v_port_id, UNF_EVENT_SYN, (void *)&in,
(void *)&b_switch_state, unf_set_port_topo);
return ret;
}
/*
 * Event callback: configure the port's BB_SC_N (buffer-to-buffer state
 * change notification) value and cache it on the L_Port.
 *
 * @argc_in:  struct unf_set_bbscn_argin * (port id + bb_scn value).
 * @argc_out: unused.
 *
 * Return: RETURN_OK on success, UNF_RETURN_ERROR on missing port/hook or
 *         low-level failure.
 */
int unf_set_port_bbscn(void *argc_in, void *argc_out)
{
	struct unf_lport_s *lport = NULL;
	struct unf_set_bbscn_argin *in = NULL;
	unsigned int ret = UNF_RETURN_ERROR;

	UNF_REFERNCE_VAR(argc_out);
	UNF_CHECK_VALID(0x2300, UNF_TRUE, argc_in, return UNF_RETURN_ERROR);

	in = (struct unf_set_bbscn_argin *)argc_in;
	lport = unf_find_lport_by_port_id(in->port_id);
	if (!lport) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR,
			  "Not find LPort(0x%x).", in->port_id);
		return UNF_RETURN_ERROR;
	}
	UNF_CHECK_VALID(
		INVALID_VALUE32, UNF_TRUE,
		lport->low_level_func.port_mgr_op.pfn_ll_port_config_set,
		return UNF_RETURN_ERROR);

	ret = lport->low_level_func.port_mgr_op.pfn_ll_port_config_set(
			lport->fc_port,
			UNF_PORT_CFG_SET_BBSCN, (void *)&in->bb_scn);
	if (ret != RETURN_OK) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR,
			  "Cannot set port BB_SC_N.");
		return UNF_RETURN_ERROR;
	}

	/* update bbsn cfg to Lport */
	lport->low_level_func.lport_cfg_items.bb_scn = in->bb_scn;
	return RETURN_OK;
}
int unf_cm_set_port_bbscn(unsigned int v_port_id, unsigned int v_bbscn)
{
struct unf_set_bbscn_argin in = { 0 };
in.port_id = v_port_id;
in.bb_scn = v_bbscn;
return unf_send_event(v_port_id, UNF_EVENT_SYN, (void *)&in,
(void *)NULL, unf_set_port_bbscn);
}
/* Read the link error status block (LESB) counters from the low level
 * (clearing the hardware counters), fold them into the lport's running
 * error sums, and copy the sums out to v_fc_err_code.
 * When the link is not up, only the cached sums are returned.
 * Returns RETURN_OK on success, UNF_RETURN_ERROR otherwise.
 */
unsigned int unf_get_error_code_sum(struct unf_lport_s *v_lport,
struct unf_err_code_s *v_fc_err_code)
{
struct unf_low_level_port_mgr_op_s *port_mgr = NULL;
struct unf_lport_s *lport = v_lport;
unsigned int ret = UNF_RETURN_ERROR;
struct unf_err_code_s fc_err_code;
UNF_CHECK_VALID(0x2328, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
UNF_CHECK_VALID(0x2329, UNF_TRUE, v_fc_err_code,
return UNF_RETURN_ERROR);
memset(&fc_err_code, 0, sizeof(struct unf_err_code_s));
port_mgr = &lport->low_level_func.port_mgr_op;
if (!port_mgr->pfn_ll_port_config_get) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR,
"Port(0x%x)'s corresponding function is NULL.",
lport->port_id);
return UNF_RETURN_ERROR;
}
/* Fetch-and-clear the hardware LESB counters */
ret = port_mgr->pfn_ll_port_config_get((void *)lport->fc_port,
UNF_PORT_CFG_GET_LESB_THEN_CLR, (void *)&fc_err_code);
if (ret != RETURN_OK)
return UNF_RETURN_ERROR;
if (lport->link_up != UNF_PORT_LINK_UP) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_INFO,
"LPort(0x%x) is not link up.", lport->port_id);
memcpy(v_fc_err_code, &lport->err_code_sum,
sizeof(struct unf_err_code_s));
return RETURN_OK;
}
/* Accumulate the freshly-read deltas into the running sums.
 * NOTE(review): the last three fields use plain assignment rather
 * than "+=" — presumably intentional (snapshot vs. accumulated
 * counters), but worth confirming against the firmware semantics.
 */
lport->err_code_sum.bad_rx_char_count += fc_err_code.bad_rx_char_count;
lport->err_code_sum.link_fail_count += fc_err_code.link_fail_count;
lport->err_code_sum.loss_of_signal_count +=
fc_err_code.loss_of_signal_count;
lport->err_code_sum.loss_of_sync_count +=
fc_err_code.loss_of_sync_count;
lport->err_code_sum.proto_error_count += fc_err_code.proto_error_count;
lport->err_code_sum.rx_eo_fa_count = fc_err_code.rx_eo_fa_count;
lport->err_code_sum.dis_frame_count = fc_err_code.dis_frame_count;
lport->err_code_sum.bad_crc_count = fc_err_code.bad_crc_count;
memcpy(v_fc_err_code, &lport->err_code_sum,
sizeof(struct unf_err_code_s));
return RETURN_OK;
}
/* Event handler: clear both the hardware LESB counters and the lport's
 * cached error sums for the port identified by *(unsigned int *)argc_in.
 * argc_out is unused. Returns RETURN_OK or UNF_RETURN_ERROR.
 */
static int unf_clear_port_error_code_sum(void *argc_in, void *argc_out)
{
struct unf_lport_s *lport = NULL;
unsigned int port_id = 0;
unsigned int ret = UNF_RETURN_ERROR;
UNF_CHECK_VALID(0x2331, UNF_TRUE, argc_in, return UNF_RETURN_ERROR);
UNF_REFERNCE_VAR(argc_out);
port_id = *(unsigned int *)argc_in;
lport = unf_find_lport_by_port_id(port_id);
if (!lport) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR,
"Cannot find LPort(0x%x).", port_id);
return UNF_RETURN_ERROR;
}
if (!lport->low_level_func.port_mgr_op.pfn_ll_port_config_get) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR,
"Port(0x%x)'s corresponding function is NULL.",
port_id);
return UNF_RETURN_ERROR;
}
/* Clear the hardware-side LESB counters first */
ret = lport->low_level_func.port_mgr_op.pfn_ll_port_config_get(
(void *)lport->fc_port,
UNF_PORT_CFG_CLR_LESB, NULL);
if (ret != RETURN_OK)
return UNF_RETURN_ERROR;
/* Then reset the driver-side accumulated sums */
memset(&lport->err_code_sum, 0, sizeof(struct unf_err_code_s));
return RETURN_OK;
}
int unf_cm_clear_port_error_code_sum(unsigned int v_port_id)
{
return unf_send_event(v_port_id, UNF_EVENT_SYN, (void *)&v_port_id,
(void *)NULL, unf_clear_port_error_code_sum);
}
/* Refresh v_lport->sfp_info by querying the low level with config-get
 * opcode v_type. Returns the low-level call's result cast to int;
 * UNF_RETURN_ERROR when the config-get hook is missing.
 */
static int unf_update_lport_sfp_info(struct unf_lport_s *v_lport,
enum unf_port_config_get_op_e v_type)
{
struct unf_lport_s *lport = NULL;
int ret = UNF_RETURN_ERROR;
UNF_CHECK_VALID(0x2332, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
lport = v_lport;
if (!lport->low_level_func.port_mgr_op.pfn_ll_port_config_get) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR,
"Port(0x%x)'s corresponding function is NULL.",
lport->port_id);
return UNF_RETURN_ERROR;
}
/* The low level fills lport->sfp_info in place */
ret = (int)(lport->low_level_func.port_mgr_op.pfn_ll_port_config_get(
(void *)lport->fc_port,
v_type, (void *)&lport->sfp_info));
return ret;
}
/* Map the lport's raw SFP failure status onto the driver cable-connector
 * codes reported through v_out->status. Known failure states translate
 * to a connector code and RETURN_OK; anything else yields the BUTT
 * sentinel and UNF_RETURN_ERROR.
 */
static int unf_translate_sfp_status(struct unf_lport_s *v_lport,
				    struct unf_get_sfp_argout *v_out)
{
	int ret = RETURN_OK;

	switch (v_lport->sfp_info.status) {
	case UNF_SFP_PRESENT_FAIL:
		*v_out->status = DRV_CABLE_CONNECTOR_NONE;
		break;
	case UNF_SFP_POWER_FAIL:
		/* fallthrough: both faults report an invalid connector */
	case UNF_9545_FAIL:
		*v_out->status = DRV_CABLE_CONNECTOR_INVALID;
		break;
	default:
		*v_out->status = DRV_CABLE_CONNECTOR_BUTT;
		ret = UNF_RETURN_ERROR;
		break;
	}

	return ret;
}
/* Debounce SFP fault reporting: a 9545 or power fault must be observed
 * more than UNF_CHIP_FAULT_MAX_CHECK_TIME consecutive times before the
 * corresponding flag is latched in the shared chip_info.
 */
static void unf_record_chip_fault(struct unf_lport_s *v_lport)
{
#define UNF_CHIP_FAULT_MAX_CHECK_TIME 3
if (v_lport->sfp_info.status == UNF_9545_FAIL) {
/* If there are 9545 fault,explain that the sfp is power on,
 * and reset sfp_power_fault_count
 */
v_lport->sfp_power_fault_count = 0;
if (v_lport->sfp_9545_fault_count <
UNF_CHIP_FAULT_MAX_CHECK_TIME) {
v_lport->sfp_9545_fault_count++;
} else {
/* Threshold exceeded: latch the fault and restart counting */
v_lport->chip_info->sfp_9545_fault = UNF_TRUE;
v_lport->sfp_9545_fault_count = 0;
}
} else if (v_lport->sfp_info.status == UNF_SFP_POWER_FAIL) {
if (v_lport->sfp_power_fault_count <
UNF_CHIP_FAULT_MAX_CHECK_TIME) {
v_lport->sfp_power_fault_count++;
} else {
v_lport->chip_info->sfp_power_fault = UNF_TRUE;
v_lport->sfp_power_fault_count = 0;
}
}
}
/* Attempt to recover an SFP that reports TX_FAULT by power-cycling it
 * (switch off, 100 ms pause, switch on). Recovery is rate-limited to at
 * most once per 24 hours and only done while the link is down and the
 * port switch is on. Returns UNF_RETURN_ERROR when a recovery cycle was
 * triggered, RETURN_OK otherwise.
 */
int unf_check_sfp_tx_fault(struct unf_lport_s *v_lport,
struct unf_sfp_info_s *v_sfp_info)
{
/* 24 hours ms(24*60*60*1000) */
#define UNF_SFP_TXFALT_RECOVER_INTERVEL 86400000
struct unf_sfp_info_s *sfp_info = NULL;
struct unf_lport_s *lport = NULL;
sfp_info = v_sfp_info;
lport = v_lport;
/* No TX fault flagged in the A2 diagnostics: nothing to do */
if (sfp_info->sfp_info_a2.diag.status_ctrl.tx_fault_state == 0)
return RETURN_OK;
/* Repair conditions:
 * 1 port linkdown;
 * 2 from the last repair more than 24 hours;
 * 3 sfp is on
 */
if ((lport->link_up == UNF_PORT_LINK_DOWN) &&
(lport->b_switch_state) &&
((lport->last_tx_fault_jif == 0) ||
(jiffies_to_msecs(jiffies - lport->last_tx_fault_jif) >
UNF_SFP_TXFALT_RECOVER_INTERVEL))) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR,
"LPort(0x%x) stat(0x%x) jiff(%ld) lastjiff(%llu) Ctrl(0x%x) TxFault set 1.",
lport->port_id, lport->link_up, jiffies,
lport->last_tx_fault_jif,
*((unsigned char *)
&sfp_info->sfp_info_a2.diag.status_ctrl));
lport->last_tx_fault_jif = jiffies;
/* Power-cycle the SFP to clear the TX fault */
(void)unf_sfp_switch(lport->port_id, UNF_FALSE);
msleep(100);
/* Around quickly switch port FW state error problem */
(void)unf_sfp_switch(lport->port_id, UNF_TRUE);
return UNF_RETURN_ERROR;
}
return RETURN_OK;
}
/* Event handler: read the SFP EEPROM of the port in *(unsigned int *)argc_in
 * and return status + EEPROM contents through argc_out
 * (struct unf_get_sfp_argout *). On read failure the raw status is
 * translated to a connector code and chip-fault debouncing is updated.
 */
static int unf_get_sfp_info(void *argc_in, void *argc_out)
{
struct unf_lport_s *lport = NULL;
struct unf_get_sfp_argout *out = NULL;
unsigned int port_id = 0;
int ret = RETURN_OK;
UNF_CHECK_VALID(0x2333, UNF_TRUE, argc_in, return UNF_RETURN_ERROR);
UNF_CHECK_VALID(0x2334, UNF_TRUE, argc_out, return UNF_RETURN_ERROR);
port_id = *(unsigned int *)argc_in;
out = (struct unf_get_sfp_argout *)argc_out;
lport = unf_find_lport_by_port_id(port_id);
if (!lport) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR,
"Cannot find LPort(0x%x).", port_id);
return UNF_RETURN_ERROR;
}
lport->sfp_info.status = 0;
ret = unf_update_lport_sfp_info(lport, UNF_PORT_CFG_GET_SFP_INFO);
if (ret == RETURN_OK) {
/* Successful read clears the fault debounce counters */
lport->sfp_power_fault_count = 0;
lport->sfp_9545_fault_count = 0;
*out->status = DRV_CABLE_CONNECTOR_OPTICAL;
/* TX-fault recovery may power-cycle the SFP; treat as failure */
if (unf_check_sfp_tx_fault(
lport,
&lport->sfp_info.sfp_eeprom_info.sfp_info) ==
UNF_RETURN_ERROR) {
return UNF_RETURN_ERROR;
}
memcpy(out->sfp_info, &lport->sfp_info.sfp_eeprom_info,
sizeof(union unf_sfp_eeprome_info));
ret = RETURN_OK;
} else {
/* Read failed: report a connector code derived from the status */
ret = unf_translate_sfp_status(lport, out);
unf_record_chip_fault(lport);
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"Port(0x%x)'s getsfpinfo fail, sfp status(0x%x).",
lport->port_id, lport->sfp_info.status);
}
return ret;
}
/* Public entry: fetch SFP status/EEPROM/type for port v_port_id.
 * v_status, v_sfp_info and sfp_type are output pointers filled on success.
 * Returns RETURN_OK or UNF_RETURN_ERROR.
 *
 * Fix: sfp_type was dereferenced at "*sfp_type = ..." without a NULL
 * check, while the sibling out-parameters v_status and v_sfp_info were
 * both validated with UNF_CHECK_VALID. Add the missing validation.
 */
int unf_cm_get_sfp_info(unsigned int v_port_id, unsigned int *v_status,
			union unf_sfp_eeprome_info *v_sfp_info,
			unsigned int *sfp_type)
{
	struct unf_lport_s *lport = NULL;
	struct unf_get_sfp_argout out = { 0 };

	lport = unf_find_lport_by_port_id(v_port_id);
	if (!lport)
		return UNF_RETURN_ERROR;

	UNF_CHECK_VALID(0x2335, UNF_TRUE, v_status, return UNF_RETURN_ERROR);
	UNF_CHECK_VALID(0x2336, UNF_TRUE, v_sfp_info, return UNF_RETURN_ERROR);
	UNF_CHECK_VALID(0x2337, UNF_TRUE, sfp_type, return UNF_RETURN_ERROR);

	out.status = v_status;
	out.sfp_info = v_sfp_info;

	/* SFP access is only legal once the port manager has started */
	if (global_lport_mgr.b_start_work == UNF_FALSE) {
		UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
			  "Port(0x%x) have not start work, return.", v_port_id);
		return UNF_RETURN_ERROR;
	}

	/* SFP type comes straight from the low-level capability table */
	*sfp_type = lport->low_level_func.sfp_type;

	return unf_send_event(v_port_id, UNF_EVENT_SYN, (void *)&v_port_id,
			      (void *)&out, unf_get_sfp_info);
}
/* Public entry: synchronously reset port v_port_id via the event queue. */
int unf_cm_reset_port(unsigned int v_port_id)
{
	return unf_send_event(v_port_id, UNF_EVENT_SYN, (void *)&v_port_id,
			      (void *)NULL, unf_reset_port);
}
/* Reset the given lport; v_flag selects synchronous or asynchronous
 * event dispatch (UNF_EVENT_SYN / UNF_EVENT_ASYN).
 */
int unf_lport_reset_port(struct unf_lport_s *v_lport, unsigned int v_flag)
{
UNF_CHECK_VALID(0x2352, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
return unf_send_event(v_lport->port_id, v_flag,
(void *)&v_lport->port_id,
(void *)NULL,
unf_reset_port);
}
/* Query the low level for the loop ALPA (arbitrated loop physical
 * address) and store it into *v_loop_alpa. Returns the low-level
 * result, or UNF_RETURN_ERROR if the config-get hook is missing.
 */
static inline unsigned int unf_get_loop_alpa(struct unf_lport_s *v_lport,
void *v_loop_alpa)
{
unsigned int ret = UNF_RETURN_ERROR;
UNF_CHECK_VALID(0x2357, UNF_TRUE,
v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get,
return UNF_RETURN_ERROR);
ret = v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get(
v_lport->fc_port,
UNF_PORT_CFG_GET_LOOP_ALPA, v_loop_alpa);
return ret;
}
/* Login path for private-loop topology: transition the lport to READY,
 * record the active topology, and (for INI-mode, non-NOP ports) kick
 * off target discovery.
 * NOTE(review): if the port has INI mode but no pfn_unf_disc_start hook,
 * this returns the initial UNF_RETURN_ERROR even though nothing failed —
 * presumably that hook is always registered; confirm with the disc layer.
 */
static unsigned int unf_lport_enter_private_loop_login(
struct unf_lport_s *v_lport)
{
struct unf_lport_s *lport = v_lport;
unsigned long flag = 0;
unsigned int ret = UNF_RETURN_ERROR;
UNF_CHECK_VALID(0x2358, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
spin_lock_irqsave(&lport->lport_state_lock, flag);
unf_lport_stat_ma(lport, UNF_EVENT_LPORT_READY);
/* LPort: LINK_UP --> READY */
spin_unlock_irqrestore(&lport->lport_state_lock, flag);
unf_lport_update_topo(lport, UNF_ACT_TOP_PRIVATE_LOOP);
/* NOP: check L_Port state */
if (atomic_read(&lport->port_no_operater_flag) == UNF_LPORT_NOP) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"[info]Port(0x%x) is NOP, do nothing",
lport->port_id);
return RETURN_OK;
}
/* INI: check L_Port mode */
if ((lport->options & UNF_PORT_MODE_INI) != UNF_PORT_MODE_INI) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"[info]Port(0x%x) has no INI feature(0x%x), do nothing",
lport->port_id, lport->options);
return RETURN_OK;
}
/* Start discovery of loop targets via the registered disc hook */
if (lport->disc.unf_disc_temp.pfn_unf_disc_start) {
ret = lport->disc.unf_disc_temp.pfn_unf_disc_start(lport);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT,
UNF_WARN,
"[warn]Port(0x%x) with nportid(0x%x) start discovery failed",
lport->port_id, lport->nport_id);
}
}
return ret;
}
/* Begin fabric/loop login for the lport according to the active topology
 * reported by the low level (v_en_act_topo):
 *  - P2P / fabric: go straight to FLOGI;
 *  - public loop: fetch the loop ALPA as a provisional N_Port ID,
 *    then FLOGI (the switch assigns the full address in FLOGI ACC);
 *  - private loop: fetch the loop ALPA and run private-loop login.
 * Returns RETURN_OK, or UNF_RETURN_ERROR when the link is not up.
 */
unsigned int unf_lport_login(struct unf_lport_s *v_lport,
enum unf_act_topo_e v_en_act_topo)
{
unsigned int loop_alpa = 0;
unsigned int ret = RETURN_OK;
unsigned long flag = 0;
UNF_CHECK_VALID(0x2359, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
/* 1. Update (set) L_Port topo which get from low level */
unf_lport_update_topo(v_lport, v_en_act_topo);
spin_lock_irqsave(&v_lport->lport_state_lock, flag);
/* 2. Link state check */
if (v_lport->link_up != UNF_PORT_LINK_UP) {
spin_unlock_irqrestore(&v_lport->lport_state_lock, flag);
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x) with link_state(0x%x) port_state(0x%x) when login",
v_lport->port_id, v_lport->link_up,
v_lport->en_states);
return UNF_RETURN_ERROR;
}
/* 3. Update L_Port state */
unf_lport_stat_ma(v_lport, UNF_EVENT_LPORT_LINK_UP);
/* LPort: INITIAL --> LINK UP */
spin_unlock_irqrestore(&v_lport->lport_state_lock, flag);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO,
"[info]LOGIN: Port(0x%x) start to login with topology(0x%x)",
v_lport->port_id, v_lport->en_act_topo);
/* 4. Start logoin */
if ((v_en_act_topo == UNF_TOP_P2P_MASK) ||
(v_en_act_topo == UNF_ACT_TOP_P2P_FABRIC) ||
(v_en_act_topo == UNF_ACT_TOP_P2P_DIRECT)) {
/* P2P or Fabric mode */
ret = unf_lport_enter_flogi(v_lport);
} else if (v_en_act_topo == UNF_ACT_TOP_PUBLIC_LOOP) {
/* Public loop */
(void)unf_get_loop_alpa(v_lport, &loop_alpa);
/* Before FLOGI ALPA just low 8 bit after FLOGI ACC switch
 * will assign complete addresses
 */
spin_lock_irqsave(&v_lport->lport_state_lock, flag);
v_lport->nport_id = loop_alpa;
spin_unlock_irqrestore(&v_lport->lport_state_lock, flag);
ret = unf_lport_enter_flogi(v_lport);
} else if (v_en_act_topo == UNF_ACT_TOP_PRIVATE_LOOP) {
/* Private loop */
(void)unf_get_loop_alpa(v_lport, &loop_alpa);
spin_lock_irqsave(&v_lport->lport_state_lock, flag);
v_lport->nport_id = loop_alpa;
spin_unlock_irqrestore(&v_lport->lport_state_lock, flag);
ret = unf_lport_enter_private_loop_login(v_lport);
} else {
/* Unknown topology: log and fall through with ret == RETURN_OK */
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]LOGIN: Port(0x%x) login with unknown topology(0x%x)",
v_lport->port_id, v_lport->en_act_topo);
}
return ret;
}
/* Link-up event handler: record the new link speed (v_in_put points at
 * an unsigned int speed value), move the lport to INITIAL, query the
 * active topology from the low level, and start the login sequence.
 * NOTE(review): despite UNF_REFERNCE_VAR, v_in_put IS dereferenced for
 * the speed — callers are presumably required to pass a valid pointer;
 * a NULL input would oops here. Confirm against the event producers.
 */
static unsigned int unf_port_link_up(struct unf_lport_s *v_lport,
void *v_in_put)
{
struct unf_lport_s *lport = v_lport;
unsigned int ret = RETURN_OK;
enum unf_act_topo_e en_act_topo = UNF_ACT_TOP_UNKNOWN;
unsigned long flag = 0;
UNF_CHECK_VALID(0x2361, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
UNF_REFERNCE_VAR(v_in_put);
/* If NOP state, stop */
if (atomic_read(&lport->port_no_operater_flag) == UNF_LPORT_NOP) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
"[warn]Port(0x%x) is NOP and do nothing",
lport->port_id);
return RETURN_OK;
}
/* Update port state */
spin_lock_irqsave(&lport->lport_state_lock, flag);
lport->link_up = UNF_PORT_LINK_UP;
lport->speed = *((unsigned int *)v_in_put);
unf_set_lport_state(v_lport, UNF_LPORT_ST_INITIAL);
/* INITIAL state */
spin_unlock_irqrestore(&lport->lport_state_lock, flag);
/* set hot pool wait state: so far, do not care */
unf_set_hot_pool_wait_state(lport, UNF_TRUE);
lport->enhanced_features |= UNF_LPORT_ENHANCED_FEATURE_READ_SFP_ONCE;
/* Get port active topopolgy (from low level) */
if (!lport->low_level_func.port_mgr_op.pfn_ll_port_config_get) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
"[warn]Port(0x%x) get topo function is NULL",
lport->port_id);
return UNF_RETURN_ERROR;
}
ret = lport->low_level_func.port_mgr_op.pfn_ll_port_config_get(
lport->fc_port,
UNF_PORT_CFG_GET_TOPO_ACT, (void *)&en_act_topo);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_ERR,
"[warn]Port(0x%x) get topo from low level failed",
lport->port_id);
return UNF_RETURN_ERROR;
}
/* Start Login process */
ret = unf_lport_login(lport, en_act_topo);
unf_report_io_dm_event(lport, UNF_PORT_LINK_UP, 0);
return ret;
}
/* Link-down event handler: reset link speed/topology, move the lport
 * state machine to link-down (deduplicating repeated events), abort
 * outstanding INI I/O, and propagate link-down to all rports and
 * vports. v_in_put is unused.
 */
static unsigned int unf_port_link_down(struct unf_lport_s *v_lport,
void *v_in_put)
{
unsigned long flag = 0;
struct unf_lport_s *lport = NULL;
UNF_CHECK_VALID(0x2363, UNF_FALSE, v_lport, return UNF_RETURN_ERROR);
UNF_REFERNCE_VAR(v_in_put);
lport = v_lport;
unf_report_io_dm_event(lport, UNF_PORT_LINK_DOWN, 0);
/* To prevent repeated reporting linkdown */
spin_lock_irqsave(&lport->lport_state_lock, flag);
lport->speed = UNF_PORT_SPEED_UNKNOWN;
lport->en_act_topo = UNF_ACT_TOP_UNKNOWN;
if (lport->link_up == UNF_PORT_LINK_DOWN) {
/* Already down: nothing more to do */
spin_unlock_irqrestore(&lport->lport_state_lock, flag);
return RETURN_OK;
}
unf_lport_stat_ma(lport, UNF_EVENT_LPORT_LINK_DOWN);
unf_reset_lport_params(lport);
spin_unlock_irqrestore(&lport->lport_state_lock, flag);
unf_set_hot_pool_wait_state(lport, UNF_FALSE);
/*
 * clear I/O:
 * 1. INI do ABORT only,
 * for INI: busy/delay/delay_transfer/wait
 * Clean L_Port/V_Port Link Down I/O: only set ABORT tag
 */
unf_flush_disc_event(&lport->disc, NULL);
unf_clean_link_down_io(lport, UNF_FALSE);
/* for L_Port's R_Ports */
unf_clean_linkdown_rport(lport);
/* for L_Port's all Vports */
unf_linkdown_all_vports(v_lport);
return RETURN_OK;
}
/* Abnormal-reset event handler: queue an asynchronous port reset.
 * v_in_put is unused.
 */
static unsigned int unf_port_abnormal_reset(struct unf_lport_s *v_lport,
					    void *v_in_put)
{
	UNF_CHECK_VALID(0x2363, UNF_FALSE, v_lport, return UNF_RETURN_ERROR);
	UNF_REFERNCE_VAR(v_in_put);

	return (unsigned int)unf_lport_reset_port(v_lport, UNF_EVENT_ASYN);
}
/* Reset-start event handler: mark the lport as being in RESET state
 * under the state lock. v_in_put is unused.
 */
static unsigned int unf_port_reset_start(struct unf_lport_s *v_lport,
					 void *v_in_put)
{
	unsigned long lock_flag = 0;

	UNF_CHECK_VALID(0x2364, UNF_FALSE, v_lport, return UNF_RETURN_ERROR);
	UNF_REFERNCE_VAR(v_in_put);

	spin_lock_irqsave(&v_lport->lport_state_lock, lock_flag);
	unf_set_lport_state(v_lport, UNF_LPORT_ST_RESET);
	spin_unlock_irqrestore(&v_lport->lport_state_lock, lock_flag);

	UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO,
		  "Port(0x%x) begin to reset.", v_lport->port_id);

	return RETURN_OK;
}
/* Reset-end event handler: wake pending SCSI task-management commands
 * (so they complete instead of triggering repair on an offline device)
 * and return the lport to INITIAL state. v_in_put is unused.
 */
static unsigned int unf_port_reset_end(struct unf_lport_s *v_lport,
				       void *v_in_put)
{
	unsigned long lock_flag = 0;

	UNF_CHECK_VALID(0x2365, UNF_FALSE, v_lport, return UNF_RETURN_ERROR);
	UNF_REFERNCE_VAR(v_in_put);

	UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO,
		  "Port(0x%x) reset end.", v_lport->port_id);

	/* Task management command returns success and avoid
	 * repair measures case offline device
	 */
	unf_wakeup_scsi_task_cmnd(v_lport);

	spin_lock_irqsave(&v_lport->lport_state_lock, lock_flag);
	unf_set_lport_state(v_lport, UNF_LPORT_ST_INITIAL);
	spin_unlock_irqrestore(&v_lport->lport_state_lock, lock_flag);

	return RETURN_OK;
}
/* NOP (no-operation / fatal) event handler: flag the port as NOP, force
 * it through the link-down state transition, and clean up all I/O,
 * rports and vports. v_in_put is unused.
 */
static unsigned int unf_port_nop(struct unf_lport_s *v_lport, void *v_in_put)
{
struct unf_lport_s *lport = NULL;
unsigned long flag = 0;
UNF_CHECK_VALID(0x2366, UNF_FALSE, v_lport, return UNF_RETURN_ERROR);
UNF_REFERNCE_VAR(v_in_put);
lport = v_lport;
atomic_set(&lport->port_no_operater_flag, UNF_LPORT_NOP);
spin_lock_irqsave(&lport->lport_state_lock, flag);
unf_lport_stat_ma(lport, UNF_EVENT_LPORT_LINK_DOWN);
unf_reset_lport_params(lport);
spin_unlock_irqrestore(&lport->lport_state_lock, flag);
/* Set Tag prevent pending I/O to wait_list when close sfp failed */
unf_set_hot_pool_wait_state(lport, UNF_FALSE);
unf_flush_disc_event(&lport->disc, NULL);
/* L_Port/V_Port's I/O(s): Clean Link Down I/O: Set Abort Tag */
unf_clean_link_down_io(lport, UNF_FALSE);
/* L_Port/V_Port's R_Port(s): report link down event to
 * scsi & clear resource
 */
unf_clean_linkdown_rport(lport);
unf_linkdown_all_vports(lport);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"[info]Port(0x%x) report NOP event done",
lport->nport_id);
return RETURN_OK;
}
/* Clean-done event handler: after a port reset, drop every R_Port at
 * once so their resources are released immediately. v_in_put is unused.
 */
static unsigned int unf_port_clean_done(struct unf_lport_s *v_lport,
					void *v_in_put)
{
	UNF_CHECK_VALID(0x2691, UNF_FALSE, v_lport, return UNF_RETURN_ERROR);
	UNF_REFERNCE_VAR(v_in_put);

	unf_clean_linkdown_rport(v_lport);

	return RETURN_OK;
}
/* Begin-remove event handler: cancel the lport's route delayed work
 * before teardown continues. v_in_put is unused.
 */
static unsigned int unf_port_begin_remove(struct unf_lport_s *v_lport,
					  void *v_in_put)
{
	UNF_CHECK_VALID(0x2691, UNF_FALSE, v_lport, return UNF_RETURN_ERROR);
	UNF_REFERNCE_VAR(v_in_put);

	unf_destroy_lport_route(v_lport);

	return RETURN_OK;
}
/* Query the PCIe link state from the low level.
 * Returns RETURN_OK only when the query succeeds AND the link is up;
 * UNF_RETURN_ERROR otherwise (query failed or link reported down).
 */
static unsigned int unf_get_pcie_link_state(struct unf_lport_s *v_lport)
{
struct unf_lport_s *lport = v_lport;
int link_state = UNF_TRUE;
unsigned int ret = UNF_RETURN_ERROR;
UNF_CHECK_VALID(0x2257, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
UNF_CHECK_VALID(
INVALID_VALUE32, UNF_TRUE,
lport->low_level_func.port_mgr_op.pfn_ll_port_config_get,
return UNF_RETURN_ERROR);
ret = lport->low_level_func.port_mgr_op.pfn_ll_port_config_get(
lport->fc_port,
UNF_PORT_CFG_GET_PCIE_LINK_STATE, (void *)&link_state);
if (ret != RETURN_OK || link_state != UNF_TRUE) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_KEVENT,
"[err]Can't Get Pcie Link State");
return UNF_RETURN_ERROR;
}
return RETURN_OK;
}
/* Drop one reference on a root lport. When the count reaches zero the
 * lport is moved from the global list to the destroy list (kept there
 * for debugging) and its actual destruction is scheduled as an async
 * global event. Lock order: global_lport_list_lock outer,
 * lport_state_lock inner — preserved on every exit path.
 */
void unf_root_lport_ref_dec(struct unf_lport_s *v_lport)
{
unsigned long flags = 0;
unsigned long lport_flags = 0;
unsigned int ret = UNF_RETURN_ERROR;
UNF_CHECK_VALID(0x2385, UNF_TRUE, v_lport, return);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO,
"[info]Port(0x%p) port_id(0x%x) reference count is %d",
v_lport, v_lport->port_id,
atomic_read(&v_lport->lport_ref_cnt));
spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags);
spin_lock_irqsave(&v_lport->lport_state_lock, lport_flags);
if (atomic_dec_and_test(&v_lport->lport_ref_cnt)) {
/* Last reference: unlink from the live list ... */
spin_unlock_irqrestore(&v_lport->lport_state_lock, lport_flags);
list_del(&v_lport->entry_lport);
global_lport_mgr.lport_sum--;
/* Put L_Port to destroy list for debuging */
list_add_tail(&v_lport->entry_lport,
&global_lport_mgr.list_destroy_head);
spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock,
flags);
/* ... and destroy it outside the locks, asynchronously */
ret = unf_schedule_global_event((void *)v_lport,
UNF_GLOBAL_EVENT_ASYN,
unf_lport_destroy);
if (ret != RETURN_OK)
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EVENT,
UNF_CRITICAL,
"[warn]Schedule global event faile. remain nodes(0x%x)",
global_event_queue.list_number);
} else {
spin_unlock_irqrestore(&v_lport->lport_state_lock, lport_flags);
spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock,
flags);
}
}
/* Drop one reference on an lport, routing to the correct release path:
 * the root lport and its vports are refcounted separately.
 */
void unf_lport_ref_dec_to_destroy(struct unf_lport_s *v_lport)
{
	if (v_lport->root_lport == v_lport)
		unf_root_lport_ref_dec(v_lport);
	else
		unf_vport_ref_dec(v_lport);
}
/* Periodic (1 s) route maintenance work for an lport. Each tick it:
 *  - stops permanently (dropping the work's lport reference) if the
 *    port is being removed, the PCIe link has been down for 3 straight
 *    ticks, the chip reported an error, or the user disabled FW route;
 *  - samples the error-code sums once every MAX_INTERVAL_TIMES ticks;
 *  - re-queues itself; on queue failure the reference is also dropped.
 */
void unf_lport_route_work(struct work_struct *v_work)
{
#define MAX_INTERVAL_TIMES 60
struct unf_lport_s *lport = NULL;
int ret = 0;
struct unf_err_code_s fc_err_code;
UNF_CHECK_VALID(0x2388, UNF_TRUE, v_work, return);
lport = container_of(v_work, struct unf_lport_s, route_timer_work.work);
if (unlikely(!lport)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT,
UNF_KEVENT, "[err]LPort is NULL");
return;
}
if (unlikely(lport->b_port_removing == UNF_TRUE)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_KEVENT,
"[warn]LPort(0x%x) route work is closing.",
lport->port_id);
unf_lport_ref_dec_to_destroy(lport);
return;
}
/* Debounce PCIe link-down detection across consecutive ticks */
if (unlikely(unf_get_pcie_link_state(lport)))
lport->pcie_link_down_cnt++;
else
lport->pcie_link_down_cnt = 0;
if (lport->pcie_link_down_cnt >= 3) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_KEVENT,
"[warn]LPort(0x%x) detected pcie linkdown, closing route work",
lport->port_id);
lport->b_pcie_linkdown = UNF_TRUE;
unf_free_lport_all_xchg(lport);
unf_lport_ref_dec_to_destroy(lport);
return;
}
if (unlikely(UNF_LPORT_CHIP_ERROR(lport))) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_KEVENT,
"[warn]LPort(0x%x) reported chip error, closing route work. ",
lport->port_id);
unf_lport_ref_dec_to_destroy(lport);
return;
}
if (lport->enhanced_features &
UNF_LPORT_ENHANCED_FEATURE_CLOSE_FW_ROUTE) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_KEVENT,
"[warn]User close LPort(0x%x) route work. ",
lport->port_id);
unf_lport_ref_dec_to_destroy(lport);
return;
}
/* Poll the LESB error counters on tick 0 of each 60-tick cycle */
if (atomic_read(&lport->err_code_obtain_freq) == 0) {
memset(&fc_err_code, 0, sizeof(struct unf_err_code_s));
unf_get_error_code_sum(lport, &fc_err_code);
atomic_inc(&lport->err_code_obtain_freq);
} else if (atomic_read(&lport->err_code_obtain_freq) ==
MAX_INTERVAL_TIMES) {
atomic_set(&lport->err_code_obtain_freq, 0);
} else {
atomic_inc(&lport->err_code_obtain_freq);
}
/* Scheduling 1 second */
ret = queue_delayed_work(
unf_work_queue, &lport->route_timer_work,
(unsigned long)msecs_to_jiffies(UNF_LPORT_POLL_TIMER));
if (ret == 0) {
/* Work was already queued: give back our reference */
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_KEVENT,
"[warn]LPort(0x%x) schedule work unsuccessful.",
lport->port_id);
unf_lport_ref_dec_to_destroy(lport);
}
}
/* Event handler: fill argc_out (struct unf_get_port_info_argout *) with
 * the low level's current port information for the lport in argc_in.
 * Returns RETURN_OK or UNF_RETURN_ERROR.
 */
int unf_cm_get_port_info(void *argc_in, void *argc_out)
{
struct unf_lport_s *lport = NULL;
struct unf_get_port_info_argout *port_info = NULL;
UNF_CHECK_VALID(0x2398, UNF_TRUE, argc_in, return UNF_RETURN_ERROR);
UNF_CHECK_VALID(0x2398, UNF_TRUE, argc_out, return UNF_RETURN_ERROR);
lport = (struct unf_lport_s *)argc_in;
port_info = (struct unf_get_port_info_argout *)argc_out;
if (!lport->low_level_func.port_mgr_op.pfn_ll_port_config_get) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"Port(0x%x)'s corresponding function is NULL.",
lport->port_id);
return UNF_RETURN_ERROR;
}
if (lport->low_level_func.port_mgr_op.pfn_ll_port_config_get(
lport->fc_port,
UNF_PORT_CFG_GET_PORT_INFO, port_info) !=
RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"Port(0x%x) get current info failed.",
lport->port_id);
return UNF_RETURN_ERROR;
}
return RETURN_OK;
}
/* Synchronously query current port info for v_lport and cache the
 * reported SFP speed in the lport's low-level capability table.
 * Returns RETURN_OK or UNF_RETURN_ERROR.
 */
static unsigned int unf_get_lport_current_info(struct unf_lport_s *v_lport)
{
struct unf_get_port_info_argout port_info = { 0 };
unsigned int ret = UNF_RETURN_ERROR;
struct unf_lport_s *lport = NULL;
UNF_CHECK_VALID(0x2403, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
/* Re-resolve by port_id to make sure the lport is still registered */
lport = unf_find_lport_by_port_id(v_lport->port_id);
if (!lport)
return UNF_RETURN_ERROR;
ret = (unsigned int)unf_send_event(lport->port_id, UNF_EVENT_SYN,
(void *)lport,
(void *)&port_info,
unf_cm_get_port_info);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"UNF_GetPortCurrentInfo SendEvent(unf_cm_get_port_info) fail.");
return UNF_RETURN_ERROR;
}
lport->low_level_func.sfp_speed = port_info.sfp_speed;
return RETURN_OK;
}
/* Persist the link-loss timeout to the board flash via the up-level
 * firmware: read the current flash data block, splice in v_link_tmo,
 * and write the block back. Returns RETURN_OK or UNF_RETURN_ERROR.
 */
int unf_set_link_lose_tmo_to_up(struct unf_lport_s *v_lport,
struct unf_flash_link_tmo_s *v_link_tmo)
{
int ret = UNF_RETURN_ERROR;
struct unf_flash_data_s flash_data;
/* The whole flash data block must fit the firmware buffer */
if ((!v_lport) || (!v_link_tmo) ||
(sizeof(struct unf_flash_data_s) > HIFC_FLASH_DATA_MAX_LEN)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_KEVENT,
"[warn]set link tmo param check fail");
return ret;
}
memset(&flash_data, 0, sizeof(struct unf_flash_data_s));
if (!v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_KEVENT,
"[warn]link tmo fun null");
return ret;
}
/* Read-modify-write: fetch existing flash contents first */
if (v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get(
v_lport->fc_port,
UNF_PORT_CFG_GET_FLASH_DATA_INFO, &flash_data) !=
RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_KEVENT,
"[warn]get link tmo to up fail");
return ret;
}
memcpy(&flash_data.link_tmo, v_link_tmo, HIFC_FLASH_LINK_TMO_MAX_LEN);
if (!v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_set) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_KEVENT,
"[warn]set link tmo fun null");
return ret;
}
if (v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_set(
v_lport->fc_port, UNF_PORT_CFG_SET_FLASH_DATA_INFO,
&flash_data) !=
RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_KEVENT,
"[warn]set link tmo to up fail");
return ret;
}
ret = RETURN_OK;
return ret;
}
/* Set the link-loss timeout (seconds) for one lport: update the
 * in-memory atomic value and persist it to flash via the firmware.
 * Returns RETURN_OK when the value is unchanged or the flash write
 * succeeds, UNF_RETURN_ERROR otherwise.
 */
int unf_set_link_lose_tmo(struct unf_lport_s *v_lport, int time_out)
{
struct unf_flash_link_tmo_s flash_link_tmo;
int ret = UNF_RETURN_ERROR;
unsigned int link_tmo = (unsigned int)time_out;
memset(&flash_link_tmo, 0, sizeof(struct unf_flash_link_tmo_s));
if (!v_lport) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL,
UNF_KEVENT, "[warn]set link tmo lport null");
return ret;
}
/* 1. update gloabl var */
if ((int)atomic_read(&v_lport->link_lose_tmo) == time_out)
return RETURN_OK;
atomic_set(&v_lport->link_lose_tmo, time_out);
/* Serialize the 32-bit timeout into the byte-wise flash record */
flash_link_tmo.writeflag = HIFC_MGMT_TMO_MAGIC_NUM;
flash_link_tmo.link_tmo0 = (unsigned char)link_tmo;
flash_link_tmo.link_tmo1 = (unsigned char)(link_tmo >> 8);
flash_link_tmo.link_tmo2 = (unsigned char)(link_tmo >> 16);
flash_link_tmo.link_tmo3 = (unsigned char)(link_tmo >> 24);
/* 2. write to up */
ret = unf_set_link_lose_tmo_to_up(v_lport, &flash_link_tmo);
return ret;
}
/* Apply the link-loss timeout to every registered lport. Ports are
 * moved to a private temporary list (with a reference held) so the
 * global lock is not held across unf_set_link_lose_tmo, then returned
 * to the global list one by one. Always returns RETURN_OK.
 */
int unf_set_link_lose_tmo_to_all(int time_out)
{
int ret = RETURN_OK;
struct list_head list_lport_tmp_head;
struct list_head *node = NULL;
struct list_head *next_node = NULL;
struct unf_lport_s *lport = NULL;
unsigned long flags = 0;
INIT_LIST_HEAD(&list_lport_tmp_head);
/* Detach all lports onto the temp list under the global lock */
spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags);
list_for_each_safe(node, next_node,
&global_lport_mgr.list_lport_list_head) {
lport = list_entry(node, struct unf_lport_s, entry_lport);
list_del_init(&lport->entry_lport);
list_add_tail(&lport->entry_lport, &list_lport_tmp_head);
(void)unf_lport_refinc(lport);
}
spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags);
/* Update each port without the global lock, then restore it */
while (!list_empty(&list_lport_tmp_head)) {
node = (&list_lport_tmp_head)->next;
lport = list_entry(node, struct unf_lport_s, entry_lport);
if (lport)
unf_set_link_lose_tmo(lport, time_out);
spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock,
flags);
list_del_init(&lport->entry_lport);
list_add_tail(&lport->entry_lport,
&global_lport_mgr.list_lport_list_head);
spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock,
flags);
/* Drop the reference taken while detaching */
unf_lport_ref_dec_to_destroy(lport);
}
return ret;
}
/* Admin/tool command: walk every exchange manager of v_lport, trace the
 * busy INI and SFS exchanges, count free/busy exchanges, and report the
 * totals in v_input->buff_out (struct unf_adm_xchg).
 * NOTE(review): aborted/tgt_busy/delay/wait are never incremented and
 * are always reported as 0 — presumably reserved fields of the tool
 * protocol; confirm against the hifc tool definitions.
 * NOTE(review): v_input and buff_out are used without NULL checks;
 * verify the tool channel guarantees both.
 * Returns RETURN_OK if at least one exchange manager with a hot pool
 * was visited, UNF_RETURN_ERROR otherwise.
 */
static int unf_cm_adm_show_xchg(struct unf_lport_s *v_lport,
struct unf_hinicam_pkg *v_input)
{
int ret = UNF_RETURN_ERROR;
struct unf_xchg_mgr_s *xchg_mgr = NULL;
struct unf_xchg_s *xchg = NULL;
struct list_head *xchg_node = NULL;
struct list_head *next_xchg_node = NULL;
unsigned long flags = 0;
unsigned int aborted = 0;
unsigned int ini_busy = 0;
unsigned int tgt_busy = 0;
unsigned int delay = 0;
unsigned int free = 0;
unsigned int wait = 0;
unsigned int sfs_free = 0;
unsigned int sfs_busy = 0;
unsigned int i;
struct unf_adm_xchg *buff_out = NULL;
buff_out = (struct unf_adm_xchg *)v_input->buff_out;
/* Output buffer must be large enough for the result structure */
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, *v_input->out_size >=
sizeof(struct unf_adm_xchg), return UNF_RETURN_ERROR);
for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
xchg_mgr = unf_get_xchg_mgr_by_lport(v_lport, i);
if (!xchg_mgr)
continue;
if (!xchg_mgr->hot_pool)
continue;
/* hot Xchg */
spin_lock_irqsave(&xchg_mgr->hot_pool->xchg_hot_pool_lock,
flags);
UNF_TRACE(0x2659, UNF_LOG_NORMAL, UNF_INFO, "ini busy :");
list_for_each_safe(xchg_node, next_xchg_node,
&xchg_mgr->hot_pool->ini_busylist) {
ini_busy++;
xchg = list_entry(xchg_node, struct unf_xchg_s,
list_xchg_entry);
UNF_TRACE(0x2660, UNF_LOG_NORMAL, UNF_INFO,
"0x%p--0x%x--0x%x--0x%x--0x%x--0x%x--0x%x--0x%x--0x%x--0x%x--(%llu)",
xchg,
(unsigned int)xchg->hot_pool_tag,
(unsigned int)xchg->xchg_type,
(unsigned int)xchg->ox_id,
(unsigned int)xchg->rx_id,
(unsigned int)xchg->sid,
(unsigned int)xchg->did,
(unsigned int)xchg->seq_id,
(unsigned int)xchg->io_state,
atomic_read(&xchg->ref_cnt),
xchg->alloc_jif);
}
UNF_TRACE(0x2665, UNF_LOG_NORMAL, UNF_INFO, "SFS Busy:");
list_for_each_safe(xchg_node, next_xchg_node,
&xchg_mgr->hot_pool->sfs_busylist) {
sfs_busy++;
xchg = list_entry(xchg_node, struct unf_xchg_s,
list_xchg_entry);
UNF_TRACE(0x2666, UNF_LOG_NORMAL, UNF_INFO,
"0x%p--0x%x--0x%x--0x%x--0x%x--0x%x--0x%x--0x%x--0x%x--0x%x--(%llu)",
xchg,
(unsigned int)xchg->hot_pool_tag,
(unsigned int)xchg->xchg_type,
(unsigned int)xchg->ox_id,
(unsigned int)xchg->rx_id,
(unsigned int)xchg->sid,
(unsigned int)xchg->did,
(unsigned int)xchg->seq_id,
(unsigned int)xchg->io_state,
atomic_read(&xchg->ref_cnt),
xchg->alloc_jif);
}
spin_unlock_irqrestore(&xchg_mgr->hot_pool->xchg_hot_pool_lock,
flags);
/* free Xchg */
spin_lock_irqsave(&xchg_mgr->free_pool.xchg_free_pool_lock,
flags);
list_for_each_safe(xchg_node, next_xchg_node,
&xchg_mgr->free_pool.list_free_xchg_list) {
free++;
}
list_for_each_safe(xchg_node, next_xchg_node,
&xchg_mgr->free_pool.list_sfs_xchg_list) {
sfs_free++;
}
spin_unlock_irqrestore(&xchg_mgr->free_pool.xchg_free_pool_lock,
flags);
ret = RETURN_OK;
}
/* Publish all counters to the tool output buffer */
buff_out->aborted = aborted;
buff_out->ini_busy = ini_busy;
buff_out->tgt_busy = tgt_busy;
buff_out->delay = delay;
buff_out->free = free;
buff_out->wait = wait;
buff_out->sfs_free = sfs_free;
buff_out->sfs_busy = sfs_busy;
UNF_REFERNCE_VAR(xchg);
return ret;
}
/* Tool/admin handler: get or set the link-loss timeout ("link tmo") of an
 * L_Port. buff_in/buff_out are struct unf_link_tmo_opt_s buffers shared
 * with the management tool; the admin message head is copied over the
 * start of buff_out last, after the payload fields have been filled in.
 * Returns RETURN_OK on success, RETURN_ERROR on invalid arguments.
 */
static int unf_cm_adm_link_time_out_opt(struct unf_lport_s *v_lport,
					struct unf_hinicam_pkg *v_input)
{
	int ret = RETURN_OK;
	int time_out = 0;
	struct unf_link_tmo_opt_s *buff_in = NULL;
	struct unf_link_tmo_opt_s *buff_out = NULL;
	struct unf_admin_msg_head msg_head = { 0 };

	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input,
			return RETURN_ERROR);
	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_lport,
			return RETURN_ERROR);
	/* NOTE(review): unlike the sibling handlers, in_size/out_size are
	 * not validated against sizeof(struct unf_link_tmo_opt_s) here --
	 * presumably guaranteed by the dispatch layer; confirm.
	 */
	buff_in = (struct unf_link_tmo_opt_s *)(v_input->buff_in);
	buff_out = (struct unf_link_tmo_opt_s *)(v_input->buff_out);
	msg_head.status = UNF_ADMIN_MSG_DONE;
	msg_head.size = sizeof(struct unf_admin_msg_head);
	if (buff_in->link_opt) {
		/* set link tmo value: read the old value first so it can be
		 * reported back to the tool below
		 */
		time_out = unf_get_link_lose_tmo(v_lport);
		/* compatible for PI2 branch tool (not release)not
		 * include syncAllPort section
		 */
		if (v_input->in_size > 16) {
			/* newer tool layout: request carries sync_all_port */
			if (buff_in->sync_all_port)
				/* sync to all other lport */
				unf_set_link_lose_tmo_to_all(buff_in->tmo_value);
			else
				unf_set_link_lose_tmo(v_lport,
						      buff_in->tmo_value);
			buff_out->sync_all_port = 1;
		} else {
			/* legacy tool layout: always apply to all ports */
			unf_set_link_lose_tmo_to_all(buff_in->tmo_value);
		}
		buff_out->link_opt = 1;
		/* return orige value */
		buff_out->tmo_value = time_out;
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_KEVENT,
			  "[info]set fc port(0x%0x)link tmo value(%d -> %d) success .",
			  v_lport->nport_id, time_out, buff_out->tmo_value);
	} else {
		/* get link tmo value */
		buff_out->tmo_value = unf_get_link_lose_tmo(v_lport);
		buff_out->link_opt = 0;
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR,
			  "get fc port(0x%0x) link tmo value(%d) success .",
			  v_lport->nport_id, buff_out->tmo_value);
	}
	/* reply is echoed at the request size; the head overwrites the
	 * first bytes of buff_out
	 */
	*v_input->out_size = v_input->in_size;
	memcpy((void *)buff_out, &msg_head, sizeof(struct unf_admin_msg_head));
	return ret;
}
static int unf_cm_adm_log_level_opt(struct unf_lport_s *v_lport,
struct unf_hinicam_pkg *v_input)
{
int ret = RETURN_OK;
unsigned int log_level = 0;
unsigned int log_count = 0;
struct unf_log_level_opt_s *buff_in = NULL;
struct unf_log_level_opt_s *buff_out = NULL;
struct unf_admin_msg_head msg_head = { 0 };
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input,
return RETURN_ERROR);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE,
v_input->in_size >= sizeof(struct unf_log_level_opt_s),
return RETURN_ERROR);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE,
*v_input->out_size >=
sizeof(struct unf_log_level_opt_s),
return RETURN_ERROR);
buff_in = (struct unf_log_level_opt_s *)(v_input->buff_in);
buff_out = (struct unf_log_level_opt_s *)(v_input->buff_out);
msg_head.status = UNF_ADMIN_MSG_DONE;
msg_head.size = sizeof(struct unf_admin_msg_head);
if (buff_in->log_opt) {
/* set log level value */
log_level = log_print_level;
log_count = log_limted_times;
log_print_level = buff_in->log_level;
log_limted_times = buff_in->log_fre_qunce;
buff_out->log_opt = 1;
/* return orige value */
buff_out->log_level = log_level;
buff_out->log_fre_qunce = log_count;
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR,
"set fc log level(%u -> %u), frenqunce(%u -> %u)in 2s success .",
log_level, log_print_level, log_count,
log_limted_times);
} else {
/* get link tmo value */
buff_out->log_level = log_print_level;
buff_out->log_fre_qunce = log_limted_times;
buff_out->log_opt = 0;
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR,
"get fc log level(%u),frenqunce(%u) in 2s success .",
buff_out->log_level, buff_out->log_fre_qunce);
}
*v_input->out_size = sizeof(struct unf_log_level_opt_s);
memcpy((void *)buff_out, &msg_head, sizeof(struct unf_admin_msg_head));
return ret;
}
/* Send UNF_ECHO_SEND_MAX_TIMES ECHO (fcping) frames from the L_Port
 * identified by v_port_id to the remote N_Port v_nport_id, one per
 * second. *v_link_delay receives the measured round-trip time of the
 * last echo (0xffffffff on failure). Returns the result of the last
 * echo, or UNF_RETURN_ERROR if either port cannot be resolved.
 */
int unf_cm_echo_test(unsigned int v_port_id, unsigned int v_nport_id,
		     unsigned int *v_link_delay)
{
	struct unf_lport_s *unf_lport = NULL;
	struct unf_rport_s *unf_rport = NULL;
	unsigned int loop = 0;
	int ret = RETURN_OK;

	unf_lport = unf_find_lport_by_port_id(v_port_id);
	if (!unf_lport) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR,
			  "fcping request failed [invalid source lport (0x%x)].\n",
			  v_port_id);
		return UNF_RETURN_ERROR;
	}
	/* the fabric login address is never a valid echo destination */
	unf_rport = unf_get_rport_by_nport_id(unf_lport, v_nport_id);
	if ((v_nport_id == UNF_FC_FID_FLOGI) || (!unf_rport)) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR,
			  "fcping request failed [invalid destination rport(0x%x)].\n",
			  v_nport_id);
		return UNF_RETURN_ERROR;
	}
	for (loop = 0; loop < UNF_ECHO_SEND_MAX_TIMES; loop++) {
		ret = (int)unf_send_echo(unf_lport, unf_rport, v_link_delay);
		if (ret == RETURN_OK) {
			UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL,
				  UNF_MAJOR,
				  "fcping request succeed within %u us [lport(0x%x)->rport(0x%x)].\n",
				  *v_link_delay, v_port_id, v_nport_id);
		} else {
			*v_link_delay = 0xffffffff;
			UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL,
				  UNF_MAJOR,
				  "fcping request failed [lport(0x%x)-> rport(0x%x)].\n",
				  v_port_id, v_nport_id);
		}
		/* pace the echoes one second apart */
		msleep(1000);
	}
	return ret;
}
static int unf_cm_link_delay_get(struct unf_lport_s *v_lport,
struct unf_hinicam_pkg *v_input)
{
int ret = UNF_RETURN_ERROR;
unsigned int link_delay = 0xffffffff;
unsigned int nport_id = 0xffffff;
unsigned int port_id = 0;
struct unf_adm_cmd *buff_in = NULL;
struct unf_adm_cmd *buff_out = NULL;
struct unf_admin_msg_head msg_head = { 0 };
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input,
return UNF_RETURN_ERROR);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, *v_input->out_size >=
sizeof(struct unf_adm_cmd), return UNF_RETURN_ERROR);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input->in_size >=
sizeof(struct unf_adm_cmd), return UNF_RETURN_ERROR);
buff_in = (struct unf_adm_cmd *)(v_input->buff_in);
buff_out = (struct unf_adm_cmd *)(v_input->buff_out);
port_id = v_lport->port_id;
nport_id = buff_in->arg[0];
msg_head.status = UNF_ADMIN_MSG_DONE;
ret = unf_cm_echo_test(port_id, nport_id, &link_delay);
if ((ret == RETURN_OK) && (link_delay != 0xffffffff)) {
buff_out->arg[0] = link_delay;
msg_head.size = sizeof(struct unf_admin_msg_head) +
sizeof(unsigned int) * 1;
} else {
msg_head.status = UNF_ADMIN_MSG_FAILED;
msg_head.size = sizeof(struct unf_admin_msg_head);
}
*v_input->out_size = sizeof(struct unf_adm_cmd);
memcpy((void *)buff_out, &msg_head, sizeof(struct unf_admin_msg_head));
return ret;
}
/* Return an R_Port index (RPI) to the root L_Port's bitmap pool.
 * v_input points at the unsigned int index to release; a NULL input is
 * silently ignored. Releasing an out-of-range or already-free index
 * only logs a warning. Always returns RETURN_OK (error only when
 * v_lport is NULL).
 */
static unsigned int unf_port_release_rport_index(struct unf_lport_s *v_lport,
						 void *v_input)
{
	struct unf_rport_pool_s *pool = NULL;
	unsigned long flag = 0;
	unsigned int index = INVALID_VALUE32;

	UNF_CHECK_VALID(0x2370, UNF_FALSE, v_lport, return UNF_RETURN_ERROR);
	if (!v_input)
		return RETURN_OK;
	index = *(unsigned int *)v_input;
	if (index >= v_lport->low_level_func.support_max_rport) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT,
			  UNF_ERR,
			  "[warn]Port(0x%x) try to release a not exist rport index(0x%x)",
			  v_lport->port_id, index);
		return RETURN_OK;
	}
	/* the RPI bitmap lives on the root L_Port */
	pool = &((struct unf_lport_s *)v_lport->root_lport)->rport_pool;
	spin_lock_irqsave(&pool->rport_free_pool_lock, flag);
	if (test_bit((int)index, pool->pul_rpi_bitmap))
		clear_bit((int)index, pool->pul_rpi_bitmap);
	else
		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN,
			  UNF_LOG_LOGIN_ATT, UNF_ERR,
			  "[warn]Port(0x%x) try to release a free rport index(0x%x)",
			  v_lport->port_id, index);
	spin_unlock_irqrestore(&pool->rport_free_pool_lock, flag);
	return RETURN_OK;
}
/* Resolve an N_Port ID to the matching local port object: the root
 * L_Port itself, or one of its vports (searching both the active and
 * the intergrad vport lists under the vport pool lock). Returns NULL
 * when no port with that N_Port ID exists.
 */
void *unf_lookup_lport_by_nport_id(void *v_lport, unsigned int v_nport_id)
{
	struct unf_lport_s *root_lport = NULL;
	struct unf_lport_s *vport = NULL;
	struct unf_vport_pool_s *pool = NULL;
	struct list_head *list_heads[2];
	struct list_head *node = NULL;
	struct list_head *safe = NULL;
	unsigned long flag = 0;
	unsigned int i = 0;

	UNF_CHECK_VALID(0x1978, UNF_TRUE, v_lport, return NULL);
	root_lport = ((struct unf_lport_s *)v_lport)->root_lport;
	if (root_lport->nport_id == v_nport_id)
		return root_lport;
	pool = root_lport->vport_pool;
	if (unlikely(!pool)) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_IO_ATT, UNF_WARN,
			  "[warn]Port(0x%x) vport pool is NULL",
			  root_lport->port_id);
		return NULL;
	}
	/* scan the established vports first, then the intergrad ones */
	list_heads[0] = &root_lport->list_vports_head;
	list_heads[1] = &root_lport->list_intergrad_vports;
	spin_lock_irqsave(&pool->vport_pool_lock, flag);
	for (i = 0; i < 2; i++) {
		list_for_each_safe(node, safe, list_heads[i]) {
			vport = list_entry(node, struct unf_lport_s,
					   entry_vport);
			if (vport->nport_id == v_nport_id) {
				spin_unlock_irqrestore(
					&pool->vport_pool_lock, flag);
				return vport;
			}
		}
	}
	spin_unlock_irqrestore(&pool->vport_pool_lock, flag);
	UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO,
		  "Port(0x%x) has no vport Nport ID(0x%x)",
		  root_lport->port_id, v_nport_id);
	return NULL;
}
/* Collect a snapshot of an L_Port's configuration, link state, error
 * counters and vport list into *v_port_info for the management tool.
 * Requires the low-level pfn_ll_port_config_get callback. Returns
 * RETURN_OK, or UNF_RETURN_ERROR when arguments are invalid or the
 * error-code summary cannot be read.
 */
static int unf_get_port_info(struct unf_lport_s *v_lport,
			     struct unf_lport_info *v_port_info)
{
	unsigned int act_speed = INVALID_VALUE32;
	unsigned int cfg_speed = INVALID_VALUE32;
	unsigned int cfg_topo = INVALID_VALUE32;
	enum unf_act_topo_e act_topo = UNF_ACT_TOP_UNKNOWN;
	struct unf_err_code_s fc_err_code;
	unsigned int cfg_led_mode = INVALID_VALUE32;
	struct unf_vport_pool_s *vport_pool = NULL;
	struct unf_lport_s *vport = NULL;
	struct list_head *node = NULL;
	struct list_head *next_node = NULL;
	unsigned long flag = 0;

	UNF_CHECK_VALID(0x2205, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
	UNF_CHECK_VALID(0x2206, UNF_TRUE, v_port_info, return UNF_RETURN_ERROR);
	UNF_CHECK_VALID(0x2207, UNF_TRUE, v_lport->fc_port,
			return UNF_RETURN_ERROR);
	UNF_CHECK_VALID(
		0x2208, UNF_TRUE,
		v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get,
		return UNF_RETURN_ERROR);
	UNF_REFERNCE_VAR(cfg_speed);
	UNF_REFERNCE_VAR(act_topo);
	memset(&fc_err_code, 0, sizeof(fc_err_code));
	/* get port speed */
	cfg_speed = v_lport->low_level_func.lport_cfg_items.port_speed;
	/* actual speed is only meaningful while the link is up */
	if (v_lport->link_up == UNF_PORT_LINK_UP)
		(void)v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get(
			v_lport->fc_port,
			UNF_PORT_CFG_GET_SPEED_ACT, (void *)&act_speed);
	else
		act_speed = UNF_PORT_SPEED_UNKNOWN;
	(void)v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get(
		v_lport->fc_port,
		UNF_PORT_CFG_GET_SPEED_CFG, (void *)&cfg_speed);
	/* same rule for the actual (negotiated) topology */
	if (v_lport->link_up == UNF_PORT_LINK_UP)
		(void)v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get(
			v_lport->fc_port,
			UNF_PORT_CFG_GET_TOPO_ACT, (void *)&act_topo);
	else
		act_topo = UNF_ACT_TOP_UNKNOWN;
	(void)v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get(
		v_lport->fc_port,
		UNF_PORT_CFG_GET_TOPO_CFG, (void *)&cfg_topo);
	(void)v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get(
		v_lport->fc_port,
		UNF_PORT_CFG_GET_LED_STATE, (void *)&cfg_led_mode);
	/* fill the flat snapshot fields */
	v_port_info->port_id = v_lport->port_id;
	v_port_info->options = v_lport->options;
	v_port_info->b_start_work = global_lport_mgr.b_start_work;
	v_port_info->phy_link = UNF_PORT_LINK_UP;
	v_port_info->link_up = v_lport->link_up;
	v_port_info->act_speed = act_speed;
	v_port_info->cfg_speed = cfg_speed;
	v_port_info->port_name = v_lport->port_name;
	v_port_info->tape_support =
		v_lport->low_level_func.lport_cfg_items.tape_support;
	v_port_info->msi = 0;
	v_port_info->ini_io_retry_timeout = 0;
	v_port_info->support_max_npiv_num =
		v_lport->low_level_func.support_max_npiv_num;
	v_port_info->act_topo = act_topo;
	v_port_info->port_topology =
		v_lport->low_level_func.lport_cfg_items.port_topology;
	v_port_info->fc_ser_max_speed =
		v_lport->low_level_func.fc_ser_max_speed;
	/* aggregate link error counters; abort the snapshot on failure */
	if (unf_get_error_code_sum(v_lport, &fc_err_code) != RETURN_OK) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
			  "[err]Port(0x%x) get error code failed",
			  v_lport->port_id);
		return UNF_RETURN_ERROR;
	}
	v_port_info->loss_of_signal_count = fc_err_code.loss_of_signal_count;
	v_port_info->bad_rx_char_count = fc_err_code.bad_rx_char_count;
	v_port_info->loss_of_sync_count = fc_err_code.loss_of_sync_count;
	v_port_info->link_fail_count = fc_err_code.link_fail_count;
	v_port_info->rx_eo_fa_count = fc_err_code.rx_eo_fa_count;
	v_port_info->dis_frame_count = fc_err_code.dis_frame_count;
	v_port_info->bad_crc_count = fc_err_code.bad_crc_count;
	v_port_info->proto_error_count = fc_err_code.proto_error_count;
	v_port_info->chip_type = v_lport->low_level_func.chip_info.chip_type;
	v_port_info->cfg_led_mode = cfg_led_mode;
	/* enumerate the vport IDs; a missing pool simply means no vports */
	v_port_info->vport_num = 0;
	vport_pool = v_lport->vport_pool;
	if (unlikely(!vport_pool))
		return RETURN_OK;
	spin_lock_irqsave(&vport_pool->vport_pool_lock, flag);
	list_for_each_safe(node, next_node, &v_lport->list_vports_head) {
		vport = list_entry(node, struct unf_lport_s, entry_vport);
		v_port_info->vport_id[v_port_info->vport_num] = vport->port_id;
		v_port_info->vport_num = v_port_info->vport_num + 1;
	}
	spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag);
	return RETURN_OK;
}
/* Fill *v_port_info with the identity (port id, WWPN, N_Port ID) of the
 * vport whose index is encoded in v_vport_id. A zero vport index means
 * the ID does not name a vport, which is an error.
 */
static int unf_get_vport_info(struct unf_lport_s *v_lport,
			      unsigned int v_vport_id,
			      struct unf_lport_info *v_port_info)
{
	struct unf_lport_s *vport = NULL;
	unsigned char vp_index = INVALID_VALUE8;

	UNF_CHECK_VALID(0x2203, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
	UNF_CHECK_VALID(0x2203, UNF_TRUE, v_port_info, return UNF_RETURN_ERROR);
	/* the vport index is a bit-field inside the port id */
	vp_index = (v_vport_id & PORTID_VPINDEX_MASK) >> PORTID_VPINDEX_SHIT;
	if (unlikely(vp_index == 0)) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR,
			  "[err]VPortId(0x%x) is not vport", v_vport_id);
		return UNF_RETURN_ERROR;
	}
	vport = unf_cm_lookup_vport_by_vp_index(v_lport, vp_index);
	if (unlikely(!vport)) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR,
			  "[err]VPortId(0x%x) can not be found",
			  v_vport_id);
		return UNF_RETURN_ERROR;
	}
	v_port_info->port_id = vport->port_id;
	v_port_info->port_name = vport->port_name;
	v_port_info->nport_id = vport->nport_id;
	v_port_info->options = 0;
	return RETURN_OK;
}
/* Global-event worker: copy L_Port (or vport, when cmd[3] is set)
 * information for the management tool.
 *
 * v_arg_in is a struct unf_get_allinfo_argout carrying the raw tool
 * buffers; v_arg_out is unused. Writes an admin message head followed
 * by a struct unf_lport_info payload into arg_in->out_buf and sets
 * *arg_in->out_size. Returns RETURN_OK or UNF_RETURN_ERROR.
 */
static int unf_get_all_port_info(void *v_arg_in, void *v_arg_out)
{
	struct unf_lport_s *lport = NULL;
	struct unf_get_allinfo_argout *arg_in = NULL;
	unsigned int current_len = 0;
	struct unf_lport_info *cur_lport_info = NULL;
	struct unf_admin_msg_head msg_head = { 0 };
	int ret = UNF_RETURN_ERROR;
	unsigned int out_buf_len = 0;
	char *out_buf = NULL;
	struct hifc_adm_cmd_s *buff_in = NULL;

	UNF_CHECK_VALID(0x2203, UNF_TRUE, v_arg_in, return UNF_RETURN_ERROR);
	UNF_REFERNCE_VAR(v_arg_out);
	arg_in = (struct unf_get_allinfo_argout *)v_arg_in;
	out_buf = (char *)arg_in->out_buf;
	buff_in = (struct hifc_adm_cmd_s *)arg_in->in_buf;
	lport = (struct unf_lport_s *)arg_in->lport;
	UNF_CHECK_VALID(0x2203, UNF_TRUE, out_buf, return UNF_RETURN_ERROR);
	UNF_CHECK_VALID(0x2203, UNF_TRUE, buff_in, return UNF_RETURN_ERROR);
	UNF_CHECK_VALID(0x2203, UNF_TRUE, lport, return UNF_RETURN_ERROR);
	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, arg_in->in_size >=
			sizeof(struct hifc_adm_cmd_s), return UNF_RETURN_ERROR);
	/* scratch copy of the (large) port info, filled before copy-out */
	cur_lport_info = vmalloc(sizeof(struct unf_lport_info));
	if (!cur_lport_info) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
			  "[err]Port(0x%x) malloc memory fail", lport->port_id);
		((struct unf_admin_msg_head *)out_buf)->status =
			UNF_ADMIN_MSG_FAILED;
		return ret;
	}
	memset(cur_lport_info, 0, sizeof(struct unf_lport_info));
	/* the reply must fit in the tool-supplied buffer (in_size bytes) */
	out_buf_len = arg_in->in_size;
	msg_head.status = UNF_ADMIN_MSG_DONE;
	*arg_in->out_size = out_buf_len;
	/* Storage info */
	current_len += sizeof(struct unf_admin_msg_head);
	if (lport->b_port_removing != UNF_TRUE) {
		/* Cmd[3] is Vportid */
		if (buff_in->cmd[3] != 0) {
			ret = unf_get_vport_info(lport, buff_in->cmd[3],
						 cur_lport_info);
		} else {
			ret = unf_get_port_info(lport, cur_lport_info);
		}
		if (ret != RETURN_OK) {
			UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT,
				  UNF_INFO,
				  "[err]Port(0x%x) get port information error",
				  lport->port_id);
			msg_head.status = UNF_ADMIN_MSG_FAILED;
			msg_head.size = current_len;
			memcpy(out_buf, &msg_head,
			       sizeof(struct unf_admin_msg_head));
			vfree(cur_lport_info);
			return ret;
		}
		if (out_buf_len < current_len + sizeof(struct unf_lport_info)) {
			UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL,
				  UNF_ERR,
				  "[warn]Allocated buff size (%u < %lu) is not enough",
				  out_buf_len,
				  current_len + sizeof(struct unf_lport_info));
			/* Compatible for vport: return Lport info
			 * if tools version is not support npiv
			 */
			memcpy(out_buf + current_len, cur_lport_info,
			       out_buf_len - current_len);
			current_len = out_buf_len;
		} else {
			memcpy(out_buf + current_len, cur_lport_info,
			       sizeof(struct unf_lport_info));
			current_len += sizeof(struct unf_lport_info);
		}
	} else {
		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_INFO,
			  "[warn]Port(0x%x) is removing. Ref count 0x%x",
			  lport->port_id, atomic_read(&lport->lport_ref_cnt));
		msg_head.status = UNF_ADMIN_MSG_FAILED;
	}
	/* the head (carrying the final payload length) is written last */
	msg_head.size = current_len;
	memcpy(out_buf, &msg_head, sizeof(struct unf_admin_msg_head));
	vfree(cur_lport_info);
	return ret;
}
static int unf_cm_get_all_port_info(struct unf_lport_s *v_lport,
struct unf_hinicam_pkg *v_input)
{
struct unf_get_allinfo_argout out = { 0 };
int ret = UNF_RETURN_ERROR;
out.out_buf = v_input->buff_out;
out.in_buf = v_input->buff_in;
out.out_size = v_input->out_size;
out.in_size = v_input->in_size;
out.lport = v_lport;
ret = (int)unf_schedule_global_event((void *)&out,
UNF_GLOBAL_EVENT_SYN,
unf_get_all_port_info);
return ret;
}
static int unf_cm_port_set(struct unf_lport_s *v_lport,
struct unf_hinicam_pkg *v_input)
{
int ret = UNF_RETURN_ERROR;
unsigned int mode = 0; /* 1:portreset 2:sfp on/off */
int turn_on = 0; /* 0:sfp off 1:sfp on */
unsigned int port_id = 0;
void *out_buf = NULL;
struct unf_adm_cmd *buff_in = NULL;
struct unf_admin_msg_head msg_head = { 0 };
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input,
return UNF_RETURN_ERROR);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input->in_size >=
sizeof(struct unf_adm_cmd), return UNF_RETURN_ERROR);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, *v_input->out_size >=
sizeof(struct unf_adm_cmd), return UNF_RETURN_ERROR);
out_buf = v_input->buff_out;
buff_in = v_input->buff_in;
mode = buff_in->arg[0];
port_id = v_lport->port_id;
msg_head.status = UNF_ADMIN_MSG_DONE;
if (mode == 1) {
ret = unf_cm_reset_port(port_id);
if (ret != RETURN_OK)
msg_head.status = UNF_ADMIN_MSG_FAILED;
} else if (mode == 2) {
turn_on = (int)buff_in->arg[1];
if ((turn_on == 0) || (turn_on == 1)) {
ret = unf_cm_sfp_switch(port_id, turn_on);
if (ret != RETURN_OK)
msg_head.status = UNF_ADMIN_MSG_FAILED;
} else {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT,
UNF_ERR,
"[err]Switch sfp failed. Parameter(0x%x) error",
turn_on);
msg_head.status = UNF_ADMIN_MSG_FAILED;
}
}
msg_head.size = sizeof(struct unf_admin_msg_head);
*v_input->out_size = sizeof(struct unf_adm_cmd);
memcpy(out_buf, &msg_head, sizeof(struct unf_admin_msg_head));
return ret;
}
static int unf_cm_topo_set(struct unf_lport_s *v_lport,
struct unf_hinicam_pkg *v_input)
{
int ret = UNF_RETURN_ERROR;
unsigned int topo = 0; /* topology set */
unsigned int port_id = 0;
void *out_buf = NULL;
struct unf_adm_cmd *buff_in = NULL;
struct unf_admin_msg_head msg_head = { 0 };
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input,
return UNF_RETURN_ERROR);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input->in_size >=
sizeof(struct unf_adm_cmd), return UNF_RETURN_ERROR);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, *v_input->out_size >=
sizeof(struct unf_adm_cmd), return UNF_RETURN_ERROR);
out_buf = v_input->buff_out;
buff_in = v_input->buff_in;
topo = buff_in->arg[0];
port_id = v_lport->port_id;
msg_head.status = UNF_ADMIN_MSG_DONE;
if ((topo == UNF_TOP_AUTO_MASK) || (topo == UNF_TOP_LOOP_MASK) ||
(topo == UNF_TOP_P2P_MASK)) {
ret = unf_cm_set_port_topo(port_id, topo);
if (ret != RETURN_OK)
msg_head.status = UNF_ADMIN_MSG_FAILED;
} else {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Set topo failed. Parameter(0x%x) error", topo);
msg_head.status = UNF_ADMIN_MSG_FAILED;
}
msg_head.size = sizeof(struct unf_admin_msg_head);
*v_input->out_size = sizeof(struct unf_adm_cmd);
memcpy(out_buf, &msg_head, sizeof(struct unf_admin_msg_head));
return ret;
}
/* Tool/admin handler: configure the port speed from arg[0] after
 * validating it against the installed SFP's capability and the board's
 * maximum serdes speed. Only when both checks pass is the speed
 * actually applied.
 */
static int unf_cm_port_speed_set(struct unf_lport_s *v_lport,
				 struct unf_hinicam_pkg *v_input)
{
	int ret = UNF_RETURN_ERROR;
	unsigned int port_speed = 0;
	unsigned int port_id = 0;
	void *out_buf = NULL;
	struct unf_adm_cmd *buff_in = NULL;
	struct unf_admin_msg_head msg_head = { 0 };
	struct unf_lport_s *lport = NULL;
	int check_speed_flag = UNF_TRUE;

	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input,
			return UNF_RETURN_ERROR);
	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_lport,
			return UNF_RETURN_ERROR);
	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE,
			v_input->in_size >= sizeof(struct unf_adm_cmd),
			return UNF_RETURN_ERROR);
	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE,
			*v_input->out_size >= sizeof(struct unf_adm_cmd),
			return UNF_RETURN_ERROR);
	lport = v_lport;
	out_buf = v_input->buff_out;
	buff_in = v_input->buff_in;
	port_speed = buff_in->arg[0];
	port_id = v_lport->port_id;
	msg_head.status = UNF_ADMIN_MSG_DONE;
	/* get and check sfp speed */
	if (unf_get_lport_current_info(lport) != RETURN_OK) {
		/* NOTE(review): status is marked FAILED here but is never
		 * reset, so even a subsequently successful speed set still
		 * reports FAILED to the tool -- presumably intentional;
		 * confirm.
		 */
		msg_head.status = UNF_ADMIN_MSG_FAILED;
		lport->low_level_func.sfp_speed = UNF_PORT_SFP_SPEED_ERR;
	}
	if (UNF_CHECK_CONFIG_SPEED_BY_SFSSPEED(lport->low_level_func.sfp_speed,
					       port_speed)) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
			  "[err]Set port speed failed. Speed (0x%x) is greater than SfpSpeed (0x%x)",
			  port_speed, lport->low_level_func.sfp_speed);
		msg_head.status = UNF_ADMIN_MSG_FAILED;
		check_speed_flag = UNF_FALSE;
	} else {
		/* the allowed speed set depends on the board's max speed:
		 * each generation accepts AUTO plus its three native rates
		 */
		if (lport->low_level_func.fc_ser_max_speed ==
		    UNF_PORT_SPEED_32_G) {
			check_speed_flag =
				(port_speed == UNF_PORT_SPEED_AUTO) ||
				(port_speed == UNF_PORT_SPEED_8_G) ||
				(port_speed == UNF_PORT_SPEED_16_G) ||
				(port_speed == UNF_PORT_SPEED_32_G);
		} else if (lport->low_level_func.fc_ser_max_speed ==
			   UNF_PORT_SPEED_16_G) {
			check_speed_flag =
				(port_speed == UNF_PORT_SPEED_AUTO) ||
				(port_speed == UNF_PORT_SPEED_4_G) ||
				(port_speed == UNF_PORT_SPEED_8_G) ||
				(port_speed == UNF_PORT_SPEED_16_G);
		} else if (lport->low_level_func.fc_ser_max_speed ==
			   UNF_PORT_SPEED_8_G) {
			check_speed_flag =
				(port_speed == UNF_PORT_SPEED_AUTO) ||
				(port_speed == UNF_PORT_SPEED_2_G) ||
				(port_speed == UNF_PORT_SPEED_4_G) ||
				(port_speed == UNF_PORT_SPEED_8_G);
		} else {
			UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT,
				  UNF_ERR,
				  "[err]Board maxspeed is unknown");
			msg_head.status = UNF_ADMIN_MSG_FAILED;
			check_speed_flag = UNF_FALSE;
		}
	}
	if (check_speed_flag) {
		ret = unf_cm_set_port_speed(port_id, &port_speed);
		if (ret != RETURN_OK)
			msg_head.status = UNF_ADMIN_MSG_FAILED;
	}
	msg_head.size = sizeof(struct unf_admin_msg_head);
	*v_input->out_size = sizeof(struct unf_adm_cmd);
	memcpy(out_buf, &msg_head, sizeof(struct unf_admin_msg_head));
	return ret;
}
/* Tool/admin handler: create or delete NPIV vports on an L_Port.
 *
 * arg[0] selects the operation:
 *   1 - create a vport with a WWPN built from a fixed upper half and
 *       arg[1] as the low 32 bits (falls through into case 3)
 *   3 - create a vport with an auto-generated WWPN (port_name == 0)
 *   2 - delete the vport identified by index arg[2]
 *   4 - delete every vport on the L_Port
 * An admin message head reporting success/failure is written over the
 * start of the output buffer.
 */
static int unf_cm_set_vport(struct unf_lport_s *v_lport,
			    struct unf_hinicam_pkg *v_input)
{
	unsigned int ret = UNF_RETURN_ERROR;
	struct unf_lport_s *lport = NULL;
	unsigned int mode = 0;
	unsigned int index = 0;
	unsigned int high32 = 0x2000286e;	/* fixed WWPN upper half */
	unsigned int low32 = 0;
	unsigned long long port_name = 0;
	unsigned int port_id = 0;
	void *out_buf = NULL;
	struct unf_adm_cmd *buff_in = NULL;
	struct unf_admin_msg_head msg_head = { 0 };

	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input,
			return UNF_RETURN_ERROR);
	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE,
			v_input->in_size >= sizeof(struct unf_adm_cmd),
			return UNF_RETURN_ERROR);
	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE,
			*v_input->out_size >= sizeof(struct unf_adm_cmd),
			return UNF_RETURN_ERROR);
	out_buf = v_input->buff_out;
	buff_in = v_input->buff_in;
	port_id = v_lport->port_id;
	msg_head.status = UNF_ADMIN_MSG_DONE;
	mode = buff_in->arg[0];
	switch (mode) {
	case 1:
		/* create vport with wwpn */
		low32 = buff_in->arg[1];
		/* Fix: build the WWPN in 64-bit arithmetic. The previous
		 * (unsigned long) cast shifted a 32-bit type by 32 bits on
		 * 32-bit targets, which is undefined behavior.
		 */
		port_name = ((unsigned long long)high32 << 32) | low32;
		//lint -fallthrough
	case 3:
		/* create vport and autogeneration wwpn */
		ret = unf_npiv_conf(port_id, port_name);
		if (ret != RETURN_OK)
			msg_head.status = UNF_ADMIN_MSG_FAILED;
		/* give the new vport time to come up before replying */
		msleep(2000);
		break;
	case 2:
		/* delete vport by vport index */
		index = buff_in->arg[2];
		ret = unf_delete_vport(port_id, index);
		if (ret != RETURN_OK)
			msg_head.status = UNF_ADMIN_MSG_FAILED;
		break;
	case 4:
		/* delete all vport on Lport */
		lport = unf_find_lport_by_port_id(port_id);
		if (!lport) {
			UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL,
				  UNF_ERR,
				  "[err]Port(0x%x) can't find", port_id);
			msg_head.status = UNF_ADMIN_MSG_FAILED;
		} else {
			unf_destroy_all_vports(lport);
			ret = RETURN_OK;
		}
		break;
	default:
		UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR,
			  "[err]Mode is unknown");
		msg_head.status = UNF_ADMIN_MSG_FAILED;
		break;
	}
	msg_head.size = sizeof(struct unf_admin_msg_head);
	*v_input->out_size = sizeof(struct unf_adm_cmd);
	memcpy(out_buf, &msg_head, sizeof(struct unf_admin_msg_head));
	return (int)ret;
}
static int unf_cm_port_info_get(struct unf_lport_s *v_lport,
struct unf_hinicam_pkg *v_input)
{
int ret = UNF_RETURN_ERROR;
unsigned int topo_cfg = 0;
enum unf_act_topo_e topo = UNF_ACT_TOP_UNKNOWN;
unsigned int port_speed = 0;
unsigned int port_id = 0;
struct unf_adm_cmd *buff_out = NULL;
struct unf_admin_msg_head msg_head = { 0 };
struct unf_lport_s *lport = NULL;
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input,
return UNF_RETURN_ERROR);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE,
*v_input->out_size >= sizeof(struct unf_adm_cmd),
return UNF_RETURN_ERROR);
lport = v_lport;
port_id = v_lport->port_id;
buff_out = (struct unf_adm_cmd *)v_input->buff_out;
msg_head.status = UNF_ADMIN_MSG_DONE;
ret = unf_cm_get_port_topo(port_id, &topo_cfg, &topo);
if (ret == RETURN_OK) {
ret = unf_cm_get_port_speed(port_id, &port_speed);
if (ret == RETURN_OK) {
buff_out->arg[0] = lport->port_id;
buff_out->arg[1] = topo_cfg;
buff_out->arg[2] = topo;
buff_out->arg[3] = port_speed;
buff_out->arg[4] = lport->link_up;
msg_head.size = sizeof(struct unf_admin_msg_head) +
sizeof(unsigned int) * 5;
} else {
msg_head.status = UNF_ADMIN_MSG_FAILED;
msg_head.size = sizeof(struct unf_admin_msg_head);
}
} else {
msg_head.status = UNF_ADMIN_MSG_FAILED;
msg_head.size = sizeof(struct unf_admin_msg_head);
}
*v_input->out_size = sizeof(struct unf_adm_cmd);
memcpy(buff_out, &msg_head, sizeof(struct unf_admin_msg_head));
return ret;
}
/* Tool/admin handler: read the SFP EEPROM and return the sfp_info
 * payload after the admin message head. Any failure (read error, or a
 * non-optical connector) is reported through the head status.
 */
static int unf_get_port_sfp_info(struct unf_lport_s *v_lport,
				 struct unf_hinicam_pkg *v_input)
{
#define MIN_SFPINFO_LEN 512
	union unf_sfp_eeprome_info *sfp_info = NULL;
	int ret = UNF_RETURN_ERROR;
	unsigned int status = 0;
	unsigned int sfp_type = 0;
	unsigned int port_id = 0;
	char *buff_out = NULL;
	struct unf_admin_msg_head msg_head = { 0 };

	UNF_CHECK_VALID(0x2203, UNF_TRUE, v_input, return UNF_RETURN_ERROR);
	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE,
			*v_input->out_size >= MIN_SFPINFO_LEN,
			return UNF_RETURN_ERROR);
	buff_out = v_input->buff_out;
	port_id = v_lport->port_id;
	*v_input->out_size = MIN_SFPINFO_LEN;
	msg_head.status = UNF_ADMIN_MSG_DONE;
	sfp_info = vmalloc(sizeof(union unf_sfp_eeprome_info));
	if (!sfp_info) {
		/* Fix: report the failure through the message head instead
		 * of returning with *out_size already set and the output
		 * buffer (including the status head) left uninitialized.
		 */
		msg_head.status = UNF_ADMIN_MSG_FAILED;
		msg_head.size = sizeof(struct unf_admin_msg_head);
		memcpy(buff_out, &msg_head,
		       sizeof(struct unf_admin_msg_head));
		return UNF_RETURN_ERROR;
	}
	memset(sfp_info, 0, sizeof(union unf_sfp_eeprome_info));
	ret = unf_cm_get_sfp_info(port_id, &status, sfp_info, &sfp_type);
	/* only an optical connector counts as a valid read */
	if (ret == UNF_RETURN_ERROR || (status != DRV_CABLE_CONNECTOR_OPTICAL))
		msg_head.status = UNF_ADMIN_MSG_FAILED;
	msg_head.size = sizeof(struct unf_admin_msg_head);
	memcpy(buff_out, &msg_head, sizeof(struct unf_admin_msg_head));
	/* the (possibly zeroed) eeprom payload follows the head */
	memcpy((buff_out + msg_head.size),
	       &sfp_info->sfp_info, sizeof(struct unf_sfp_info_s));
	vfree(sfp_info);
	return ret;
}
static int unf_cm_clear_error_code_sum(struct unf_lport_s *v_lport,
struct unf_hinicam_pkg *v_input)
{
int ret = RETURN_OK;
void *out_buf = NULL;
unsigned int port_id = 0;
struct unf_admin_msg_head msg_head = { 0 };
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input,
return UNF_RETURN_ERROR);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE,
*v_input->out_size >= sizeof(struct unf_adm_cmd),
return UNF_RETURN_ERROR);
out_buf = v_input->buff_out;
port_id = v_lport->port_id;
msg_head.status = UNF_ADMIN_MSG_DONE;
ret = unf_cm_clear_port_error_code_sum(port_id);
if (ret != RETURN_OK)
msg_head.status = UNF_ADMIN_MSG_FAILED;
msg_head.size = sizeof(struct unf_admin_msg_head);
*v_input->out_size = sizeof(struct unf_adm_cmd);
memcpy(out_buf, &msg_head, sizeof(struct unf_admin_msg_head));
return ret;
}
static int unf_cm_bbscn_set(struct unf_lport_s *v_lport,
struct unf_hinicam_pkg *v_input)
{
int ret = UNF_RETURN_ERROR;
unsigned int bbscn_val = 0;
unsigned int port_id = 0;
void *out_buf = NULL;
struct unf_adm_cmd *buff_in = NULL;
struct unf_admin_msg_head msg_head = { 0 };
UNF_CHECK_VALID(INVALID_VALUE32,
UNF_TRUE, v_input, return UNF_RETURN_ERROR);
out_buf = v_input->buff_out;
buff_in = v_input->buff_in;
port_id = v_lport->port_id;
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE,
out_buf, return UNF_RETURN_ERROR);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE,
buff_in, return UNF_RETURN_ERROR);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE,
v_input->in_size >= sizeof(struct unf_adm_cmd),
return UNF_RETURN_ERROR);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE,
*v_input->out_size >= sizeof(struct unf_adm_cmd),
return UNF_RETURN_ERROR);
bbscn_val = buff_in->arg[1];
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]BBSCN value (0x%x)", bbscn_val);
msg_head.status = UNF_ADMIN_MSG_DONE;
if (bbscn_val <= UNF_MAX_BBSCN_VALUE) {
ret = unf_cm_set_port_bbscn(port_id, bbscn_val);
if (ret != RETURN_OK)
msg_head.status = UNF_ADMIN_MSG_FAILED;
} else {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]BBSCN value is invalid(0x%x)", bbscn_val);
msg_head.status = UNF_ADMIN_MSG_FAILED;
}
msg_head.size = sizeof(struct unf_admin_msg_head);
*v_input->out_size = sizeof(struct unf_adm_cmd);
memcpy(out_buf, &msg_head, sizeof(struct unf_admin_msg_head));
return ret;
}
/* Fill the DFX "host counters" reply section: per-L_Port session and
 * scsi-id statistics plus error-handling totals summed over every
 * scsi_id's per-rport DFX counter table.
 */
static void unf_fc_host_counter(struct unf_lport_s *v_lport,
				struct hifc_adm_dfx_cmd_s *v_buff_out)
{
	unsigned int scsi_id = 0;
	unsigned int index = 0;
	struct unf_rport_scsi_id_image_s *scsi_image_table = NULL;

	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_lport, return);
	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_buff_out, return);
	scsi_image_table = &v_lport->rport_scsi_table;
	/* identity + lifetime counters kept on the lport itself */
	v_buff_out->unresult.host_cnt.host_num =
		v_lport->host_info.p_scsi_host->host_no;
	v_buff_out->unresult.host_cnt.port_id = v_lport->port_id;
	v_buff_out->unresult.host_cnt.scsi_session_add_success =
		atomic_read(&v_lport->scsi_session_add_success);
	v_buff_out->unresult.host_cnt.scsi_session_add_failed =
		atomic_read(&v_lport->scsi_session_add_failed);
	v_buff_out->unresult.host_cnt.scsi_session_del_success =
		atomic_read(&v_lport->scsi_session_del_success);
	v_buff_out->unresult.host_cnt.scsi_session_del_failed =
		atomic_read(&v_lport->scsi_session_del_failed);
	v_buff_out->unresult.host_cnt.device_alloc =
		atomic_read(&v_lport->device_alloc);
	v_buff_out->unresult.host_cnt.device_destroy =
		atomic_read(&v_lport->device_destroy);
	v_buff_out->unresult.host_cnt.session_loss_tmo =
		atomic_read(&v_lport->session_loss_tmo);
	v_buff_out->unresult.host_cnt.alloc_scsi_id =
		atomic_read(&v_lport->alloc_scsi_id);
	v_buff_out->unresult.host_cnt.reuse_scsi_id =
		atomic_read(&v_lport->reuse_scsi_id);
	v_buff_out->unresult.host_cnt.resume_scsi_id =
		atomic_read(&v_lport->resume_scsi_id);
	v_buff_out->unresult.host_cnt.add_start_work_failed =
		atomic_read(&v_lport->add_start_work_failed);
	v_buff_out->unresult.host_cnt.add_closing_work_failed =
		atomic_read(&v_lport->add_closing_work_failed);
	/* pack two per-scsi_id session states into each reply entry:
	 * entry [i] holds the states of scsi_ids 2*i and 2*i+1
	 */
	for (scsi_id = 0; scsi_id < UNF_MAX_SCSI_ID / 2; scsi_id++) {
		index = scsi_id * 2;
		v_buff_out->unresult.host_cnt.session_state[scsi_id].session1 =
			(unsigned char)atomic_read(&scsi_image_table->wwn_rport_info_table[index].en_scsi_state);
		index = scsi_id * 2 + 1;
		v_buff_out->unresult.host_cnt.session_state[scsi_id].session2 =
			(unsigned char)atomic_read(&scsi_image_table->wwn_rport_info_table[index].en_scsi_state);
	}
	/* sum error-handling attempts and results over every scsi_id that
	 * has a DFX counter allocated
	 */
	for (scsi_id = 0; scsi_id < UNF_MAX_SCSI_ID; scsi_id++) {
		if (!scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter)
			continue;
		v_buff_out->unresult.host_cnt.abort_io +=
			atomic_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->error_handle[UNF_SCSI_ABORT_IO_TYPE]);
		v_buff_out->unresult.host_cnt.device_reset +=
			atomic_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->error_handle[UNF_SCSI_DEVICE_RESET_TYPE]);
		v_buff_out->unresult.host_cnt.target_reset +=
			atomic_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->error_handle[UNF_SCSI_TARGET_RESET_TYPE]);
		v_buff_out->unresult.host_cnt.bus_reset +=
			atomic_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->error_handle[UNF_SCSI_BUS_RESET_TYPE]);
		v_buff_out->unresult.host_cnt.virtual_reset +=
			atomic_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->error_handle[UNF_SCSI_VIRTUAL_RESET_TYPE]);
		v_buff_out->unresult.host_cnt.abort_io_result +=
			atomic_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->error_handle_result[UNF_SCSI_ABORT_IO_TYPE]);
		v_buff_out->unresult.host_cnt.device_reset_result +=
			atomic_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->error_handle_result[UNF_SCSI_DEVICE_RESET_TYPE]);
		v_buff_out->unresult.host_cnt.target_reset_result +=
			atomic_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->error_handle_result[UNF_SCSI_TARGET_RESET_TYPE]);
		v_buff_out->unresult.host_cnt.bus_reset_result +=
			atomic_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->error_handle_result[UNF_SCSI_BUS_RESET_TYPE]);
		v_buff_out->unresult.host_cnt.virtual_reset_result +=
			atomic_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->error_handle_result[UNF_SCSI_VIRTUAL_RESET_TYPE]);
	}
}
/* Fill v_buff_out->unresult.session_cnt with the DFX statistics of the
 * session identified by @scsi_id on @v_lport: error-handling counters,
 * device alloc/destroy counts, and the identity of the remote/local port.
 * Counter fields are only filled when the session has a dfx_counter;
 * remote-port identity is only filled for a bound session (valid wwpn and
 * a non-NULL rport).
 * NOTE(review): assumes @scsi_id was range-checked by the caller
 * (unf_get_io_dfx_statistics validates it against UNF_MAX_SCSI_ID).
 */
static void unf_fc_session_counter(struct unf_lport_s *v_lport,
				   unsigned int scsi_id,
				   struct hifc_adm_dfx_cmd_s *v_buff_out)
{
	struct unf_wwpn_rport_info_s *rport_info = NULL;

	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_lport, return);
	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_buff_out, return);

	rport_info = &v_lport->rport_scsi_table.wwn_rport_info_table[scsi_id];
	v_buff_out->unresult.session_cnt.port_id = v_lport->port_id;
	v_buff_out->unresult.session_cnt.host_id =
		v_lport->host_info.p_scsi_host->host_no;
	if (rport_info->dfx_counter) {
		/* per-session busy and error-handling statistics */
		v_buff_out->unresult.session_cnt.target_busy =
			atomic64_read(&rport_info->dfx_counter->target_busy);
		v_buff_out->unresult.session_cnt.host_busy =
			atomic64_read(&rport_info->dfx_counter->host_busy);
		v_buff_out->unresult.session_cnt.abort_io =
			atomic_read(&rport_info->dfx_counter->error_handle[UNF_SCSI_ABORT_IO_TYPE]);
		v_buff_out->unresult.session_cnt.device_reset =
			atomic_read(&rport_info->dfx_counter->error_handle[UNF_SCSI_DEVICE_RESET_TYPE]);
		v_buff_out->unresult.session_cnt.target_reset =
			atomic_read(&rport_info->dfx_counter->error_handle[UNF_SCSI_TARGET_RESET_TYPE]);
		v_buff_out->unresult.session_cnt.bus_reset =
			atomic_read(&rport_info->dfx_counter->error_handle[UNF_SCSI_BUS_RESET_TYPE]);
		v_buff_out->unresult.session_cnt.virtual_reset =
			atomic_read(&rport_info->dfx_counter->error_handle[UNF_SCSI_VIRTUAL_RESET_TYPE]);
		/* results of the corresponding error-handling attempts */
		v_buff_out->unresult.session_cnt.abort_io_result =
			atomic_read(&rport_info->dfx_counter->error_handle_result[UNF_SCSI_ABORT_IO_TYPE]);
		v_buff_out->unresult.session_cnt.device_reset_result =
			atomic_read(&rport_info->dfx_counter->error_handle_result[UNF_SCSI_DEVICE_RESET_TYPE]);
		v_buff_out->unresult.session_cnt.target_reset_result =
			atomic_read(&rport_info->dfx_counter->error_handle_result[UNF_SCSI_TARGET_RESET_TYPE]);
		v_buff_out->unresult.session_cnt.bus_reset_result =
			atomic_read(&rport_info->dfx_counter->error_handle_result[UNF_SCSI_BUS_RESET_TYPE]);
		v_buff_out->unresult.session_cnt.virtual_reset_result =
			atomic_read(&rport_info->dfx_counter->error_handle_result[UNF_SCSI_VIRTUAL_RESET_TYPE]);
		v_buff_out->unresult.session_cnt.device_alloc =
			atomic_read(&rport_info->dfx_counter->device_alloc);
		v_buff_out->unresult.session_cnt.device_destroy =
			atomic_read(&rport_info->dfx_counter->device_destroy);
	}
	v_buff_out->unresult.session_cnt.target_id = rport_info->target_id;
	if ((rport_info->wwpn != INVALID_WWPN) && (rport_info->rport)) {
		/* remote-port identity of the bound session */
		v_buff_out->unresult.session_cnt.remote_port_wwpn =
			rport_info->wwpn;
		v_buff_out->unresult.session_cnt.remote_port_nportid =
			rport_info->rport->nport_id;
		v_buff_out->unresult.session_cnt.scsi_state =
			atomic_read(&rport_info->en_scsi_state);
		v_buff_out->unresult.session_cnt.remote_port_state =
			rport_info->rport->rp_state;
		v_buff_out->unresult.session_cnt.remote_port_scsiid =
			rport_info->rport->scsi_id;
		v_buff_out->unresult.session_cnt.remote_port_index =
			rport_info->rport->rport_index;
		if (rport_info->rport->lport) {
			/* local-port identity as seen by this rport */
			v_buff_out->unresult.session_cnt.local_port_wwpn =
				rport_info->rport->lport->port_name;
			v_buff_out->unresult.session_cnt.local_port_nportid =
				rport_info->rport->local_nport_id;
			v_buff_out->unresult.session_cnt.local_port_ini_state =
				rport_info->rport->lport_ini_state;
			v_buff_out->unresult.session_cnt.local_port_state =
				rport_info->rport->lport->en_states;
		}
	}
}
static int unf_fc_session_scsi_cmd_in(
struct unf_hinicam_pkg *v_input,
struct unf_rport_scsi_id_image_s *scsi_image_table)
{
unsigned int scsi_id = 0;
unsigned int scsi_cmd_type = 0;
int ret = RETURN_OK;
struct hifc_adm_dfx_cmd_s *buff_out = NULL;
struct unf_adm_cmd *buff_in = NULL;
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input,
return UNF_RETURN_ERROR);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, scsi_image_table,
return UNF_RETURN_ERROR);
buff_in = (struct unf_adm_cmd *)v_input->buff_in;
buff_out = (struct hifc_adm_dfx_cmd_s *)v_input->buff_out;
scsi_id = buff_in->arg[2];
scsi_cmd_type = buff_in->arg[3];
if (scsi_id >= UNF_MAX_SCSI_ID || scsi_cmd_type >= UNF_MAX_SCSI_CMD)
return UNF_RETURN_ERROR;
if (scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter)
buff_out->unresult.scsi_cmd_in =
atomic64_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->scsi_cmd_cnt[scsi_cmd_type]);
return ret;
}
static int unf_fc_host_scsi_cmd_in_total(
struct unf_hinicam_pkg *v_input,
struct unf_rport_scsi_id_image_s *scsi_image_table)
{
unsigned int scsi_id = 0;
unsigned int scsi_cmd_type = 0;
struct hifc_adm_dfx_cmd_s *buff_out = NULL;
struct unf_adm_cmd *buff_in = NULL;
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input,
return UNF_RETURN_ERROR);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, scsi_image_table,
return UNF_RETURN_ERROR);
buff_in = (struct unf_adm_cmd *)v_input->buff_in;
buff_out = (struct hifc_adm_dfx_cmd_s *)v_input->buff_out;
scsi_cmd_type = buff_in->arg[3];
if (scsi_cmd_type >= UNF_MAX_SCSI_CMD)
return UNF_RETURN_ERROR;
for (scsi_id = 0; scsi_id < UNF_MAX_SCSI_ID; scsi_id++) {
if (!scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter)
continue;
buff_out->unresult.scsi_cmd_in +=
atomic64_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->scsi_cmd_cnt[scsi_cmd_type]);
}
return RETURN_OK;
}
static int unf_fc_host_scsi_cmd_done_total(
struct unf_hinicam_pkg *v_input,
struct unf_rport_scsi_id_image_s *scsi_image_table)
{
unsigned int scsi_id = 0;
unsigned int io_return_value = 0;
int ret = RETURN_OK;
struct hifc_adm_dfx_cmd_s *buff_out = NULL;
struct unf_adm_cmd *buff_in = NULL;
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input,
return UNF_RETURN_ERROR);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, scsi_image_table,
return UNF_RETURN_ERROR);
buff_in = (struct unf_adm_cmd *)v_input->buff_in;
buff_out = (struct hifc_adm_dfx_cmd_s *)v_input->buff_out;
io_return_value = buff_in->arg[3];
if (io_return_value >= UNF_MAX_IO_RETURN_VALUE)
return UNF_RETURN_ERROR;
for (scsi_id = 0; scsi_id < UNF_MAX_SCSI_ID; scsi_id++) {
if (!scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter)
continue;
buff_out->unresult.scsi_cmd_done +=
atomic64_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->io_done_cnt[io_return_value]);
}
return ret;
}
static int unf_fc_session_scsi_cmd_done(
struct unf_hinicam_pkg *v_input,
struct unf_rport_scsi_id_image_s *scsi_image_table)
{
unsigned int scsi_id = 0;
unsigned int io_return_value = 0;
int ret = RETURN_OK;
struct hifc_adm_dfx_cmd_s *buff_out = NULL;
struct unf_adm_cmd *buff_in = NULL;
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input,
return UNF_RETURN_ERROR);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, scsi_image_table,
return UNF_RETURN_ERROR);
buff_in = (struct unf_adm_cmd *)v_input->buff_in;
buff_out = (struct hifc_adm_dfx_cmd_s *)v_input->buff_out;
scsi_id = buff_in->arg[2];
io_return_value = buff_in->arg[3];
if (scsi_id >= UNF_MAX_SCSI_ID ||
io_return_value >= UNF_MAX_IO_RETURN_VALUE)
return UNF_RETURN_ERROR;
if (scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter)
buff_out->unresult.scsi_cmd_done =
atomic64_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->io_done_cnt[io_return_value]);
return ret;
}
/* hifcadm "IO DFX statistics" command handler.
 * arg[1] selects the counter mode, arg[2] a scsi_id (session modes),
 * arg[4] the vport index. Results go to v_input->buff_out as a
 * struct hifc_adm_dfx_cmd_s, headed by a struct unf_admin_msg_head.
 */
static int unf_get_io_dfx_statistics(struct unf_lport_s *v_lport,
				     struct unf_hinicam_pkg *v_input)
{
	int ret = RETURN_OK;
	unsigned int counter_mode = 0;
	struct hifc_adm_dfx_cmd_s *buff_out = NULL;
	struct unf_adm_cmd *buff_in = NULL;
	struct unf_admin_msg_head msg_head = { 0 };
	struct unf_rport_scsi_id_image_s *scsi_image_table = NULL;
	unsigned int scsi_id = 0;
	struct unf_lport_s *vport = NULL;
	unsigned int buff_flag = 0;

	buff_flag = (!v_input) || (!v_input->buff_out) ||
		    (!v_input->buff_in) || (!v_lport);
	if (buff_flag)
		return UNF_RETURN_ERROR;
	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE,
			v_input->in_size >= sizeof(struct unf_adm_cmd),
			return UNF_RETURN_ERROR);
	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE,
			*v_input->out_size >= sizeof(struct hifc_adm_dfx_cmd_s),
			return UNF_RETURN_ERROR);

	buff_in = (struct unf_adm_cmd *)v_input->buff_in;
	buff_out = (struct hifc_adm_dfx_cmd_s *)v_input->buff_out;
	msg_head.status = UNF_ADMIN_MSG_DONE;

	/* resolve the vport addressed by arg[4] */
	vport = unf_cm_lookup_vport_by_vp_index(
			v_lport, (unsigned short)(buff_in->arg[4]));
	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, vport,
			return UNF_RETURN_ERROR);
	scsi_image_table = &vport->rport_scsi_table;
	FC_DRIVE_ACTION_CHECK((!scsi_image_table->wwn_rport_info_table),
			      (msg_head.status = UNF_ADMIN_MSG_FAILED),
			      (ret = UNF_RETURN_ERROR),
			      goto err);

	counter_mode = buff_in->arg[1];
	switch (counter_mode) {
	case FC_HOST_COUNTER:
		unf_fc_host_counter(vport, buff_out);
		break;
	case FC_SESSION_SCSI_CMD_IN:
		ret = unf_fc_session_scsi_cmd_in(v_input, scsi_image_table);
		break;
	case FC_HOST_SCSI_CMD_IN_TOTAL:
		ret = unf_fc_host_scsi_cmd_in_total(v_input, scsi_image_table);
		break;
	case FC_HOST_SCSI_CMD_DONE_TOTAL:
		ret = unf_fc_host_scsi_cmd_done_total(v_input,
						      scsi_image_table);
		break;
	case FC_SESSION_SCSI_CMD_DONE:
		ret = unf_fc_session_scsi_cmd_done(v_input, scsi_image_table);
		break;
	case FC_SESSION_COUNTER:
		scsi_id = buff_in->arg[2];
		FC_DRIVE_ACTION_CHECK((scsi_id >= UNF_MAX_SCSI_ID),
				      (msg_head.status = UNF_ADMIN_MSG_FAILED),
				      (ret = UNF_RETURN_ERROR),
				      goto err);
		unf_fc_session_counter(vport, scsi_id, buff_out);
		break;
	default:
		msg_head.status = UNF_ADMIN_MSG_FAILED;
		ret = UNF_RETURN_ERROR;
		break;
	}
	/* NOTE(review): on failure this returns before the message head is
	 * copied into buff_out, so the UNF_ADMIN_MSG_FAILED status set in
	 * the default branch never reaches the tool, while the goto-err
	 * paths above do deliver it -- confirm whether this path should
	 * also fall through to "err".
	 */
	if (ret != RETURN_OK)
		return ret;
err:
	msg_head.size = sizeof(struct unf_admin_msg_head);
	*v_input->out_size = sizeof(struct hifc_adm_dfx_cmd_s);
	memcpy(buff_out, &msg_head, sizeof(struct unf_admin_msg_head));
	return ret;
}
/* Switch DIF/DIX protection globally and reapply it to every L_Port:
 * update the dif/dix global flags from @v_option, then for each port in
 * the global list: close its SFP and wait (up to 5s) for in-flight IO,
 * unregister and re-register its SCSI host (so the new protection caps
 * take effect), and finally reopen the SFP.
 * Returns UNF_ADMIN_MSG_DONE on success, UNF_ADMIN_MSG_FAILED otherwise.
 */
static int unf_cm_switch_dif(unsigned int v_option,
			     unsigned int v_dix_ip_checksum)
{
#define UNF_WAIT_IO_COMPLETE_TIME_MS 5000
#define UNF_WAIT_ONE_TIME_MS 100
#define UNF_LOOP_TIMES (UNF_WAIT_IO_COMPLETE_TIME_MS / UNF_WAIT_ONE_TIME_MS)
	int ret = UNF_RETURN_ERROR;
	struct unf_lport_s *lport = NULL;
	unsigned long flags = 0;
	int enable_dif;
	unsigned int index;

	dix_flag = v_dix_ip_checksum ? UNF_TRUE : UNF_FALSE;
	/* options 1..3 all enable some form of protection */
	enable_dif = (v_option >= UNF_ENABLE_DIF_DIX_PROT &&
		      v_option <= UNF_ENABLE_DIX_PROT);
	if (enable_dif) {
		dif_sgl_mode = UNF_TRUE;
		hifc_dif_enable = UNF_TRUE;
	}
	switch (v_option) {
	case UNF_DIF_ACTION_NONE:
		/* disable everything */
		dif_sgl_mode = UNF_FALSE;
		hifc_dif_enable = UNF_FALSE;
		hifc_dif_type = 0;
		hifc_guard = 0;
		break;
	case UNF_ENABLE_DIF_DIX_PROT:
		hifc_dif_type = SHOST_DIF_TYPE1_PROTECTION |
				SHOST_DIX_TYPE1_PROTECTION;
		break;
	case UNF_ENABLE_DIF_PROT:
		hifc_dif_type = SHOST_DIF_TYPE1_PROTECTION;
		dif_sgl_mode = UNF_FALSE;
		break;
	case UNF_ENABLE_DIX_PROT:
		hifc_dif_type = SHOST_DIX_TYPE0_PROTECTION;
		break;
	default:
		return UNF_ADMIN_MSG_FAILED;
	}
	/* NOTE(review): in each phase below the global lport list lock is
	 * dropped while an element is processed and re-taken before the
	 * iterator advances; this assumes no entry is removed from the list
	 * concurrently -- verify against the lport teardown path.
	 */
	/* 1. Close Lport's SFP */
	spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags);
	list_for_each_entry(lport, &global_lport_mgr.list_lport_list_head,
			    entry_lport) {
		spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock,
				       flags);
		ret = unf_cm_sfp_switch(lport->port_id, UNF_FALSE);
		if (ret != RETURN_OK) {
			UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT,
				  UNF_ERR,
				  "[err]Port(0x%x) close SFP failed in DIF switch",
				  lport->port_id);
			return UNF_ADMIN_MSG_FAILED;
		}
		/* poll until the port's busy IO has drained (max 5s) */
		for (index = 0; index < UNF_LOOP_TIMES; index++) {
			if (unf_busy_io_completed(lport) == UNF_TRUE)
				break;
			msleep(UNF_WAIT_ONE_TIME_MS);
		}
		spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock,
				  flags);
	}
	spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock,
			       flags);
	/* 2. UnRegister the SCSI host of LPort, including its Vports */
	spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags);
	list_for_each_entry(lport, &global_lport_mgr.list_lport_list_head,
			    entry_lport) {
		spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock,
				       flags);
		unf_unregister_scsi_host(lport);
		spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock,
				  flags);
	}
	spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags);
	/* 3. Register the SCSI host of LPort, including its Vports */
	spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags);
	list_for_each_entry(lport, &global_lport_mgr.list_lport_list_head,
			    entry_lport) {
		spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock,
				       flags);
		if (unf_register_scsi_host(lport) != RETURN_OK) {
			UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT,
				  UNF_WARN, "[warn]Port(0x%x) register scsi host failed in DIF switch",
				  lport->port_id);
			return UNF_ADMIN_MSG_FAILED;
		}
		spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock,
				  flags);
	}
	spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags);
	/* 4. Open Lport's SFP */
	spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags);
	list_for_each_entry(lport, &global_lport_mgr.list_lport_list_head,
			    entry_lport) {
		spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock,
				       flags);
		ret = unf_cm_sfp_switch(lport->port_id, UNF_TRUE);
		if (ret != RETURN_OK) {
			UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT,
				  UNF_ERR,
				  "[err]Port(0x%x) reopen SFP failed in DIF switch",
				  lport->port_id);
			return UNF_ADMIN_MSG_FAILED;
		}
		spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock,
				  flags);
	}
	spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags);
	return UNF_ADMIN_MSG_DONE;
}
/* Configure DIF application/reference tag escape checking from the tool
 * option. For the defined options (0..3) bit 0 selects app-tag checking
 * and bit 1 selects ref-tag checking; any other value enables both.
 * Always returns UNF_ADMIN_MSG_DONE.
 */
static int unf_cm_switch_app_ref_escape(unsigned int v_option)
{
	if (v_option <= UNF_APP_REF_ESC_BOTH_CHECK) {
		dif_app_esc_check = (v_option & 0x1) ?
				    HIFC_DIF_APP_REF_ESC_CHECK :
				    HIFC_DIF_APP_REF_ESC_NOT_CHECK;
		dif_ref_esc_check = (v_option & 0x2) ?
				    HIFC_DIF_APP_REF_ESC_CHECK :
				    HIFC_DIF_APP_REF_ESC_NOT_CHECK;
	} else {
		/* unknown option: check both, matching the original default */
		dif_app_esc_check = HIFC_DIF_APP_REF_ESC_CHECK;
		dif_ref_esc_check = HIFC_DIF_APP_REF_ESC_CHECK;
	}
	return UNF_ADMIN_MSG_DONE;
}
static int unf_cm_select_dif_mode(struct unf_lport_s *v_lport,
struct unf_hinicam_pkg *v_input)
{
unsigned int dif_mode = 0;
unsigned int option = 0;
unsigned int dix_ip_checksum = 0;
struct unf_adm_cmd *buff_in = NULL;
struct unf_adm_cmd *buff_out = NULL;
struct unf_admin_msg_head msg_head = { 0 };
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input,
return UNF_RETURN_ERROR);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input->buff_out,
return UNF_RETURN_ERROR);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input->buff_in,
return UNF_RETURN_ERROR);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE,
v_input->in_size >= sizeof(struct unf_adm_cmd),
return UNF_RETURN_ERROR);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE,
*v_input->out_size >= sizeof(struct unf_adm_cmd),
return UNF_RETURN_ERROR);
buff_in = (struct unf_adm_cmd *)v_input->buff_in;
buff_out = (struct unf_adm_cmd *)v_input->buff_out;
msg_head.status = UNF_ADMIN_MSG_DONE;
dif_mode = buff_in->arg[0];
option = buff_in->arg[1];
dix_ip_checksum = buff_in->arg[2];
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]DIF mode(0x%x) sub option(0x%x 0x%x)",
dif_mode, option, dix_ip_checksum);
switch (dif_mode) {
case UNF_SWITCH_DIF_DIX:
msg_head.status =
(unsigned short)unf_cm_switch_dif(option,
dix_ip_checksum);
break;
case UNF_APP_REF_ESCAPE:
msg_head.status =
(unsigned short)unf_cm_switch_app_ref_escape(option);
break;
default:
msg_head.status = UNF_ADMIN_MSG_FAILED;
goto end;
}
end:
msg_head.size = sizeof(struct unf_admin_msg_head);
*v_input->out_size = sizeof(struct unf_adm_cmd);
memcpy(buff_out, &msg_head, sizeof(struct unf_admin_msg_head));
return RETURN_OK;
}
static int unf_cm_set_dif(struct unf_lport_s *v_lport,
struct unf_hinicam_pkg *v_input)
{
unsigned int dif_switch = 0;
struct unf_adm_cmd *buff_in = NULL;
struct unf_adm_cmd *buff_out = NULL;
struct unf_admin_msg_head msg_head = { 0 };
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input,
return UNF_RETURN_ERROR);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input->buff_out,
return UNF_RETURN_ERROR);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input->buff_in,
return UNF_RETURN_ERROR);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE,
v_input->in_size >= sizeof(struct unf_adm_cmd),
return UNF_RETURN_ERROR);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE,
*v_input->out_size >= sizeof(struct unf_adm_cmd),
return UNF_RETURN_ERROR);
buff_in = (struct unf_adm_cmd *)v_input->buff_in;
buff_out = (struct unf_adm_cmd *)v_input->buff_out;
msg_head.status = UNF_ADMIN_MSG_DONE;
dif_switch = (buff_in->arg[0]) ?
UNF_ENABLE_DIF_DIX_PROT : UNF_DIF_ACTION_NONE;
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]DIF switch is 0x%x", dif_switch);
if (dif_switch == UNF_ENABLE_DIF_DIX_PROT)
msg_head.status = (unsigned short)unf_cm_switch_dif(dif_switch,
UNF_ENABLE_IP_CHECKSUM);
else
msg_head.status = (unsigned short)unf_cm_switch_dif(dif_switch,
UNF_DISABLE_IP_CHECKSUM);
msg_head.size = sizeof(struct unf_admin_msg_head);
*v_input->out_size = sizeof(struct unf_adm_cmd);
memcpy(buff_out, &msg_head, sizeof(struct unf_admin_msg_head));
return RETURN_OK;
}
/* Forward a save-info buffer to the low level driver via the
 * UNF_PORT_CFG_SAVE_HBA_INFO port-config-set callback.
 * Returns the callback's result, or UNF_RETURN_ERROR when the callback is
 * not installed or a parameter is NULL.
 */
static unsigned int unf_save_port_info(struct unf_lport_s *lport,
				       void *save_info_addr)
{
	UNF_CHECK_VALID(0x2271, UNF_TRUE, save_info_addr,
			return UNF_RETURN_ERROR);
	UNF_CHECK_VALID(0x2271, UNF_TRUE, lport, return UNF_RETURN_ERROR);

	if (!lport->low_level_func.port_mgr_op.pfn_ll_port_config_set) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR,
			  "Port(0x%x)'s corresponding function is NULL.",
			  lport->port_id);
		return UNF_RETURN_ERROR;
	}

	return lport->low_level_func.port_mgr_op.pfn_ll_port_config_set(
			lport->fc_port,
			UNF_PORT_CFG_SAVE_HBA_INFO, (void *)save_info_addr);
}
/* Build a UNF_PORT_BASE_INFO save record (bb_scn, configured speed,
 * topology and FEC) in the buffer headed by @v_save_info and send it to
 * the firmware through unf_save_port_info().
 * Returns RETURN_OK on success, UNF_RETURN_ERROR otherwise.
 */
static unsigned int unf_save_port_base_info(struct unf_lport_s *lport,
					    void *v_save_info)
{
	struct unf_save_info_head_s *save_info_head = v_save_info;
	struct unf_port_info_entry_s *sava_port_entry = NULL;
	struct unf_low_level_port_mgr_op_s *port_mgr = NULL;
	unsigned int cfg_speed = 0;
	unsigned int topo_cfg = 0;
	int fec = UNF_FALSE;
	int ret = UNF_RETURN_ERROR;

	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, lport,
			return UNF_RETURN_ERROR);
	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, save_info_head,
			return UNF_RETURN_ERROR);

	save_info_head->opcode = 0;
	/* write information to up */
	save_info_head->type = UNF_PORT_BASE_INFO; /* port base info */
	save_info_head->entry_num = 1;
	save_info_head->next = 0xffff;	/* single record, no continuation */
	/* the entry payload immediately follows the header */
	sava_port_entry = (struct unf_port_info_entry_s *)
			  ((void *)(save_info_head + 1));
	port_mgr = &lport->low_level_func.port_mgr_op;
	if (!port_mgr->pfn_ll_port_config_get) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR,
			  "Port(0x%x)'s corresponding function is NULL.",
			  lport->nport_id);
		return UNF_RETURN_ERROR;
	}
	/* get Bbscn */
	sava_port_entry->bb_scn = unf_low_level_bbscn(lport);
	/* get speed */
	port_mgr->pfn_ll_port_config_get(lport->fc_port,
					 UNF_PORT_CFG_GET_SPEED_CFG,
					 (void *)&cfg_speed);
	sava_port_entry->speed = cfg_speed;
	/* get topo */
	port_mgr->pfn_ll_port_config_get(lport->fc_port,
					 UNF_PORT_CFG_GET_TOPO_CFG,
					 (void *)&topo_cfg);
	sava_port_entry->topo = topo_cfg;
	/* get fec */
	port_mgr->pfn_ll_port_config_get(lport->fc_port,
					 UNF_PORT_CFG_GET_FEC,
					 (void *)&fec);
	sava_port_entry->fec = fec;
	ret = (int)unf_save_port_info(lport, v_save_info);
	if (ret != RETURN_OK) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_WARN,
			  "[warn]Port(0x%x) send mailbox fail",
			  lport->port_id);
		return UNF_RETURN_ERROR;
	}
	return RETURN_OK;
}
/* Persist the base configuration of the port identified by @v_port_id:
 * first tell the firmware to clean its flush region, then write a fresh
 * UNF_PORT_BASE_INFO record.
 * Returns RETURN_OK on success, UNF_RETURN_ERROR otherwise.
 */
unsigned int unf_cm_save_port_info(unsigned int v_port_id)
{
	struct unf_lport_s *lport = NULL;
	struct unf_save_info_head_s *save_info = NULL;
	unsigned int ret = UNF_RETURN_ERROR;

	lport = unf_find_lport_by_port_id(v_port_id);
	if (!lport) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR,
			  "[err]Port(0x%x) can not be found", v_port_id);
		return ret;
	}

	save_info = vmalloc(SAVE_PORT_INFO_LEN);
	if (!save_info) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR,
			  "[err]Can't alloc buffer for saving port info");
		return ret;
	}

	/* step 1: ask the firmware to clean the flush region */
	memset(save_info, 0, SAVE_PORT_INFO_LEN);
	save_info->opcode = 2; /* notify up to clean flush */
	save_info->type = 0xf;
	save_info->entry_num = 0;
	save_info->next = 0xffff;
	ret = unf_save_port_info(lport, save_info);
	if (ret != RETURN_OK) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_MAJOR,
			  "[warn]Port(0x%x) send mailbox fail", lport->port_id);
		goto out;
	}

	/* step 2: save the port base information */
	memset(save_info, 0, SAVE_PORT_INFO_LEN);
	ret = unf_save_port_base_info(lport, save_info);
	if (ret != RETURN_OK)
		UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR,
			  "[err]Port(0x%x) save port base information failed",
			  lport->port_id);
out:
	vfree(save_info);
	return ret;
}
/* Apply a UNF_PORT_BASE_INFO record read back from the firmware to the
 * low level port (UNF_PORT_CFG_SET_HBA_BASE_INFO) and mirror bb_scn and
 * topology into the L_Port configuration items. Records with an
 * unexpected topology value are silently ignored.
 */
static void unf_handle_port_base_info(struct unf_lport_s *lport,
				      struct unf_port_info_entry_s *v_save_info)
{
	struct unf_port_info_entry_s *sava_port_entry = NULL;
	unsigned int ret = UNF_RETURN_ERROR;

	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, lport, return);
	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_save_info, return);

	sava_port_entry = v_save_info;
	/* only loop/p2p/auto topologies are acceptable */
	UNF_CHECK_VALID(INVALID_VALUE32,
			UNF_TRUE,
			(sava_port_entry->topo == UNF_TOP_LOOP_MASK) ||
			(sava_port_entry->topo == UNF_TOP_P2P_MASK) ||
			(sava_port_entry->topo == UNF_TOP_AUTO_MASK),
			return);
	if (!lport->low_level_func.port_mgr_op.pfn_ll_port_config_set) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR,
			  "Port(0x%x)'s corresponding function is NULL.",
			  lport->port_id);
		return;
	}
	ret = lport->low_level_func.port_mgr_op.pfn_ll_port_config_set(
			lport->fc_port,
			UNF_PORT_CFG_SET_HBA_BASE_INFO, (void *)sava_port_entry);
	if (ret != RETURN_OK) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR,
			  "Cannot set port base info");
		return;
	}
	/* update bbsn cfg to Lport */
	lport->low_level_func.lport_cfg_items.bb_scn = sava_port_entry->bb_scn;
	lport->low_level_func.lport_cfg_items.port_topology =
			sava_port_entry->topo;
}
/* Read saved records of @v_type back from the firmware, one buffer at a
 * time, and apply each entry. The firmware fills in how many entries the
 * buffer holds (entry_num) and a continuation token (next); the loop
 * repeats until the token is the 0xffff terminator.
 * Returns RETURN_OK on success, UNF_RETURN_ERROR otherwise.
 */
static unsigned int unf_recovery_save_info(struct unf_lport_s *lport,
					   void *v_save_info,
					   unsigned char v_type)
{
	struct unf_save_info_head_s *save_info_head = v_save_info;
	void *info_entry = NULL;
	int ret = 0;
	unsigned short next_flag = 0;
	unsigned char entry_num = 0;
	unsigned char index = 0;

	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, lport,
			return UNF_RETURN_ERROR);
	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, save_info_head,
			return UNF_RETURN_ERROR);

	do {
		memset(save_info_head, 0, SAVE_PORT_INFO_LEN);
		save_info_head->opcode = 1;
		/* read information from up */
		save_info_head->type = v_type;
		/* vport[qos] info */
		save_info_head->entry_num = 0xff;
		save_info_head->next = next_flag;
		ret = (int)unf_save_port_info(lport, save_info_head);
		if (ret != RETURN_OK) {
			UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT,
				  UNF_WARN,
				  "[warn]Port(0x%x) send mailbox fail",
				  lport->port_id);
			return UNF_RETURN_ERROR;
		}
		/* firmware rewrote entry_num/next and the entries in place */
		next_flag = (unsigned short)save_info_head->next;
		entry_num = (unsigned char)save_info_head->entry_num;
		info_entry = save_info_head + 1;
		for (index = 0; index < entry_num; index++) {
			switch (v_type) {
			case UNF_PORT_BASE_INFO:
				unf_handle_port_base_info(lport, info_entry);
				info_entry = ((struct unf_port_info_entry_s *)
					      info_entry) + 1;
				break;
			default:
				UNF_TRACE(UNF_EVTLOG_DRIVER_ERR,
					  UNF_LOG_EQUIP_ATT,
					  UNF_ERR,
					  "[err]Port(0x%x) handle message failed",
					  lport->port_id);
				return UNF_RETURN_ERROR;
			}
		}
	} while (next_flag != 0xffff);
	return RETURN_OK;
}
/* Restore the saved base configuration of @v_lport by reading the
 * UNF_PORT_BASE_INFO records back from the firmware and applying them.
 * Returns RETURN_OK on success, UNF_RETURN_ERROR otherwise.
 */
unsigned int unf_cm_get_save_info(struct unf_lport_s *v_lport)
{
	void *buf = NULL;
	unsigned int ret = UNF_RETURN_ERROR;

	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_lport,
			return UNF_RETURN_ERROR);

	buf = vmalloc(SAVE_PORT_INFO_LEN);
	if (!buf) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR,
			  "[err]Can't alloc buffer for saving port info");
		return ret;
	}

	/* read back and apply the saved port base information */
	ret = unf_recovery_save_info(v_lport, buf, UNF_PORT_BASE_INFO);
	if (ret != RETURN_OK)
		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_MAJOR,
			  "[warn]Port(0x%x) send mailbox fail",
			  v_lport->port_id);

	vfree(buf);
	return ret;
}
/* Return the configured link-loss timeout of @v_lport, falling back to
 * UNF_LOSE_TMO when the port is NULL or no timeout has been set.
 */
int unf_get_link_lose_tmo(struct unf_lport_s *v_lport)
{
	int tmo;

	if (!v_lport)
		return UNF_LOSE_TMO;

	tmo = atomic_read(&v_lport->link_lose_tmo);
	return tmo ? tmo : UNF_LOSE_TMO;
}
/* Read the flash-resident link-timeout record from the firmware
 * (UNF_PORT_CFG_GET_FLASH_DATA_INFO) into *v_link_tmo.
 * Returns RETURN_OK on success, UNF_RETURN_ERROR otherwise.
 */
int unf_get_link_lose_tmo_from_up(struct unf_lport_s *v_lport,
				  struct unf_flash_link_tmo_s *v_link_tmo)
{
	int ret = UNF_RETURN_ERROR;
	struct unf_flash_data_s flash_data;

	/* the whole flash-data struct must fit in the flash data area */
	if (!v_lport || !v_link_tmo || (sizeof(struct unf_flash_data_s)
	    > HIFC_FLASH_DATA_MAX_LEN)) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_KEVENT,
			  "[warn]get flas link tmo param check fail");
		return ret;
	}
	memset(&flash_data, 0, sizeof(struct unf_flash_data_s));
	if (!v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_KEVENT,
			  "[warn]link tmo fun null");
		return ret;
	}
	if (v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get(
			v_lport->fc_port,
			UNF_PORT_CFG_GET_FLASH_DATA_INFO, &flash_data) !=
	    RETURN_OK) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_KEVENT,
			  "[warn]get link tmo from up fail");
		return ret;
	}
	ret = RETURN_OK;
	memcpy(v_link_tmo, &flash_data.link_tmo, HIFC_FLASH_LINK_TMO_MAX_LEN);
	return ret;
}
/* Upper bound (in the flash record's unit) accepted for a stored
 * link-loss timeout; larger values fall back to the UNF_LOSE_TMO default.
 * Extracted from the previous magic number 600.
 */
#define UNF_MAX_LINK_LOSE_TMO 600

/* Initialize the link-loss timeout of @v_lport from the flash record when
 * a valid one exists (write-flag magic matches and the value is within
 * range); otherwise fall back to the UNF_LOSE_TMO default.
 */
void unf_init_link_lose_tmo(struct unf_lport_s *v_lport)
{
	struct unf_flash_link_tmo_s flash_link_tmo;
	unsigned int tmo;

	memset(&flash_link_tmo, 0, sizeof(struct unf_flash_link_tmo_s));
	if (!v_lport) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_MAJOR,
			  "[warn]int link tmo param check fail");
		return;
	}
	if ((unf_get_link_lose_tmo_from_up(v_lport, &flash_link_tmo) ==
	     RETURN_OK) &&
	    (flash_link_tmo.writeflag == HIFC_MGMT_TMO_MAGIC_NUM)) {
		/* reassemble the timeout from its four stored byte fields
		 * (link_tmo3 is the most significant byte)
		 */
		tmo = (((unsigned int)flash_link_tmo.link_tmo3 << 24) |
		       ((unsigned int)flash_link_tmo.link_tmo2 << 16) |
		       ((unsigned int)flash_link_tmo.link_tmo1 << 8) |
		       flash_link_tmo.link_tmo0);
		if (tmo > UNF_MAX_LINK_LOSE_TMO)
			unf_set_link_lose_tmo(v_lport, UNF_LOSE_TMO);
		else
			atomic_set(&v_lport->link_lose_tmo, (int)tmo);
	} else {
		unf_set_link_lose_tmo(v_lport, UNF_LOSE_TMO);
	}
}
/* Allocate and register the SCSI host for @v_lport, sizing it from the
 * low level configuration items; the resulting handle is stored in
 * v_lport->host_info.p_scsi_host.
 * Returns RETURN_OK on success, UNF_RETURN_ERROR on allocation failure.
 */
unsigned int unf_register_scsi_host(struct unf_lport_s *v_lport)
{
	struct unf_host_param_s host_param = { 0 };
	unf_scsi_host_s **p_scsi_host = NULL;
	struct unf_lport_cfg_item_s *lport_cfg_items = NULL;

	UNF_CHECK_VALID(0x1359, TRUE, v_lport, return UNF_RETURN_ERROR);

	/* Point to -->> L_port->Scsi_host */
	p_scsi_host = &v_lport->host_info.p_scsi_host;
	lport_cfg_items = &v_lport->low_level_func.lport_cfg_items;
	host_param.can_queue = (int)lport_cfg_items->max_queue_depth;
	/* Performance optimization */
	host_param.cmnd_per_lun = UNF_MAX_CMND_PER_LUN;
	host_param.sg_table_size = UNF_MAX_DMA_SEGS;
	host_param.max_id = UNF_MAX_TARGET_NUMBER;
	host_param.max_lun = UNF_DEFAULT_MAX_LUN;
	host_param.max_channel = UNF_MAX_BUS_CHANNEL;
	host_param.max_cmnd_len = UNF_MAX_SCSI_CMND_LEN; /* CDB-16 */
	host_param.dma_boundary = UNF_DMA_BOUNDARY;
	host_param.max_sectors = UNF_MAX_SECTORS;
	host_param.port_id = v_lport->port_id;
	host_param.lport = v_lport;
	host_param.pdev = &v_lport->low_level_func.dev->dev;
	UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO,
		  "[info]Port(0x%x) allocate scsi host: can queue(%u), command performance LUN(%u), max lun(%u)",
		  v_lport->port_id, host_param.can_queue,
		  host_param.cmnd_per_lun, host_param.max_lun);
	if (unf_alloc_scsi_host(p_scsi_host, &host_param) != RETURN_OK) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
			  "[err]Port(0x%x) allocate scsi host failed",
			  v_lport->port_id);
		return UNF_RETURN_ERROR;
	}
	UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT,
		  "[event]Port(0x%x) allocate scsi host(0x%x) succeed",
		  v_lport->port_id, UNF_GET_SCSI_HOST_ID(*p_scsi_host));
	return RETURN_OK;
}
/* Free the SCSI host attached to @v_lport (if any) and advance the
 * L_Port destroy state machine to the "scsi host unregistered" step.
 */
void unf_unregister_scsi_host(struct unf_lport_s *v_lport)
{
	unf_scsi_host_s *p_scsi_host = NULL;
	unsigned int host_no = 0;

	UNF_REFERNCE_VAR(p_scsi_host);
	UNF_CHECK_VALID(0x1360, TRUE, v_lport, return);

	p_scsi_host = v_lport->host_info.p_scsi_host;
	if (p_scsi_host) {
		host_no = UNF_GET_SCSI_HOST_ID(p_scsi_host);
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
			  "[event]Port(0x%x) starting unregister scsi host(0x%x)",
			  v_lport->port_id, host_no);
		unf_free_scsi_host(p_scsi_host);
		/* can`t set p_scsi_host for NULL,
		 * since it does`t alloc by itself
		 */
	} else {
		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT,
			  "[warn]Port(0x%x) unregister scsi host, invalid ScsiHost ",
			  v_lport->port_id);
	}
	/* NOTE(review): when no scsi host was attached, host_no stays 0 and
	 * the "succeed" event below is still logged, with host(0x0).
	 */
	UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
		  "[event]Port(0x%x) unregister scsi host(0x%x) succeed",
		  v_lport->port_id, host_no);
	v_lport->destroy_step = UNF_LPORT_DESTROY_STEP_12_UNREG_SCSI_HOST;
	UNF_REFERNCE_VAR(p_scsi_host);
	UNF_REFERNCE_VAR(host_no);
}
/* Tell the firmware to clean the flush region of the port identified by
 * @v_port_id (the "clear saved info" half of unf_cm_save_port_info()).
 * Returns RETURN_OK on success, UNF_RETURN_ERROR otherwise.
 */
unsigned int unf_cm_clear_flush(unsigned int v_port_id)
{
	struct unf_lport_s *lport = NULL;
	struct unf_save_info_head_s *head = NULL;
	unsigned int ret = UNF_RETURN_ERROR;

	lport = unf_find_lport_by_port_id(v_port_id);
	if (!lport) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR,
			  "[err]Port(0x%x) can not be found", v_port_id);
		return ret;
	}

	head = vmalloc(SAVE_PORT_INFO_LEN);
	if (!head) {
		UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR,
			  "[err]Can't alloc buffer for saving port info");
		return ret;
	}

	/* build and send the clean-flush request */
	memset(head, 0, SAVE_PORT_INFO_LEN);
	head->opcode = 2; /* notify up to clean flush */
	head->type = 0xf;
	head->entry_num = 0;
	head->next = 0xffff;
	ret = unf_save_port_info(lport, head);
	if (ret != RETURN_OK)
		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_MAJOR,
			  "[warn]Port(0x%x) send mailbox fail", lport->port_id);

	vfree(head);
	return ret;
}
/* hifcadm "save data" command handler: arg[0] selects saving
 * (UNF_SAVA_INFO_MODE) or clearing (UNF_CLEAN_INFO_MODE) the persisted
 * port information. The result status is written back to buff_out as a
 * struct unf_admin_msg_head.
 * Fix: v_lport was dereferenced (v_lport->port_id) without any NULL
 * validation; validate it first, consistent with the other handlers.
 */
static int unf_cm_save_data_mode(struct unf_lport_s *v_lport,
				 struct unf_hinicam_pkg *v_input)
{
	int ret = UNF_RETURN_ERROR;
	unsigned int save_data_mode = 0;
	unsigned int port_id = 0;
	void *out_buf = NULL;
	struct unf_adm_cmd *buff_in = NULL;
	struct unf_admin_msg_head msg_head = { 0 };

	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_lport,
			return UNF_RETURN_ERROR);
	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input,
			return UNF_RETURN_ERROR);
	out_buf = v_input->buff_out;
	buff_in = v_input->buff_in;
	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, out_buf,
			return UNF_RETURN_ERROR);
	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, buff_in,
			return UNF_RETURN_ERROR);
	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE,
			v_input->in_size >= sizeof(struct unf_adm_cmd),
			return UNF_RETURN_ERROR);
	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE,
			*v_input->out_size >= sizeof(struct unf_adm_cmd),
			return UNF_RETURN_ERROR);

	/* safe to dereference only after the checks above */
	port_id = v_lport->port_id;
	save_data_mode = buff_in->arg[0];
	msg_head.status = UNF_ADMIN_MSG_DONE;
	if (save_data_mode == UNF_SAVA_INFO_MODE) {
		/* persist current port configuration via mailbox */
		ret = (int)unf_cm_save_port_info(port_id);
		if (ret != RETURN_OK)
			msg_head.status = UNF_ADMIN_MSG_FAILED;
	} else if (save_data_mode == UNF_CLEAN_INFO_MODE) {
		/* discard the previously persisted information */
		ret = (int)unf_cm_clear_flush(port_id);
		if (ret != RETURN_OK)
			msg_head.status = UNF_ADMIN_MSG_FAILED;
	} else {
		UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_MAJOR,
			  "[err]This mode(0x%x) is unknown", save_data_mode);
		msg_head.status = UNF_ADMIN_MSG_FAILED;
	}
	msg_head.size = sizeof(struct unf_admin_msg_head);
	*v_input->out_size = sizeof(struct unf_adm_cmd);
	memcpy(out_buf, &msg_head, sizeof(struct unf_admin_msg_head));
	return ret;
}
int unf_cmd_adm_handler(void *v_lport, struct unf_hinicam_pkg *v_input)
{
struct unf_lport_s *lport = NULL;
int ret = UNF_RETURN_ERROR;
enum unf_msg_format_e msg_formate;
unsigned int index = 0;
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_lport,
return UNF_RETURN_ERROR);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input,
return UNF_RETURN_ERROR);
lport = (struct unf_lport_s *)v_lport;
msg_formate = v_input->msg_format;
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO,
"[info]Enter HIFC_Adm, msg_formate=0x%x, 0x%x",
msg_formate, *v_input->out_size);
/* hifcadm event */
while (index < (sizeof(unf_hifcadm_action) /
sizeof(struct unf_hifcadm_action_s))) {
if ((msg_formate == unf_hifcadm_action[index].hifc_action) &&
unf_hifcadm_action[index].fn_unf_hifc_action) {
ret = unf_hifcadm_action[index].fn_unf_hifc_action(lport, v_input);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EVENT,
UNF_WARN,
"[warn]Port(0x%x) process up msg(0x%x) failed",
lport->port_id, msg_formate);
}
return ret;
}
index++;
}
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_EVENT, UNF_KEVENT,
"[event]Port(0x%x) not support adm cmd, msg type(0x%x) ",
lport->port_id, msg_formate);
return ret;
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __UNF_PORT_MAN_H__
#define __UNF_PORT_MAN_H__
#define UNF_LPORT_POLL_TIMER ((unsigned int)(1 * 1000))	/* poll period, ms */
#define UNF_MAX_BBSCN_VALUE 14	/* upper bound accepted for bb_scn settings */

/* arg[0] values of the UNF_SAVA_DATA admin command */
#define UNF_SAVA_INFO_MODE 0	/* save port information */
#define UNF_CLEAN_INFO_MODE 1	/* clear/flush saved information */

/*
 * When condition is true, run the two failure actions and then the
 * caller-supplied exit statement (typically "return xxx" or "goto lbl").
 *
 * Fix: the last parameter was originally named "return". A C keyword is
 * a legal macro parameter name at preprocessing time, but it is highly
 * confusing and rejected by several static checkers; renamed to
 * fail_return. Call sites pass arguments positionally and are unaffected.
 *
 * NOTE: because the body is wrapped in do/while (0), passing "break" or
 * "continue" as the exit statement only exits this wrapper, not the
 * caller's loop.
 */
#define FC_DRIVE_ACTION_CHECK(condition, fail_do0, fail_do1, fail_return) \
	do { \
		if (condition) { \
			fail_do0; \
			fail_do1; \
			fail_return; \
		} \
	} while (0)

/* Used in hifcadm tool */
/* DIF/DIX protection switches */
#define UNF_ENABLE_DIF_DIX_PROT 1
#define UNF_ENABLE_DIF_PROT 2
#define UNF_ENABLE_DIX_PROT 3

#define UNF_DISABLE_IP_CHECKSUM 0
#define UNF_ENABLE_IP_CHECKSUM 1

/* APP/REF tag escape-check modes for DIF */
#define UNF_APP_REF_ESC_BOTH_NOT_CHECK 0
#define UNF_APP_ESC_CHECK 1
#define UNF_REF_ESC_CHECK 2
#define UNF_APP_REF_ESC_BOTH_CHECK 3
/*
 * Global card manager: cards handled by the driver are linked on
 * list_card_list_head; card_sum counts them. The spinlock protects both.
 */
struct unf_global_card_thread_s {
	struct list_head list_card_list_head;
	spinlock_t global_card_list_lock;	/* protects list and card_sum */
	unsigned int card_sum;
};
/* Global L_Port manager: tracks every L_Port owned by the driver. */
struct unf_global_lport_s {
	struct list_head list_lport_list_head;	/* active ports */
	/* Temporary list, used while traversing the held list */
	struct list_head list_intergrad_head;
	/* Destroy list, used during card removal */
	struct list_head list_destroy_head;
	/* Dirty list: abnormal ports */
	struct list_head list_dirty_head;
	/* Protects the lists above and lport_sum */
	spinlock_t global_lport_list_lock;
	unsigned int lport_sum;		/* number of managed ports */
	unsigned char dft_mode;
	int b_start_work;		/* boolean: manager is operational */
};
/* Input argument: reset the port identified by port_id. */
struct unf_reset_port_argin {
	unsigned int port_id;
};

/* Output arguments: configured topology and negotiated (active) topology. */
struct unf_get_topo_argout {
	unsigned int *topo_cfg;
	enum unf_act_topo_e *en_act_topo;
};

/* Input arguments: set a port's configured topology. */
struct unf_set_topo_argin {
	unsigned int port_id;
	unsigned int topo;
};

/* Input arguments: set a port's BB_SC_N value. */
struct unf_set_bbscn_argin {
	unsigned int port_id;
	unsigned int bb_scn;
};

/* Input arguments: switch a port's SFP transceiver on or off. */
struct unf_set_sfp_argin {
	unsigned int port_id;
	int turn_on;		/* boolean */
};

/* Input arguments: set a port's configured speed. */
struct unf_set_speed_argin {
	unsigned int port_id;
	unsigned int *speed;
};

/* Output arguments: SFP status and EEPROM contents. */
struct unf_get_sfp_argout {
	unsigned int *status;
	union unf_sfp_eeprome_info *sfp_info;
};

/* Buffer descriptor, presumably for the UNF_ALL_INFO_OP query — confirm. */
struct unf_get_allinfo_argout {
	unsigned int *out_size;
	unsigned int in_size;
	void *out_buf;
	void *in_buf;
	void *lport;
};
/* Maps a port-management action code to its handler. */
struct unf_port_action_s {
	unsigned int action;
	unsigned int (*fn_unf_action)(struct unf_lport_s *v_lport,
				      void *v_input);
};

/* Maps a hifcadm tool command (enum unf_msg_format_e) to its handler. */
struct unf_hifcadm_action_s {
	unsigned int hifc_action;
	int (*fn_unf_hifc_action)(struct unf_lport_s *v_lport,
				  struct unf_hinicam_pkg *v_input);
};
/*
 * Snapshot of an L_Port's configuration, link state and error counters,
 * apparently reported to the hifcadm tool.
 */
struct unf_lport_info {
#define NPIVMAX 255	/* capacity of vport_id[] */
	unsigned int port_id;
	unsigned int options;
	int b_start_work;	/* boolean */
	unsigned int phy_link;
	unsigned int link_up;
	unsigned int act_speed;	/* negotiated speed */
	unsigned int cfg_speed;	/* configured speed */
	unsigned int tape_support;
	unsigned long long port_name;	/* presumably the WWPN — confirm */
	unsigned int msi;
	unsigned int ini_io_retry_timeout;
	unsigned int support_max_npiv_num;
	unsigned int act_topo;		/* active topology */
	unsigned int port_topology;	/* configured topology */
	unsigned int fc_ser_max_speed;
	/* link error counters */
	unsigned int loss_of_signal_count;
	unsigned int bad_rx_char_count;
	unsigned int loss_of_sync_count;
	unsigned int link_fail_count;
	unsigned int rx_eo_fa_count;
	unsigned int dis_frame_count;
	unsigned int bad_crc_count;
	unsigned int proto_error_count;
	unsigned int cfg_led_mode;
	unsigned char chip_type;
	unsigned char vport_num;	/* valid entries in vport_id[] */
	unsigned short rsvd1;		/* reserved/padding */
	unsigned int vport_id[NPIVMAX];
	unsigned int nport_id;
};
/* Common head prepended to every admin (hifcadm) response. */
struct unf_admin_msg_head {
	unsigned int size;	/* size of the head/payload, in bytes */
	unsigned short status;	/* enum unf_admin_msg_status_e */
	unsigned char success_num;
	unsigned char rsvd;	/* reserved/padding */
};

#define UNF_PORT_INFO_SIZE 10	/* argument words in unf_adm_cmd */

/* Generic admin command: message head plus argument words. */
struct unf_adm_cmd {
	struct unf_admin_msg_head msg_head;
	unsigned int arg[UNF_PORT_INFO_SIZE];
};

/* Exchange resource counters, presumably for UNF_SHOW_XCHG — confirm. */
struct unf_adm_xchg {
	unsigned int aborted;
	unsigned int ini_busy;
	unsigned int tgt_busy;
	unsigned int delay;
	unsigned int free;
	unsigned int wait;
	unsigned int sfs_free;
	unsigned int sfs_busy;
};
enum unf_admin_msg_status_e {
UNF_ADMIN_MSG_DONE = 0,
UNF_ADMIN_MSG_INCOMPLETE,
UNF_ADMIN_MSG_FAILED,
UNF_ADMIN_MSG_BUTT
};
/* the structure define with fc unf driver */
enum fc_dfx_io_count_type_e {
FC_HOST_COUNTER = 0,
FC_HOST_SCSI_CMD_IN_TOTAL,
FC_HOST_SCSI_CMD_DONE_TOTAL,
FC_SESSION_COUNTER,
FC_SESSION_SCSI_CMD_IN,
FC_SESSION_SCSI_CMD_DONE,
FC_SRB_COUNT,
};
enum unf_msg_format_e {
UNF_PORT_SET_OP = 1,
UNF_TOPO_SET_OP,
UNF_SPEED_SET_OP,
UNF_INFO_GET_OP,
UNF_INFO_CLEAR_OP,
UNF_SFP_INFO_OP,
UNF_DFX,
UNF_FEC_SET = 8,
UNF_BBSCN,
UNF_VPORT,
UNF_LINK_DELAY = 11,
UNF_DIF,
UNF_DIF_CONFIG = 14,
UNF_SAVA_DATA,
UNF_SHOW_XCHG = 23,
UNF_PORTSTAT = 24,
UNF_ALL_INFO_OP = 25,
FC_LINK_TMO_OPT = 26,
FC_DRV_LOG_OPT = 27,
UNF_COMPAT_TEST = 0xFF
};
struct unf_save_info_head_s {
unsigned int opcode : 4;
unsigned int type : 4;
unsigned int entry_num : 8;
unsigned int next : 16;
};
enum unf_save_info_type_e {
UNF_SESSION_QOS = 0,
UNF_PORT_BASE_INFO = 2,
UNF_SAVE_TYPE_BUTT,
};
/* FC_LINK_TMO_OPT payload: get/set the link-loss timeout. */
struct unf_link_tmo_opt_s {
	struct unf_admin_msg_head head;
	unsigned int link_opt;		/* operation selector */
	int tmo_value;			/* timeout value */
	unsigned int sync_all_port;	/* nonzero: apply to every port */
};

/* FC_DRV_LOG_OPT payload: get/set driver log level and frequency. */
struct unf_log_level_opt_s {
	struct unf_admin_msg_head head;
	unsigned int log_opt;		/* operation selector */
	unsigned int log_level;
	unsigned int log_fre_qunce;	/* log frequency (sic) */
};
/* Globals defined in the port manager implementation. */
extern struct unf_global_lport_s global_lport_mgr;
extern struct unf_global_card_thread_s card_thread_mgr;
extern struct workqueue_struct *unf_work_queue;

/* L_Port lookup and creation */
struct unf_lport_s *unf_find_lport_by_port_id(unsigned int v_port_id);
struct unf_lport_s *unf_find_lport_by_scsi_host_id(unsigned int scsi_host_id);
void *unf_lport_create_and_init(
			void *private_data,
			struct unf_low_level_function_op_s *low_level_op);

/* Port configuration / control (hifcadm-driven) */
int unf_cm_reset_port(unsigned int v_port_id);
int unf_cm_sfp_switch(unsigned int v_port_id, int v_bturn_on);
int unf_cm_get_sfp_info(unsigned int v_port_id, unsigned int *v_status,
			union unf_sfp_eeprome_info *v_sfp_info,
			unsigned int *sfp_type);
int unf_cm_set_port_bbscn(unsigned int v_port_id, unsigned int v_bbscn);
int unf_cm_set_port_topo(unsigned int v_port_id, unsigned int v_topo);
int unf_cm_get_port_topo(unsigned int v_port_id,
			 unsigned int *v_topo_cfg,
			 enum unf_act_topo_e *v_en_act_topo);
int unf_cm_clear_port_error_code_sum(unsigned int v_port_id);

/* Link events and port life cycle */
unsigned int unf_fc_port_link_event(void *v_lport, unsigned int v_events,
				    void *v_input);
unsigned int unf_release_local_port(void *v_lport);
void unf_lport_route_work(struct work_struct *v_work);
void unf_lport_update_topo(struct unf_lport_s *v_lport,
			   enum unf_act_topo_e v_enactive_topo);

/* Reference counting */
void unf_lport_ref_dec(struct unf_lport_s *v_lport);
unsigned int unf_lport_refinc(struct unf_lport_s *v_lport);
void unf_lport_ref_dec_to_destroy(struct unf_lport_s *v_lport);

int unf_send_event(unsigned int port_id, unsigned int syn_flag,
		   void *argc_in, void *argc_out,
		   int (*p_func)(void *argc_in, void *argc_out));

/* Module init/deinit */
void unf_port_mgmt_deinit(void);
void unf_port_mgmt_init(void);

int unf_cm_echo_test(unsigned int v_port_id, unsigned int v_nport_id,
		     unsigned int *v_link_delay);
void unf_show_dirty_port(int v_show_only, unsigned int *v_ditry_port_num);
unsigned int unf_get_error_code_sum(struct unf_lport_s *v_lport,
				    struct unf_err_code_s *v_fc_err_code);
int unf_cm_set_port_speed(unsigned int v_port_id, unsigned int *v_speed);
void *unf_lookup_lport_by_nport_id(void *v_lport, unsigned int v_nport_id);
int unf_cmd_adm_handler(void *v_lport, struct unf_hinicam_pkg *v_input);
unsigned int unf_is_lport_valid(struct unf_lport_s *v_lport);

/* Save/clear port info (UNF_SAVA_DATA command) */
unsigned int unf_cm_save_port_info(unsigned int v_port_id);
unsigned int unf_cm_get_save_info(struct unf_lport_s *v_lport);
unsigned int unf_cm_clear_flush(unsigned int v_port_id);

int unf_lport_reset_port(struct unf_lport_s *v_lport, unsigned int v_flag);

/* SCSI host registration */
unsigned int unf_register_scsi_host(struct unf_lport_s *v_lport);
void unf_unregister_scsi_host(struct unf_lport_s *v_lport);

/* Link-loss timeout management */
int unf_get_link_lose_tmo(struct unf_lport_s *v_lport);
int unf_set_link_lose_tmo(struct unf_lport_s *v_lport, int time_out);
void unf_init_link_lose_tmo(struct unf_lport_s *v_lport);
int unf_set_link_lose_tmo_to_all(int time_out);

void unf_destroy_scsi_id_table(struct unf_lport_s *v_lport);
unsigned int unf_lport_login(struct unf_lport_s *v_lport,
			     enum unf_act_topo_e v_en_act_topo);
unsigned int unf_init_scsi_id_table(struct unf_lport_s *v_lport);
void unf_set_lport_removing(struct unf_lport_s *v_lport);
void unf_lport_release_lw_fun_op(struct unf_lport_s *v_lport);
void unf_disc_state_ma(struct unf_lport_s *v_lport,
		       enum unf_disc_event_e v_event);
unsigned int unf_init_lport_mgr_temp(struct unf_lport_s *v_lport);
void unf_release_lport_mgr_temp(struct unf_lport_s *v_lport);
#endif /* __UNF_PORT_MAN_H__ */
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册