Commit 949a895b authored by Chenguangli, committed by Yang Yingliang

scsi/hifc: add hifc driver port resource module

driver inclusion
category: feature
bugzilla: NA

-----------------------------------------------------------------------

This module maintains hifc driver port resources, including HBA, Lport,
Rport, queue, and npiv.
Signed-off-by: Chenguangli <chenguangli2@huawei.com>
Reviewed-by: Zengweiliang <zengweiliang.zengweiliang@huawei.com>
Acked-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 79d2dfad
// SPDX-License-Identifier: GPL-2.0
/* Huawei Fabric Channel Linux driver
* Copyright(c) 2018 Huawei Technologies Co., Ltd
*
*/
#include "unf_common.h"
#include "hifc_chipitf.h"
#define HIFC_MBOX_TIME_SEC_MAX 60
#define HIFC_LINK_UP_COUNT 1
#define HIFC_LINK_DOWN_COUNT 2
#define HIFC_FC_DELETE_CMND_COUNT 3
#define HIFC_MBX_MAX_TIMEOUT 10000
static unsigned int hifc_recv_fc_link_up(struct hifc_hba_s *v_hba,
void *v_buf_in);
static unsigned int hifc_recv_fc_link_down(struct hifc_hba_s *v_hba,
void *v_buf_in);
static unsigned int hifc_recv_fc_del_cmd(struct hifc_hba_s *v_hba,
void *v_buf_in);
static unsigned int hifc_recv_fc_error(struct hifc_hba_s *v_hba,
void *v_buf_in);
static struct hifc_up_2_drv_msg_handle_s up_msg_handle[] = {
{ HIFC_MBOX_RECV_FC_LINKUP, hifc_recv_fc_link_up },
{ HIFC_MBOX_RECV_FC_LINKDOWN, hifc_recv_fc_link_down },
{ HIFC_MBOX_RECV_FC_DELCMD, hifc_recv_fc_del_cmd },
{ HIFC_MBOX_RECV_FC_ERROR, hifc_recv_fc_error }
};
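/*
 * Dispatch an asynchronous message from the uP to the driver: validate the
 * mailbox header against the command byte, then scan up_msg_handle[] for a
 * matching handler. Unmatched commands are logged and the inbound buffer
 * is dumped via PRINT_OUTBOUND_IOB().
 */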
void hifc_up_msg_2_driver_proc(void *v_hwdev_handle, void *v_pri_handle,
unsigned char v_cmd, void *v_buf_in,
unsigned short v_in_size, void *v_buf_out,
unsigned short *v_out_size)
{
unsigned int ret = UNF_RETURN_ERROR;
unsigned int index = 0;
struct hifc_hba_s *hba = NULL;
struct hifc_mbox_header_s *mbx_header = NULL;
HIFC_CHECK(INVALID_VALUE32, v_hwdev_handle, return);
HIFC_CHECK(INVALID_VALUE32, v_pri_handle, return);
HIFC_CHECK(INVALID_VALUE32, v_buf_in, return);
HIFC_CHECK(INVALID_VALUE32, v_buf_out, return);
HIFC_CHECK(INVALID_VALUE32, v_out_size, return);
hba = (struct hifc_hba_s *)v_pri_handle;
if (!hba) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EVENT, UNF_ERR,
"[err]Hba is null");
return;
}
mbx_header = (struct hifc_mbox_header_s *)v_buf_in;
if (mbx_header->cmnd_type != v_cmd) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EVENT, UNF_ERR,
"[err]Port(0x%x) cmd(0x%x) is not matched with header cmd type(0x%x)",
hba->port_cfg.port_id, v_cmd,
mbx_header->cmnd_type);
return;
}
while (index < (sizeof(up_msg_handle) /
sizeof(struct hifc_up_2_drv_msg_handle_s))) {
if ((v_cmd == up_msg_handle[index].cmd) &&
(up_msg_handle[index].pfn_hifc_msg_up2drv_handler)) {
ret =
up_msg_handle[index].pfn_hifc_msg_up2drv_handler(
hba,
v_buf_in);
if (ret != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EVENT,
UNF_ERR,
"[warn]Port(0x%x) process up cmd(0x%x) failed",
hba->port_cfg.port_id, v_cmd);
}
/* Process Done & return */
return;
}
index++;
}
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EVENT, UNF_ERR,
"[err]Port(0x%x) process up cmd(0x%x) failed",
hba->port_cfg.port_id, v_cmd);
PRINT_OUTBOUND_IOB(UNF_MAJOR, v_buf_in, ((unsigned int)v_in_size));
}
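/*
 * Query chip identity (board type, WWPN, WWNN, system MAC) through the
 * HIFC_MBOX_GET_CHIP_INFO mailbox. Both the response status and the
 * response command type are checked before the payload is copied out.
 */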
unsigned int hifc_get_chip_msg(void *v_hba, void *v_mac)
{
struct hifc_hba_s *hba = NULL;
struct unf_get_chip_info_argout *wwn = NULL;
struct hifc_inbox_get_chip_info_s get_chip_info;
union hifc_outmbox_generic_u *chip_info_sts = NULL;
unsigned int ret = UNF_RETURN_ERROR;
HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR);
HIFC_CHECK(INVALID_VALUE32, v_mac, return UNF_RETURN_ERROR);
hba = (struct hifc_hba_s *)v_hba;
wwn = (struct unf_get_chip_info_argout *)v_mac;
memset(&get_chip_info, 0, sizeof(struct hifc_inbox_get_chip_info_s));
chip_info_sts = kmalloc(sizeof(union hifc_outmbox_generic_u),
GFP_ATOMIC);
if (!chip_info_sts) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"malloc outmbox memory failed");
return UNF_RETURN_ERROR;
}
memset(chip_info_sts, 0, sizeof(union hifc_outmbox_generic_u));
get_chip_info.header.cmnd_type = HIFC_MBOX_GET_CHIP_INFO;
get_chip_info.header.length =
HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inbox_get_chip_info_s));
if (hifc_mb_send_and_wait_mbox(hba, &get_chip_info,
sizeof(get_chip_info), chip_info_sts) !=
RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"hifc can't send and wait mailbox, command type: 0x%x.",
get_chip_info.header.cmnd_type);
goto exit;
}
if (chip_info_sts->get_chip_info_sts.status != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"Port(0x%x) mailbox status incorrect status(0x%x) .",
hba->port_cfg.port_id,
chip_info_sts->get_chip_info_sts.status);
goto exit;
}
if (chip_info_sts->get_chip_info_sts.header.cmnd_type !=
HIFC_MBOX_GET_CHIP_INFO_STS) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"Port(0x%x) receive mailbox type incorrect type: 0x%x.",
hba->port_cfg.port_id,
chip_info_sts->get_chip_info_sts.header.cmnd_type);
goto exit;
}
wwn->board_type = chip_info_sts->get_chip_info_sts.board_type;
hba->card_info.card_type = chip_info_sts->get_chip_info_sts.board_type;
wwn->wwpn = chip_info_sts->get_chip_info_sts.wwpn;
wwn->wwnn = chip_info_sts->get_chip_info_sts.wwnn;
wwn->sys_mac = chip_info_sts->get_chip_info_sts.sys_mac;
ret = RETURN_OK;
exit:
kfree(chip_info_sts);
return ret;
}
unsigned int hifc_get_chip_capability(void *hw_dev_handle,
struct hifc_chip_info_s *v_chip_info)
{
struct hifc_inbox_get_chip_info_s get_chip_info;
union hifc_outmbox_generic_u *chip_info_sts = NULL;
unsigned short out_size = 0;
unsigned int ret = UNF_RETURN_ERROR;
HIFC_CHECK(INVALID_VALUE32, hw_dev_handle, return UNF_RETURN_ERROR);
memset(&get_chip_info, 0, sizeof(struct hifc_inbox_get_chip_info_s));
chip_info_sts = kmalloc(sizeof(union hifc_outmbox_generic_u),
GFP_ATOMIC);
if (!chip_info_sts) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"malloc outmbox memory failed");
return UNF_RETURN_ERROR;
}
memset(chip_info_sts, 0, sizeof(union hifc_outmbox_generic_u));
get_chip_info.header.cmnd_type = HIFC_MBOX_GET_CHIP_INFO;
get_chip_info.header.length =
HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inbox_get_chip_info_s));
out_size = sizeof(union hifc_outmbox_generic_u);
if (hifc_msg_to_mgmt_sync(hw_dev_handle, HIFC_MOD_FC,
HIFC_MBOX_GET_CHIP_INFO,
(void *)&get_chip_info.header,
sizeof(struct hifc_inbox_get_chip_info_s),
(union hifc_outmbox_generic_u *)chip_info_sts,
&out_size,
(HIFC_MBX_MAX_TIMEOUT)) != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"hifc can't send and wait mailbox, command type: 0x%x.",
HIFC_MBOX_GET_CHIP_INFO);
goto exit;
}
if (chip_info_sts->get_chip_info_sts.status != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"Port mailbox status incorrect status(0x%x) .",
chip_info_sts->get_chip_info_sts.status);
goto exit;
}
if (chip_info_sts->get_chip_info_sts.header.cmnd_type !=
HIFC_MBOX_GET_CHIP_INFO_STS) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"Port receive mailbox type incorrect type: 0x%x.",
chip_info_sts->get_chip_info_sts.header.cmnd_type);
goto exit;
}
v_chip_info->wwnn = chip_info_sts->get_chip_info_sts.wwnn;
v_chip_info->wwpn = chip_info_sts->get_chip_info_sts.wwpn;
v_chip_info->tape_support = (unsigned char)
chip_info_sts->get_chip_info_sts.tape_support;
ret = RETURN_OK;
exit:
kfree(chip_info_sts);
return ret;
}
void hifc_get_red_info_by_rw_type(struct unf_rw_reg_param_s *param,
struct hifc_inmbox_get_reg_info_s *v_reg_info)
{
if ((param->rw_type == UNF_READ) ||
(param->rw_type == UNF_READ_64)) {
v_reg_info->op_code = 0;
} else if ((param->rw_type == UNF_WRITE) ||
(param->rw_type == UNF_WRITE_64)) {
v_reg_info->op_code = 1;
}
if ((param->rw_type == UNF_READ) ||
(param->rw_type == UNF_WRITE)) {
v_reg_info->reg_len = 32;
} else if ((param->rw_type == UNF_READ_64) ||
(param->rw_type == UNF_WRITE_64)) {
v_reg_info->reg_len = 64;
}
}
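/*
 * Read or write a chip register through the HIFC_MBOX_REG_RW_MODE mailbox.
 * The opcode and access width (32 or 64 bit) are derived from rw_type by
 * hifc_get_red_info_by_rw_type(); for reads, the 64-bit result is
 * reassembled from the low/high 32-bit words of the response.
 */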
unsigned int hifc_rw_reg(void *v_hba, void *v_params)
{
struct hifc_hba_s *hba = NULL;
struct unf_rw_reg_param_s *param = NULL;
struct hifc_inmbox_get_reg_info_s reg_info;
union hifc_outmbox_generic_u *reg_info_sts = NULL;
unsigned int para_value_out_l = 0;
unsigned int para_value_out_h = 0;
unsigned int ret = UNF_RETURN_ERROR;
HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR);
HIFC_CHECK(INVALID_VALUE32, v_params, return UNF_RETURN_ERROR);
hba = (struct hifc_hba_s *)v_hba;
param = (struct unf_rw_reg_param_s *)v_params;
memset(&reg_info, 0, sizeof(struct hifc_inmbox_get_reg_info_s));
reg_info_sts = kmalloc(sizeof(union hifc_outmbox_generic_u),
GFP_ATOMIC);
if (!reg_info_sts) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed");
return UNF_RETURN_ERROR;
}
memset(reg_info_sts, 0, sizeof(union hifc_outmbox_generic_u));
hifc_get_red_info_by_rw_type(param, &reg_info);
reg_info.reg_addr = param->offset;
reg_info.reg_value_l32 = (param->value) & VALUEMASK_L;
reg_info.reg_value_h32 = ((param->value) & VALUEMASK_H) >> 32;
reg_info.header.cmnd_type = HIFC_MBOX_REG_RW_MODE;
reg_info.header.length =
HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_get_reg_info_s));
if (hifc_mb_send_and_wait_mbox(hba, &reg_info,
sizeof(reg_info),
reg_info_sts) != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"hifc can't send and wait mailbox, command type: 0x%x.",
reg_info.header.cmnd_type);
goto exit;
}
if (reg_info_sts->get_reg_info_sts.status != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"Port(0x%x) mailbox status incorrect status(0x%x) .",
hba->port_cfg.port_id,
reg_info_sts->get_reg_info_sts.status);
goto exit;
}
if (reg_info_sts->get_reg_info_sts.header.cmnd_type !=
HIFC_MBOX_REG_RW_MODE_STS) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"Port(0x%x) receive mailbox type incorrect type: 0x%x.",
hba->port_cfg.port_id,
reg_info_sts->get_reg_info_sts.header.cmnd_type);
goto exit;
}
para_value_out_l = reg_info_sts->get_reg_info_sts.reg_value_l32;
para_value_out_h = reg_info_sts->get_reg_info_sts.reg_value_h32;
param->value = (unsigned long long)para_value_out_l |
((unsigned long long)para_value_out_h << 32);
ret = RETURN_OK;
exit:
kfree(reg_info_sts);
return ret;
}
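/*
 * Push the port configuration to the uP via HIFC_MBOX_CONFIG_API: topology
 * mode, configured/maximum speed, per-rate RX BB credits, ESCH values,
 * hard ALPA, port name and loop role. Auto negotiation is enabled when the
 * configured SFP speed is 0.
 */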
unsigned int hifc_config_port_table(struct hifc_hba_s *v_hba)
{
struct hifc_inbox_config_api_s config_api;
union hifc_outmbox_generic_u *out_mbox = NULL;
unsigned int ret = UNF_RETURN_ERROR;
HIFC_CHECK(INVALID_VALUE32, NULL != v_hba, return UNF_RETURN_ERROR);
memset(&config_api, 0, sizeof(config_api));
out_mbox = kmalloc(sizeof(union hifc_outmbox_generic_u), GFP_ATOMIC);
if (!out_mbox) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed");
return UNF_RETURN_ERROR;
}
memset(out_mbox, 0, sizeof(union hifc_outmbox_generic_u));
config_api.header.cmnd_type = HIFC_MBOX_CONFIG_API;
config_api.header.length =
HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inbox_config_api_s));
config_api.op_code = UNDEFINEOPCODE;
/* Convert the topology command from CM into the command the uP recognizes */
/* if the cmd from CM equals UNF_TOP_P2P_MASK, it must be changed into
 * P2P topology; LL uses HIFC_TOP_NON_LOOP_MASK
 */
if ((unsigned char)v_hba->port_topo_cfg == UNF_TOP_P2P_MASK) {
config_api.topy_mode = 0x2;
/* if the cmd from CM equals UNF_TOP_LOOP_MASK, it must be changed into
 * loop topology; LL uses HIFC_TOP_LOOP_MASK
 */
} else if ((unsigned char)v_hba->port_topo_cfg == UNF_TOP_LOOP_MASK) {
config_api.topy_mode = 0x1;
/* if the cmd from CM equals UNF_TOP_AUTO_MASK, it must be changed into
 * auto topology; LL uses HIFC_TOP_AUTO_MASK
 */
} else if ((unsigned char)v_hba->port_topo_cfg == UNF_TOP_AUTO_MASK) {
config_api.topy_mode = 0x0;
} else {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Port(0x%x) topo cmd is error, command type: 0x%x",
v_hba->port_cfg.port_id,
(unsigned char)v_hba->port_topo_cfg);
return UNF_RETURN_ERROR;
}
/* About speed */
config_api.sfp_speed = (unsigned char)(v_hba->port_speed_cfg);
config_api.max_speed = (unsigned char)(v_hba->max_support_speed);
config_api.rx_bbcredit_32g = HIFC_LOWLEVEL_DEFAULT_32G_BB_CREDIT;
config_api.rx_bbcredit_16g = HIFC_LOWLEVEL_DEFAULT_16G_BB_CREDIT;
config_api.rx_bbcredit_842g = HIFC_LOWLEVEL_DEFAULT_842G_BB_CREDIT;
config_api.rdy_cnt_bf_fst_frm = HIFC_LOWLEVEL_DEFAULT_LOOP_BB_CREDIT;
config_api.esch_value_32g = HIFC_LOWLEVEL_DEFAULT_32G_ESCH_VALUE;
config_api.esch_value_16g = HIFC_LOWLEVEL_DEFAULT_16G_ESCH_VALUE;
config_api.esch_value_8g = HIFC_LOWLEVEL_DEFAULT_842G_ESCH_VALUE;
config_api.esch_value_4g = HIFC_LOWLEVEL_DEFAULT_842G_ESCH_VALUE;
config_api.esch_value_2g = HIFC_LOWLEVEL_DEFAULT_842G_ESCH_VALUE;
config_api.esch_bust_size = HIFC_LOWLEVEL_DEFAULT_ESCH_BUS_SIZE;
/* default value:0xFF */
config_api.hard_alpa = 0xFF;
memcpy(config_api.port_name, v_hba->sys_port_name, UNF_WWN_LEN);
/* 1 if the port acts only as a slave; 0 if it participates in
 * master selection
 */
config_api.slave = v_hba->port_loop_role;
/* 1: auto negotiation, 0: fixed-speed negotiation */
if (config_api.sfp_speed == 0)
config_api.auto_sneg = 0x1;
else
config_api.auto_sneg = 0x0;
/* send & wait */
if (hifc_mb_send_and_wait_mbox(v_hba, &config_api,
sizeof(config_api),
out_mbox) != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[warn]Port(0x%x) HIFC can't send and wait mailbox, command type: 0x%x",
v_hba->port_cfg.port_id,
config_api.header.cmnd_type);
goto exit;
}
/* mailbox status check */
if (out_mbox->config_api_sts.status != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR,
"[err]Port(0x%x) receive mailbox type(0x%x) with status(0x%x) error",
v_hba->port_cfg.port_id,
out_mbox->config_api_sts.header.cmnd_type,
out_mbox->config_api_sts.status);
goto exit;
}
/* RSP type check */
if (out_mbox->config_api_sts.header.cmnd_type !=
HIFC_MBOX_CONFIG_API_STS) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR,
"[err]Port(0x%x) receive mailbox type(0x%x) error",
v_hba->port_cfg.port_id,
out_mbox->config_api_sts.header.cmnd_type);
goto exit;
}
ret = RETURN_OK;
exit:
kfree(out_mbox);
return ret;
}
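/*
 * Turn the FC port on or off via HIFC_MBOX_PORT_SWITCH. The request
 * carries the PF id, FIP mode (VN2VF, VLAN discovered automatically), the
 * configured VLAN and the node/port WWNs; the response status and type are
 * both validated.
 */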
unsigned int hifc_port_switch(struct hifc_hba_s *v_hba, int turn_on)
{
struct hifc_inbox_port_switch_s port_switch;
union hifc_outmbox_generic_u *port_switch_sts = NULL;
unsigned int ret = UNF_RETURN_ERROR;
HIFC_CHECK(INVALID_VALUE32, NULL != v_hba, return UNF_RETURN_ERROR);
memset(&port_switch, 0, sizeof(port_switch));
port_switch_sts = kmalloc(sizeof(union hifc_outmbox_generic_u),
GFP_ATOMIC);
if (!port_switch_sts) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed");
return UNF_RETURN_ERROR;
}
memset(port_switch_sts, 0, sizeof(union hifc_outmbox_generic_u));
port_switch.header.cmnd_type = HIFC_MBOX_PORT_SWITCH;
port_switch.header.length =
HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inbox_port_switch_s));
port_switch.op_code = (unsigned char)turn_on;
port_switch.port_type = (unsigned char)v_hba->port_type;
/* set to 0 for now: VN2VF mode, VLAN discovered automatically */
port_switch.host_id = 0;
port_switch.pf_id =
(unsigned char)(hifc_global_func_id(v_hba->hw_dev_handle));
port_switch.fcoe_mode = HIFC_FIP_MODE_VN2VF;
port_switch.conf_vlan = 0xffff;
port_switch.sys_node_name = *(unsigned long long *)v_hba->sys_node_name;
port_switch.sys_port_wwn = *(unsigned long long *)v_hba->sys_port_name;
/* send & wait mailbox */
if (hifc_mb_send_and_wait_mbox(v_hba, &port_switch, sizeof(port_switch),
port_switch_sts) != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[warn]Port(0x%x) HIFC can't send and wait mailbox, command type(0x%x) opcode(0x%x)",
v_hba->port_cfg.port_id,
port_switch.header.cmnd_type, port_switch.op_code);
goto exit;
}
/* check mailbox rsp status */
if (port_switch_sts->port_switch_sts.status != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR,
"[err]Port(0x%x) receive mailbox type(0x%x) status(0x%x) error",
v_hba->port_cfg.port_id,
port_switch_sts->port_switch_sts.header.cmnd_type,
port_switch_sts->port_switch_sts.status);
goto exit;
}
/* check mailbox rsp type */
if (port_switch_sts->port_switch_sts.header.cmnd_type !=
HIFC_MBOX_PORT_SWITCH_STS) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR,
"[err]Port(0x%x) receive mailbox type(0x%x) error",
v_hba->port_cfg.port_id,
port_switch_sts->port_switch_sts.header.cmnd_type);
goto exit;
}
HIFC_TRACE(UNF_EVTLOG_LINK_SUC, UNF_LOG_EQUIP_ATT, UNF_MAJOR,
"[event]Port(0x%x) switch succeed, turns to %s",
v_hba->port_cfg.port_id,
(turn_on) ? "on" : "off");
ret = RETURN_OK;
exit:
kfree(port_switch_sts);
return ret;
}
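/*
 * Send negotiated login parameters (TX BB credit, E_D_TOV, R_A_TOV,
 * BB_SC_N) to the uP. When BB_SC_N is non-zero the mailbox is sent
 * synchronously and the response is validated; otherwise the command is
 * posted asynchronously through hifc_msg_to_mgmt_async().
 */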
unsigned int hifc_config_login_api(struct hifc_hba_s *v_hba,
struct unf_port_login_parms_s *v_login_para)
{
#define HIFC_LOOP_RDYNUM 8
int async_ret = RETURN_OK;
unsigned int ret = UNF_RETURN_ERROR;
struct hifc_inmbox_config_login_s cfg_login;
union hifc_outmbox_generic_u *cfg_login_sts = NULL;
HIFC_CHECK(INVALID_VALUE32, NULL != v_hba, return UNF_RETURN_ERROR);
memset(&cfg_login, 0, sizeof(cfg_login));
cfg_login_sts = kmalloc(sizeof(union hifc_outmbox_generic_u),
GFP_ATOMIC);
if (!cfg_login_sts) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed");
return UNF_RETURN_ERROR;
}
memset(cfg_login_sts, 0, sizeof(union hifc_outmbox_generic_u));
cfg_login.header.cmnd_type = HIFC_MBOX_CONFIG_LOGIN_API;
cfg_login.header.length =
HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_config_login_s));
cfg_login.header.port_id = v_hba->port_index;
cfg_login.op_code = UNDEFINEOPCODE;
cfg_login.tx_bb_credit = v_hba->remote_bbcredit;
cfg_login.etov = v_hba->compared_edtov_val;
cfg_login.rtov = v_hba->compared_ratov_val;
cfg_login.rt_tov_tag = v_hba->remote_rttov_tag;
cfg_login.ed_tov_tag = v_hba->remote_edtov_tag;
cfg_login.bb_credit = v_hba->remote_bbcredit;
cfg_login.bbscn = HIFC_LSB(v_hba->compared_bbscn);
if (cfg_login.bbscn) {
cfg_login.lr_flag =
(v_login_para->els_cmnd_code == ELS_PLOGI) ? 0 : 1;
ret = hifc_mb_send_and_wait_mbox(v_hba, &cfg_login,
sizeof(cfg_login),
cfg_login_sts);
if (ret != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_REG_ATT,
UNF_WARN,
"Port(0x%x) HIFC can't send and wait mailbox, command type: 0x%x.",
v_hba->port_cfg.port_id,
cfg_login.header.cmnd_type);
goto exit;
}
if (cfg_login_sts->config_login_sts.header.cmnd_type !=
HIFC_MBOX_CONFIG_LOGIN_API_STS) {
HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT,
UNF_INFO, "Port(0x%x) Receive mailbox type incorrect. Type: 0x%x.",
v_hba->port_cfg.port_id,
cfg_login_sts->config_login_sts.header.cmnd_type);
goto exit;
}
if (cfg_login_sts->config_login_sts.status != STATUS_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_LOGIN_ATT,
UNF_WARN, "Port(0x%x) Receive mailbox type(0x%x) status incorrect. Status: 0x%x.",
v_hba->port_cfg.port_id,
cfg_login_sts->config_login_sts.header.cmnd_type,
cfg_login_sts->config_login_sts.status);
goto exit;
}
} else {
async_ret = hifc_msg_to_mgmt_async(v_hba->hw_dev_handle,
HIFC_MOD_FC,
HIFC_MBOX_CONFIG_LOGIN_API,
&cfg_login,
sizeof(cfg_login));
if (async_ret != 0) {
HIFC_MAILBOX_STAT(v_hba,
HIFC_SEND_CONFIG_LOGINAPI_FAIL);
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT,
UNF_ERR,
"Port(0x%x) hifc can't send config login cmd to up,ret:%d.",
v_hba->port_cfg.port_id, async_ret);
goto exit;
}
HIFC_MAILBOX_STAT(v_hba, HIFC_SEND_CONFIG_LOGINAPI);
}
HIFC_TRACE(UNF_EVTLOG_LINK_SUC, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"Port(0x%x) Topo(0x%x) Config login param to up: txbbcredit(0x%x), BB_SC_N(0x%x).",
v_hba->port_cfg.port_id, v_hba->active_topo,
cfg_login.tx_bb_credit, cfg_login.bbscn);
ret = RETURN_OK;
exit:
kfree(cfg_login_sts);
return ret;
}
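/*
 * Synchronous mailbox transaction, serialized by mbox_complete: wait up to
 * HIFC_MBOX_TIME_SEC_MAX seconds for the previous mailbox to finish, send
 * the request with hifc_msg_to_mgmt_sync() (HIFC_MBX_MAX_TIMEOUT ms), then
 * signal completion so the next caller may proceed.
 */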
unsigned int hifc_mb_send_and_wait_mbox(struct hifc_hba_s *v_hba,
const void *v_in_mbox,
unsigned short in_size,
union hifc_outmbox_generic_u *out_mbox)
{
void *handle = NULL;
unsigned short out_size = 0;
unsigned long time_out = 0;
int ret = 0;
struct hifc_mbox_header_s *header;
HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR);
HIFC_CHECK(INVALID_VALUE32, v_in_mbox, return UNF_RETURN_ERROR);
HIFC_CHECK(INVALID_VALUE32, out_mbox, return UNF_RETURN_ERROR);
HIFC_CHECK(INVALID_VALUE32, v_hba->hw_dev_handle,
return UNF_RETURN_ERROR);
header = (struct hifc_mbox_header_s *)v_in_mbox;
out_size = sizeof(union hifc_outmbox_generic_u);
handle = v_hba->hw_dev_handle;
/* Wait for the last mailbox completion: */
time_out = wait_for_completion_timeout(
&v_hba->mbox_complete,
(unsigned long)msecs_to_jiffies(HIFC_MBOX_TIME_SEC_MAX * 1000));
if (time_out == UNF_ZERO) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR,
"[err]Port(0x%x) wait mailbox(0x%x) completion timeout: %d sec",
v_hba->port_cfg.port_id, header->cmnd_type,
HIFC_MBOX_TIME_SEC_MAX);
return UNF_RETURN_ERROR;
}
/* Send Msg to uP Sync: timer 10s */
ret = hifc_msg_to_mgmt_sync(handle, HIFC_MOD_FC, header->cmnd_type,
(void *)v_in_mbox, in_size,
(union hifc_outmbox_generic_u *)out_mbox,
&out_size,
HIFC_MBX_MAX_TIMEOUT);
if (ret != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[warn]Port(0x%x) can not send mailbox(0x%x) with ret:%d",
v_hba->port_cfg.port_id, header->cmnd_type, ret);
complete(&v_hba->mbox_complete);
return UNF_RETURN_ERROR;
}
complete(&v_hba->mbox_complete);
return RETURN_OK;
}
unsigned short hifc_get_global_base_qpn(void *v_handle)
{
#define NIC_UP_CMD_GET_GLOBAL_QPN 102
int ret = 0;
unsigned short out_size = 0;
struct hifc_get_global_base_qpn_s qpn_base = { 0 };
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_handle,
return INVALID_VALUE16);
qpn_base.func_id = hifc_global_func_id(v_handle);
out_size = (u16)sizeof(struct hifc_get_global_base_qpn_s);
/* Send Msg to uP Sync: timer 10s */
ret = hifc_msg_to_mgmt_sync(v_handle,
HIFC_MOD_L2NIC,
NIC_UP_CMD_GET_GLOBAL_QPN,
&qpn_base,
(u16)sizeof(qpn_base),
&qpn_base,
&out_size,
HIFC_MBX_MAX_TIMEOUT);
if (ret || (!out_size) || qpn_base.status) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[warn]hifc_get_global_base_qpn failed, ret %d, out_size %u, qpn_info.ret%u",
ret, out_size, qpn_base.status);
return 0xFFFF;
}
return (u16)(qpn_base.base_qpn);
}
void hifc_initial_dynamic_info(struct hifc_hba_s *v_fc_port)
{
struct hifc_hba_s *hba = v_fc_port;
unsigned long flag = 0;
HIFC_CHECK(INVALID_VALUE32, NULL != hba, return);
spin_lock_irqsave(&hba->hba_lock, flag);
hba->active_port_speed = UNF_PORT_SPEED_UNKNOWN;
hba->active_topo = UNF_ACT_TOP_UNKNOWN;
hba->phy_link = UNF_PORT_LINK_DOWN;
hba->q_set_stage = HIFC_QUEUE_SET_STAGE_INIT;
hba->loop_map_valid = LOOP_MAP_INVALID;
hba->delay_info.srq_delay_flag = 0;
hba->delay_info.root_rq_rcvd_flag = 0;
spin_unlock_irqrestore(&hba->hba_lock, flag);
}
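/*
 * Link-up event handler: record speed and LED states, derive the active
 * topology (public loop, private loop or P2P) from the reported loop map,
 * clear the HBA/root-SQ/R_Port flush states and report UNF_PORT_LINK_UP
 * to the COM layer.
 */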
unsigned int hifc_recv_fc_link_up(struct hifc_hba_s *v_hba, void *v_buf_in)
{
#define HIFC_LOOP_MASK 0x1
#define HIFC_LOOPMAP_COUNT 128
unsigned int ret = UNF_RETURN_ERROR;
struct hifc_link_event_s *buf_in = NULL;
buf_in = (struct hifc_link_event_s *)v_buf_in;
v_hba->phy_link = UNF_PORT_LINK_UP;
v_hba->active_port_speed = buf_in->speed;
v_hba->led_states.green_speed_led =
(unsigned char)(buf_in->green_speed_led);
v_hba->led_states.yellow_speed_led =
(unsigned char)(buf_in->yellow_speed_led);
v_hba->led_states.ac_led = (unsigned char)(buf_in->acled);
if ((buf_in->top_type == HIFC_LOOP_MASK) &&
((buf_in->loop_map_info[1] == UNF_FL_PORT_LOOP_ADDR) ||
(buf_in->loop_map_info[2] == UNF_FL_PORT_LOOP_ADDR))) {
v_hba->active_topo = UNF_ACT_TOP_PUBLIC_LOOP; /* Public Loop */
v_hba->active_al_pa = buf_in->alpa_value; /* AL_PA */
memcpy(v_hba->loop_map, buf_in->loop_map_info,
HIFC_LOOPMAP_COUNT);
v_hba->loop_map_valid = LOOP_MAP_VALID;
} else if (buf_in->top_type == HIFC_LOOP_MASK) {
v_hba->active_topo = UNF_ACT_TOP_PRIVATE_LOOP;/* Private Loop */
v_hba->active_al_pa = buf_in->alpa_value; /* AL_PA */
memcpy(v_hba->loop_map, buf_in->loop_map_info,
HIFC_LOOPMAP_COUNT);
v_hba->loop_map_valid = LOOP_MAP_VALID;
} else {
v_hba->active_topo = UNF_TOP_P2P_MASK; /* P2P_D or P2P_F */
}
HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_EVENT, UNF_KEVENT,
"[event]Port(0x%x) receive link up event(0x%x) with speed(0x%x) uP_topo(0x%x) driver_topo(0x%x)",
v_hba->port_cfg.port_id, buf_in->link_event,
buf_in->speed, buf_in->top_type, v_hba->active_topo);
/* Set clear & flush state */
hifc_set_hba_flush_state(v_hba, UNF_FALSE);
hifc_set_root_sq_flush_state(v_hba, UNF_FALSE);
hifc_set_rport_flush_state(v_hba, UNF_FALSE);
/* Report link up event to COM */
UNF_LOWLEVEL_PORT_EVENT(ret, v_hba->lport,
UNF_PORT_LINK_UP, &v_hba->active_port_speed);
HIFC_LINK_EVENT_STAT(v_hba, HIFC_LINK_UP_COUNT);
return ret;
}
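/*
 * Link-down event handler: save LED states, reset the HBA dynamic info,
 * set the HBA, root SQ and R_Port (parent SQ) flush states and report
 * UNF_PORT_LINK_DOWN to the COM layer.
 */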
unsigned int hifc_recv_fc_link_down(struct hifc_hba_s *v_hba, void *v_buf_in)
{
unsigned int ret = UNF_RETURN_ERROR;
struct hifc_link_event_s *buf_in = NULL;
buf_in = (struct hifc_link_event_s *)v_buf_in;
/* 1. Led state setting */
v_hba->led_states.green_speed_led =
(unsigned char)(buf_in->green_speed_led);
v_hba->led_states.yellow_speed_led =
(unsigned char)(buf_in->yellow_speed_led);
v_hba->led_states.ac_led = (unsigned char)(buf_in->acled);
HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_EVENT, UNF_KEVENT,
"[event]Port(0x%x) receive link down event(0x%x) reason(0x%x)",
v_hba->port_cfg.port_id, buf_in->link_event, buf_in->reason);
hifc_initial_dynamic_info(v_hba);
/* 2. set HBA flush state */
hifc_set_hba_flush_state(v_hba, UNF_TRUE);
/* 3. set Root SQ flush state */
hifc_set_root_sq_flush_state(v_hba, UNF_TRUE);
/* 4. set R_Port (parent SQ) flush state */
hifc_set_rport_flush_state(v_hba, UNF_TRUE);
/* 5. Report link down event to COM */
UNF_LOWLEVEL_PORT_EVENT(ret, v_hba->lport, UNF_PORT_LINK_DOWN, 0);
/* DFX setting */
HIFC_LINK_REASON_STAT(v_hba, buf_in->reason);
HIFC_LINK_EVENT_STAT(v_hba, HIFC_LINK_DOWN_COUNT);
return ret;
}
unsigned int hifc_recv_fc_del_cmd(struct hifc_hba_s *v_hba, void *v_buf_in)
{
unsigned int ret = UNF_RETURN_ERROR;
struct hifc_link_event_s *buf_in = NULL;
buf_in = (struct hifc_link_event_s *)v_buf_in;
HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_LOGIN_ATT, UNF_KEVENT,
"[event]Port(0x%x) receive delete cmd event(0x%x)",
v_hba->port_cfg.port_id, buf_in->link_event);
/* Send buffer clear cmnd */
ret = hifc_clear_fetched_sq_wqe(v_hba);
v_hba->q_set_stage = HIFC_QUEUE_SET_STAGE_SCANNING;
HIFC_LINK_EVENT_STAT(v_hba, HIFC_FC_DELETE_CMND_COUNT);
HIFC_REFERNCE_VAR(buf_in, buf_in, ret);
return ret;
}
unsigned int hifc_recv_fc_error(struct hifc_hba_s *v_hba, void *v_buf_in)
{
#define FC_ERR_LEVEL_DEAD 0
#define FC_ERR_LEVEL_HIGH 1
#define FC_ERR_LEVEL_LOW 2
unsigned int ret = UNF_RETURN_ERROR;
struct hifc_up_error_event_s *buf_in = NULL;
buf_in = (struct hifc_up_error_event_s *)v_buf_in;
if (buf_in->error_type >= HIFC_UP_ERR_BUTT ||
buf_in->error_value >= HIFC_ERR_VALUE_BUTT) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"Port(0x%x) receive a unsupported UP Error Event Type(0x%x) Value(0x%x).",
v_hba->port_cfg.port_id,
buf_in->error_type,
buf_in->error_value);
return ret;
}
switch (buf_in->error_level) {
case FC_ERR_LEVEL_DEAD:
/* todo: chip reset */
ret = RETURN_OK;
break;
case FC_ERR_LEVEL_HIGH:
/* port reset */
UNF_LOWLEVEL_PORT_EVENT(ret, v_hba->lport,
UNF_PORT_ABNORMAL_RESET, NULL);
break;
case FC_ERR_LEVEL_LOW:
ret = RETURN_OK;
break;
default:
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"Port(0x%x) receive a unsupported UP Error Event Level(0x%x), Can not Process.",
v_hba->port_cfg.port_id, buf_in->error_level);
return ret;
}
if (buf_in->error_value < HIFC_ERR_VALUE_BUTT)
HIFC_UP_ERR_EVENT_STAT(v_hba, buf_in->error_value);
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_KEVENT,
"[event]Port(0x%x) process UP Error Event Level(0x%x) Type(0x%x) Value(0x%x) %s.",
v_hba->port_cfg.port_id, buf_in->error_level,
buf_in->error_type, buf_in->error_value,
(ret == UNF_RETURN_ERROR) ? "ERROR" : "OK");
HIFC_REFERNCE_VAR(buf_in, buf_in, ret);
return ret;
}
unsigned int hifc_get_topo_cfg(void *v_hba, void *v_topo_cfg)
{
struct hifc_hba_s *hba = v_hba;
unsigned int *topo_cfg = v_topo_cfg;
HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR);
HIFC_CHECK(INVALID_VALUE32, v_topo_cfg, return UNF_RETURN_ERROR);
*topo_cfg = hba->port_topo_cfg;
HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"Get topology config: 0x%x.",
*topo_cfg);
return RETURN_OK;
}
unsigned int hifc_get_topo_act(void *v_hba, void *topo_act)
{
struct hifc_hba_s *hba = v_hba;
enum unf_act_topo_e *ret_topo_act = topo_act;
HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR);
HIFC_CHECK(INVALID_VALUE32, topo_act, return UNF_RETURN_ERROR);
/* Get topo from low_level */
*ret_topo_act = hba->active_topo;
HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Get active topology: 0x%x",
*ret_topo_act);
return RETURN_OK;
}
unsigned int hifc_get_loop_alpa(void *v_hba, void *v_alpa)
{
unsigned long flags = 0;
struct hifc_hba_s *hba = v_hba;
unsigned char *alpa = v_alpa;
HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR);
HIFC_CHECK(INVALID_VALUE32, v_alpa, return UNF_RETURN_ERROR);
spin_lock_irqsave(&hba->hba_lock, flags);
*alpa = hba->active_al_pa;
spin_unlock_irqrestore(&hba->hba_lock, flags);
HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_INFO,
"[info]Get active AL_PA(0x%x)", *alpa);
return RETURN_OK;
}
unsigned int hifc_get_lport_led(void *v_hba, void *v_led_state)
{
unsigned int ret = RETURN_OK;
struct hifc_hba_s *hba = v_hba;
struct hifc_led_state_s *led_state = v_led_state;
HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR);
HIFC_CHECK(INVALID_VALUE32, v_led_state, return UNF_RETURN_ERROR);
led_state->green_speed_led = hba->led_states.green_speed_led;
led_state->yellow_speed_led = hba->led_states.yellow_speed_led;
led_state->ac_led = hba->led_states.ac_led;
return ret;
}
unsigned int hifc_get_hardware_version(void *v_fc_port, void *v_version)
{
struct hifc_hba_s *fc_port = (struct hifc_hba_s *)v_fc_port;
struct unf_version_str_s *version =
(struct unf_version_str_s *)v_version;
char *hard_ware_ver = NULL;
HIFC_CHECK(INVALID_VALUE32, version, return UNF_RETURN_ERROR);
hard_ware_ver = version->buf;
HIFC_CHECK(INVALID_VALUE32, hard_ware_ver, return UNF_RETURN_ERROR);
HIFC_CHECK(INVALID_VALUE32, fc_port, return UNF_RETURN_ERROR);
hard_ware_ver[UNF_HW_VERSION_LEN - 1] = 0;
return RETURN_OK;
}
unsigned int hifc_get_sfp_info(void *v_fc_port, void *v_sfp_info)
{
struct unf_lport_sfp_info *sfp_info =
(struct unf_lport_sfp_info *)v_sfp_info;
struct hifc_hba_s *hba = (struct hifc_hba_s *)v_fc_port;
struct hifc_inmbox_get_sfp_info_s get_sfp_info;
union hifc_outmbox_generic_u *get_sfp_info_sts = NULL;
unsigned int ret = UNF_RETURN_ERROR;
HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR);
HIFC_CHECK(INVALID_VALUE32, sfp_info, return UNF_RETURN_ERROR);
memset(&get_sfp_info, 0, sizeof(get_sfp_info));
get_sfp_info_sts = kmalloc(sizeof(union hifc_outmbox_generic_u),
GFP_ATOMIC);
if (!get_sfp_info_sts) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed");
return UNF_RETURN_ERROR;
}
memset(get_sfp_info_sts, 0, sizeof(union hifc_outmbox_generic_u));
get_sfp_info.header.cmnd_type = HIFC_MBOX_GET_SFP_INFO;
get_sfp_info.header.length =
HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_get_sfp_info_s));
get_sfp_info.header.port_id = (hba->port_index);
/* send mailbox and handle the return sts */
if (hifc_mb_send_and_wait_mbox(hba, &get_sfp_info, sizeof(get_sfp_info),
get_sfp_info_sts) != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"Port(0x%x) HIFC can't send and wait mailbox, command type: 0x%x.",
hba->port_cfg.port_id,
get_sfp_info.header.cmnd_type);
goto exit;
}
sfp_info->status = get_sfp_info_sts->get_sfp_info_sts.status;
if (get_sfp_info_sts->get_sfp_info_sts.status != STATUS_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"Port(0x%x) Receive mailbox type(0x%x) status incorrect. Status: 0x%x.",
hba->port_cfg.port_id,
get_sfp_info_sts->get_sfp_info_sts.header.cmnd_type,
get_sfp_info_sts->get_sfp_info_sts.status);
goto exit;
}
if (get_sfp_info_sts->get_sfp_info_sts.header.cmnd_type !=
HIFC_MBOX_GET_SFP_INFO_STS) {
HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_INFO,
"Port(0x%x) Receive mailbox type incorrect. Type: 0x%x.",
hba->port_cfg.port_id,
get_sfp_info_sts->get_sfp_info_sts.header.cmnd_type);
goto exit;
}
/* the real SFP info follows the sts header */
memcpy(&sfp_info->sfp_eeprom_info,
((unsigned char *)get_sfp_info_sts +
sizeof(get_sfp_info_sts->get_sfp_info_sts)),
sizeof(union unf_sfp_eeprome_info));
ret = RETURN_OK;
exit:
kfree(get_sfp_info_sts);
return ret;
}
unsigned int hifc_get_port_info(void *v_hba)
{
unsigned long flags = 0;
struct hifc_inmbox_get_port_info_s get_port_info;
union hifc_outmbox_generic_u *port_info_sts = NULL;
struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba;
unsigned int ret = UNF_RETURN_ERROR;
memset(&get_port_info, 0, sizeof(get_port_info));
port_info_sts = kmalloc(sizeof(union hifc_outmbox_generic_u),
GFP_ATOMIC);
if (!port_info_sts) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed");
return UNF_RETURN_ERROR;
}
memset(port_info_sts, 0, sizeof(union hifc_outmbox_generic_u));
get_port_info.header.cmnd_type = HIFC_MBOX_GET_PORT_INFO;
get_port_info.header.length =
HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_get_port_info_s));
get_port_info.header.port_id = hba->port_index;
if (hifc_mb_send_and_wait_mbox(hba, &get_port_info,
sizeof(get_port_info), port_info_sts) !=
RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"Port(0x%x) send and wait mailbox type(0x%x) failed.",
hba->port_cfg.port_id,
get_port_info.header.cmnd_type);
goto exit;
}
if ((port_info_sts->get_port_info_sts.status != STATUS_OK) ||
(port_info_sts->get_port_info_sts.header.cmnd_type !=
HIFC_MBOX_GET_PORT_INFO_STS)) {
HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"Port(0x%x) receive mailbox type(0x%x) status(0x%x) error.",
hba->port_cfg.port_id,
port_info_sts->get_port_info_sts.header.cmnd_type,
port_info_sts->get_port_info_sts.status);
goto exit;
}
spin_lock_irqsave(&hba->hba_lock, flags);
hba->active_bb_scn = port_info_sts->get_port_info_sts.bbscn;
hba->active_rx_bb_credit =
port_info_sts->get_port_info_sts.non_loop_rx_credit;
spin_unlock_irqrestore(&hba->hba_lock, flags);
ret = RETURN_OK;
exit:
kfree(port_info_sts);
return ret;
}
unsigned int hifc_get_port_current_info(void *v_hba, void *port_info)
{
struct hifc_hba_s *hba = NULL;
struct hifc_inmbox_get_port_info_s get_port_info;
union hifc_outmbox_generic_u *port_info_sts = NULL;
struct unf_get_port_info_argout *current_port_info = NULL;
unsigned int ret = UNF_RETURN_ERROR;
HIFC_CHECK(INVALID_VALUE32, port_info, return UNF_RETURN_ERROR);
HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR);
hba = (struct hifc_hba_s *)v_hba;
current_port_info = (struct unf_get_port_info_argout *)port_info;
memset(&get_port_info, 0, sizeof(get_port_info));
port_info_sts = kmalloc(sizeof(union hifc_outmbox_generic_u),
GFP_ATOMIC);
if (!port_info_sts) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed");
return UNF_RETURN_ERROR;
}
memset(port_info_sts, 0, sizeof(union hifc_outmbox_generic_u));
get_port_info.header.cmnd_type = HIFC_MBOX_GET_PORT_INFO;
get_port_info.header.length =
HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_get_port_info_s));
get_port_info.header.port_id = hba->port_index;
if (hifc_mb_send_and_wait_mbox(hba, &get_port_info,
sizeof(get_port_info),
port_info_sts) != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[warn]Port(0x%x) send and wait mailbox type(0x%x) failed",
hba->port_cfg.port_id,
get_port_info.header.cmnd_type);
goto exit;
}
if ((port_info_sts->get_port_info_sts.status != STATUS_OK) ||
(port_info_sts->get_port_info_sts.header.cmnd_type !=
HIFC_MBOX_GET_PORT_INFO_STS)) {
HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"Port(0x%x) receive mailbox type(0x%x) status(0x%x) error.",
hba->port_cfg.port_id,
port_info_sts->get_port_info_sts.header.cmnd_type,
port_info_sts->get_port_info_sts.status);
goto exit;
}
current_port_info->sfp_speed =
(unsigned char)port_info_sts->get_port_info_sts.sfp_speed;
current_port_info->present =
(unsigned char)port_info_sts->get_port_info_sts.present;
ret = RETURN_OK;
exit:
kfree(port_info_sts);
return ret;
}
static void hifc_get_fabric_login_params(
struct hifc_hba_s *hba,
struct unf_port_login_parms_s *v_param_addr)
{
unsigned long flag = 0;
spin_lock_irqsave(&hba->hba_lock, flag);
hba->active_topo = v_param_addr->en_act_topo;
hba->compared_ratov_val = v_param_addr->compared_ratov_val;
hba->compared_edtov_val = v_param_addr->compared_edtov_val;
hba->compared_bbscn = v_param_addr->compared_bbscn;
hba->remote_edtov_tag = v_param_addr->remote_edtov_tag;
hba->remote_rttov_tag = v_param_addr->remote_rttov_tag;
hba->remote_bbcredit = v_param_addr->remote_bbcredit;
spin_unlock_irqrestore(&hba->hba_lock, flag);
HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO,
"[info]Port(0x%x) topo(0x%x) get fabric params: R_A_TOV(0x%x) E_D_TOV(%u) BB_CREDIT(0x%x) BB_SC_N(0x%x)",
hba->port_cfg.port_id, hba->active_topo,
hba->compared_ratov_val, hba->compared_edtov_val,
hba->remote_bbcredit, hba->compared_bbscn);
}
static void hifc_get_port_login_params(
struct hifc_hba_s *hba,
struct unf_port_login_parms_s *v_param_addr)
{
unsigned long flag = 0;
spin_lock_irqsave(&hba->hba_lock, flag);
hba->compared_ratov_val = v_param_addr->compared_ratov_val;
hba->compared_edtov_val = v_param_addr->compared_edtov_val;
hba->compared_bbscn = v_param_addr->compared_bbscn;
hba->remote_edtov_tag = v_param_addr->remote_edtov_tag;
hba->remote_rttov_tag = v_param_addr->remote_rttov_tag;
hba->remote_bbcredit = v_param_addr->remote_bbcredit;
spin_unlock_irqrestore(&hba->hba_lock, flag);
HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"Port(0x%x) Topo(0x%x) Get Port Params: R_A_TOV(0x%x), E_D_TOV(0x%x), BB_CREDIT(0x%x), BB_SC_N(0x%x).",
hba->port_cfg.port_id, hba->active_topo,
hba->compared_ratov_val, hba->compared_edtov_val,
hba->remote_bbcredit, hba->compared_bbscn);
}
unsigned int hifc_update_fabric_param(void *v_hba, void *v_para_in)
{
unsigned int ret = RETURN_OK;
struct hifc_hba_s *hba = v_hba;
struct unf_port_login_parms_s *login_coparms = v_para_in;
UNF_CHECK_VALID(0x4923, UNF_B_TRUE, hba, return UNF_RETURN_ERROR);
UNF_CHECK_VALID(0x4924, UNF_B_TRUE, v_para_in, return UNF_RETURN_ERROR);
hifc_get_fabric_login_params(hba, login_coparms);
if ((hba->active_topo == UNF_ACT_TOP_P2P_FABRIC) ||
(hba->active_topo == UNF_ACT_TOP_PUBLIC_LOOP)) {
if (hba->work_mode == HIFC_SMARTIO_WORK_MODE_FC)
ret = hifc_config_login_api(hba, login_coparms);
}
return ret;
}
unsigned int hifc_update_port_param(void *v_hba, void *v_para_in)
{
unsigned int ret = RETURN_OK;
struct hifc_hba_s *hba = v_hba;
struct unf_port_login_parms_s *login_coparms =
(struct unf_port_login_parms_s *)v_para_in;
UNF_CHECK_VALID(0x4923, UNF_B_TRUE, hba, return UNF_RETURN_ERROR);
UNF_CHECK_VALID(0x4924, UNF_B_TRUE, v_para_in, return UNF_RETURN_ERROR);
if ((hba->active_topo == UNF_ACT_TOP_PRIVATE_LOOP) ||
(hba->active_topo == UNF_ACT_TOP_P2P_DIRECT)) {
hifc_get_port_login_params(hba, login_coparms);
ret = hifc_config_login_api(hba, login_coparms);
}
hifc_save_login_para_in_sq_info(hba, login_coparms);
return ret;
}
unsigned int hifc_clear_port_error_code(void *v_hba, void *v_err_code)
{
return RETURN_OK;
}
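/*
 * Read the port error counters (link failure, loss of sync/signal,
 * protocol error, bad RX char, bad CRC, RX EOFa, discarded frames) from
 * the uP via HIFC_MBOX_GET_ERR_CODE and fill the caller's unf_err_code_s.
 */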
unsigned int hifc_get_and_clear_port_error_code(void *v_hba, void *v_err_code)
{
struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba;
struct hifc_inmbox_get_err_code_s get_err_code;
union hifc_outmbox_generic_u *err_code_sts = NULL;
struct unf_err_code_s *unf_err_code =
(struct unf_err_code_s *)v_err_code;
unsigned int ret = UNF_RETURN_ERROR;
HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR);
HIFC_CHECK(INVALID_VALUE32, unf_err_code, return UNF_RETURN_ERROR);
memset(&get_err_code, 0, sizeof(get_err_code));
err_code_sts = kmalloc(sizeof(union hifc_outmbox_generic_u),
GFP_ATOMIC);
if (!err_code_sts) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed");
return UNF_RETURN_ERROR;
}
memset(err_code_sts, 0, sizeof(union hifc_outmbox_generic_u));
get_err_code.header.cmnd_type = HIFC_MBOX_GET_ERR_CODE;
get_err_code.header.length =
HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_get_err_code_s));
if (hifc_mb_send_and_wait_mbox(hba, &get_err_code, sizeof(get_err_code),
err_code_sts) != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_INFO,
"Port(0x%x) HIFC can't send and wait mailbox, command type: 0x%x.",
hba->port_cfg.port_id,
get_err_code.header.cmnd_type);
goto exit;
}
if (err_code_sts->get_err_code_sts.status != STATUS_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_INFO,
"Port(0x%x) Receive mailbox type(0x%x) status incorrect, status: 0x%x.",
hba->port_cfg.port_id,
err_code_sts->get_err_code_sts.header.cmnd_type,
err_code_sts->get_err_code_sts.status);
goto exit;
}
unf_err_code->link_fail_count =
err_code_sts->get_err_code_sts.err_code[0];
unf_err_code->loss_of_sync_count =
err_code_sts->get_err_code_sts.err_code[1];
unf_err_code->loss_of_signal_count =
err_code_sts->get_err_code_sts.err_code[2];
unf_err_code->proto_error_count =
err_code_sts->get_err_code_sts.err_code[3];
unf_err_code->bad_rx_char_count =
err_code_sts->get_err_code_sts.err_code[4];
unf_err_code->bad_crc_count =
err_code_sts->get_err_code_sts.err_code[5];
unf_err_code->rx_eo_fa_count =
err_code_sts->get_err_code_sts.err_code[6];
unf_err_code->dis_frame_count =
err_code_sts->get_err_code_sts.err_code[7];
ret = RETURN_OK;
exit:
kfree(err_code_sts);
return ret;
}
unsigned int hifc_get_work_bale_bbcredit(void *v_hba, void *v_bbcredit)
{
unsigned int *bb_credit = (unsigned int *)v_bbcredit;
struct hifc_hba_s *hba = v_hba;
HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR);
HIFC_CHECK(INVALID_VALUE32, v_bbcredit, return UNF_RETURN_ERROR);
if (hba->active_port_speed == UNF_PORT_SPEED_32_G)
*bb_credit = HIFC_LOWLEVEL_DEFAULT_32G_BB_CREDIT;
else if (hba->active_port_speed == UNF_PORT_SPEED_16_G)
*bb_credit = HIFC_LOWLEVEL_DEFAULT_16G_BB_CREDIT;
else
*bb_credit = HIFC_LOWLEVEL_DEFAULT_842G_BB_CREDIT;
return RETURN_OK;
}
unsigned int hifc_get_work_bale_bbscn(void *v_hba, void *v_bbscn)
{
unsigned int *bbscn = (unsigned int *)v_bbscn;
struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba;
HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR);
HIFC_CHECK(INVALID_VALUE32, v_bbscn, return UNF_RETURN_ERROR);
*bbscn = hba->port_bbscn_cfg;
HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_INFO, "Return BBSCN(0x%x) to CM",
*bbscn);
return RETURN_OK;
}
unsigned int hifc_get_software_version(void *v_hba, void *v_version)
{
struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba;
struct hifc_inmbox_get_fw_version_s fw_ver;
union hifc_outmbox_generic_u *fw_ver_sts = NULL;
unsigned char *ver = NULL;
unsigned int ret = UNF_RETURN_ERROR;
HIFC_CHECK(INVALID_VALUE32, v_version, return UNF_RETURN_ERROR);
HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR);
memset(&fw_ver, 0, sizeof(fw_ver));
fw_ver_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), GFP_ATOMIC);
if (!fw_ver_sts) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed");
return UNF_RETURN_ERROR;
}
memset(fw_ver_sts, 0, sizeof(union hifc_outmbox_generic_u));
ver = (unsigned char *)&fw_ver_sts->get_fw_ver_sts;
fw_ver.header.cmnd_type = HIFC_MBOX_GET_FW_VERSION;
fw_ver.header.length =
HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_get_fw_version_s));
if (hifc_mb_send_and_wait_mbox(hba, &fw_ver, sizeof(fw_ver),
fw_ver_sts) != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"Port(0x%x) can't send and wait mailbox, command type: 0x%x.",
hba->port_cfg.port_id,
fw_ver.header.cmnd_type);
goto exit;
}
if (fw_ver_sts->get_fw_ver_sts.header.cmnd_type !=
HIFC_MBOX_GET_FW_VERSION_STS) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR,
"Port(0x%x) recv mailbox type(0x%x) incorrect.",
hba->port_cfg.port_id,
fw_ver_sts->get_fw_ver_sts.header.cmnd_type);
goto exit;
}
if (fw_ver_sts->get_fw_ver_sts.status != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR,
"Port(0x%x) Receive mailbox type(0x%x) status(0x%x) incorrect.",
hba->port_cfg.port_id,
fw_ver_sts->get_fw_ver_sts.header.cmnd_type,
fw_ver_sts->get_fw_ver_sts.status);
goto exit;
}
memcpy(v_version, ver + HIFC_VER_ADDR_OFFSET,
sizeof(struct hifc_outmbox_get_fw_version_sts_s) -
HIFC_VER_ADDR_OFFSET);
ret = RETURN_OK;
exit:
kfree(fw_ver_sts);
return ret;
}
unsigned int hifc_get_firmware_version(void *v_fc_port, void *v_version)
{
struct hifc_hba_s *fc_port = (struct hifc_hba_s *)v_fc_port;
struct unf_version_str_s *version =
(struct unf_version_str_s *)v_version;
char *fw_ver = NULL;
HIFC_CHECK(INVALID_VALUE32, version, return UNF_RETURN_ERROR);
fw_ver = version->buf;
HIFC_CHECK(INVALID_VALUE32, fw_ver, return UNF_RETURN_ERROR);
HIFC_CHECK(INVALID_VALUE32, fc_port, return UNF_RETURN_ERROR);
fw_ver[UNF_FW_VERSION_LEN - 1] = 0;
return RETURN_OK;
}
unsigned int hifc_get_loop_map(void *v_hba, void *v_buf)
{
unsigned long flags = 0;
struct unf_buf_s *buf = (struct unf_buf_s *)v_buf;
struct hifc_hba_s *hba = v_hba;
HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR);
HIFC_CHECK(INVALID_VALUE32, buf, return UNF_RETURN_ERROR);
HIFC_CHECK(INVALID_VALUE32, buf->cbuf, return UNF_RETURN_ERROR);
HIFC_CHECK(INVALID_VALUE32, buf->buf_len, return UNF_RETURN_ERROR);
if (buf->buf_len > UNF_LOOPMAP_COUNT)
return UNF_RETURN_ERROR;
spin_lock_irqsave(&hba->hba_lock, flags);
if (hba->loop_map_valid != LOOP_MAP_VALID) {
spin_unlock_irqrestore(&hba->hba_lock, flags);
return UNF_RETURN_ERROR;
}
memcpy(buf->cbuf, hba->loop_map, buf->buf_len); /* do memcpy */
spin_unlock_irqrestore(&hba->hba_lock, flags);
return RETURN_OK;
}
unsigned int hifc_get_speed_cfg(void *v_hba, void *v_speed_cfg)
{
struct hifc_hba_s *hba = v_hba;
unsigned int *speed_cfg = v_speed_cfg;
HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR);
HIFC_CHECK(INVALID_VALUE32, v_speed_cfg, return UNF_RETURN_ERROR);
*speed_cfg = hba->port_speed_cfg;
HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_INFO,
"Get config link rate: 0x%x.",
*speed_cfg);
return RETURN_OK;
}
unsigned int hifc_get_speed_act(void *v_hba, void *v_speed_act)
{
struct hifc_hba_s *hba = v_hba;
unsigned int *speed_act = v_speed_act;
HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR);
HIFC_CHECK(INVALID_VALUE32, v_speed_act, return UNF_RETURN_ERROR);
*speed_act = hba->active_port_speed;
HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"Get config link rate: 0x%x.",
*speed_act);
return RETURN_OK;
}
unsigned int hifc_get_port_fec(void *v_hba, void *v_para_out)
{
struct hifc_hba_s *hba = v_hba;
int *fec = v_para_out;
HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR);
HIFC_CHECK(INVALID_VALUE32, fec, return UNF_RETURN_ERROR);
*fec = (hba->fec_status) ? UNF_TRUE : UNF_FALSE;
HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"Get Port fec: 0x%x.",
(hba->fec_status));
return RETURN_OK;
}
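/*
 * Exchange HBA persistence data with the uP via HIFC_MBOX_SAVE_HBA_INFO.
 * SAVE_PORT_INFO_LEN bytes from the caller's buffer form the payload; on
 * success the buffer is overwritten with the SAVE_PORT_INFO_LEN - 8 bytes
 * returned by the uP. The request/response buffers are vmalloc()'ed and
 * freed on every exit path.
 */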
unsigned int hifc_save_hba_info(void *v_hba, void *v_para_in)
{
struct hifc_inmbox_save_hba_info_s *hba_info = NULL;
struct hifc_outmbox_save_hba_info_sts_s *hba_info_sts = NULL;
void *hba_info_addr = v_para_in;
struct hifc_hba_s *hba = v_hba;
HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR);
HIFC_CHECK(INVALID_VALUE32, v_para_in, return UNF_RETURN_ERROR);
hba_info = vmalloc(sizeof(struct hifc_inmbox_save_hba_info_s));
if (!hba_info)
return UNF_RETURN_ERROR;
hba_info_sts = vmalloc(sizeof(struct hifc_outmbox_save_hba_info_sts_s));
if (!hba_info_sts) {
vfree(hba_info);
return UNF_RETURN_ERROR;
}
memset(hba_info, 0, sizeof(struct hifc_inmbox_save_hba_info_s));
memset(hba_info_sts, 0,
sizeof(struct hifc_outmbox_save_hba_info_sts_s));
hba_info->header.cmnd_type = HIFC_MBOX_SAVE_HBA_INFO;
hba_info->header.length =
HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_save_hba_info_s));
/* fill mailbox payload */
memcpy(&hba_info->hba_save_info[0], hba_info_addr, SAVE_PORT_INFO_LEN);
/* send & wait mailbox */
if (hifc_mb_send_and_wait_mbox(
hba, hba_info,
sizeof(*hba_info),
(union hifc_outmbox_generic_u *)hba_info_sts)
!= RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[warn]Port(0x%x) HIFC can't send and wait mailbox, command type(0x%x)",
hba->port_cfg.port_id,
hba_info->header.cmnd_type);
vfree(hba_info);
vfree(hba_info_sts);
return UNF_RETURN_ERROR;
}
/* check mailbox rsp status */
if (hba_info_sts->status != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR,
"[err]Port(0x%x) receive mailbox type(0x%x) status(0x%x) error",
hba->port_cfg.port_id,
hba_info_sts->header.cmnd_type,
hba_info_sts->status);
vfree(hba_info);
vfree(hba_info_sts);
return UNF_RETURN_ERROR;
}
/* check mailbox rsp type */
if (hba_info_sts->header.cmnd_type != HIFC_MBOX_SAVE_HBA_INFO_STS) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR,
"[err]Port(0x%x) receive mailbox type(0x%x) error",
hba->port_cfg.port_id,
hba_info_sts->header.cmnd_type);
vfree(hba_info);
vfree(hba_info_sts);
return UNF_RETURN_ERROR;
}
memcpy(hba_info_addr, &hba_info_sts->save_hba_info[0],
SAVE_PORT_INFO_LEN - 8);
HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR,
"[event]Port(0x%x) save hba info succeed",
hba->port_cfg.port_id);
vfree(hba_info);
vfree(hba_info_sts);
return RETURN_OK;
}
unsigned int hifc_mbox_reset_chip(struct hifc_hba_s *v_hba,
unsigned char v_sub_type)
{
struct hifc_inmbox_port_reset_s port_reset;
union hifc_outmbox_generic_u *port_reset_sts = NULL;
unsigned int ret = UNF_RETURN_ERROR;
HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR);
memset(&port_reset, 0, sizeof(port_reset));
port_reset_sts = kmalloc(sizeof(union hifc_outmbox_generic_u),
GFP_ATOMIC);
if (!port_reset_sts) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed");
return UNF_RETURN_ERROR;
}
memset(port_reset_sts, 0, sizeof(union hifc_outmbox_generic_u));
port_reset.header.cmnd_type = HIFC_MBOX_PORT_RESET;
port_reset.header.length =
HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_port_reset_s));
port_reset.op_code = v_sub_type;
if (hifc_mb_send_and_wait_mbox(v_hba, &port_reset, sizeof(port_reset),
port_reset_sts) != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[warn]Port(0x%x) can't send and wait mailbox with command type(0x%x)",
v_hba->port_cfg.port_id,
port_reset.header.cmnd_type);
goto exit;
}
if (port_reset_sts->port_reset_sts.status != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR,
"[warn]Port(0x%x) receive mailbox type(0x%x) status(0x%x) incorrect",
v_hba->port_cfg.port_id,
port_reset_sts->port_reset_sts.header.cmnd_type,
port_reset_sts->port_reset_sts.status);
goto exit;
}
if (port_reset_sts->port_reset_sts.header.cmnd_type !=
HIFC_MBOX_PORT_RESET_STS) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR,
"[warn]Port(0x%x) recv mailbox type(0x%x) incorrect",
v_hba->port_cfg.port_id,
port_reset_sts->port_reset_sts.header.cmnd_type);
goto exit;
}
HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR,
"[info]Port(0x%x) reset chip mailbox success",
v_hba->port_cfg.port_id);
ret = RETURN_OK;
exit:
kfree(port_reset_sts);
return ret;
}
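/*
 * Asynchronously notify the uP that all fetched SQ WQEs have been cleared
 * (HIFC_MBOX_BUFFER_CLEAR_DONE), then advance q_set_stage to FLUSHDONE and
 * reset next_clearing_sq.
 */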
unsigned int hifc_clear_sq_wqe_done(struct hifc_hba_s *v_hba)
{
int async_ret = RETURN_OK;
struct hifc_inmbx_clear_node_s clear_done;
clear_done.header.cmnd_type = HIFC_MBOX_BUFFER_CLEAR_DONE;
clear_done.header.length =
HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbx_clear_node_s));
clear_done.header.port_id = v_hba->port_index;
async_ret = hifc_msg_to_mgmt_async(v_hba->hw_dev_handle,
HIFC_MOD_FC,
HIFC_MBOX_BUFFER_CLEAR_DONE,
&clear_done, sizeof(clear_done));
if (async_ret != 0) {
HIFC_MAILBOX_STAT(v_hba, HIFC_SEND_CLEAR_DONE_FAIL);
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]HIFC Port(0x%x) can't send clear done cmd to up, ret:%d",
v_hba->port_cfg.port_id, async_ret);
return UNF_RETURN_ERROR;
}
HIFC_MAILBOX_STAT(v_hba, HIFC_SEND_CLEAR_DONE);
v_hba->q_set_stage = HIFC_QUEUE_SET_STAGE_FLUSHDONE;
v_hba->next_clearing_sq = 0;
HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_EVENT, UNF_KEVENT,
"[info]Port(0x%x) clear done msg(0x%x) sent to up succeed with stage(0x%x)",
v_hba->port_cfg.port_id,
clear_done.header.cmnd_type, v_hba->q_set_stage);
return RETURN_OK;
}
unsigned int hifc_mbx_get_fw_clear_stat(struct hifc_hba_s *v_hba,
unsigned int *v_clear_state)
{
struct hifc_inmbox_get_clear_state_s clr_state;
union hifc_outmbox_generic_u *port_clr_state_sts = NULL;
unsigned int ret = UNF_RETURN_ERROR;
HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR);
HIFC_CHECK(INVALID_VALUE32, v_clear_state, return UNF_RETURN_ERROR);
memset(&clr_state, 0, sizeof(clr_state));
port_clr_state_sts = kmalloc(sizeof(union hifc_outmbox_generic_u),
GFP_ATOMIC);
if (!port_clr_state_sts) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed");
return UNF_RETURN_ERROR;
}
memset(port_clr_state_sts, 0, sizeof(union hifc_outmbox_generic_u));
clr_state.header.cmnd_type = HIFC_MBOX_GET_CLEAR_STATE;
clr_state.header.length =
HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_get_clear_state_s));
if (hifc_mb_send_and_wait_mbox(v_hba, &clr_state, sizeof(clr_state),
port_clr_state_sts) != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"hifc can't send and wait mailbox, command type: 0x%x",
clr_state.header.cmnd_type);
goto exit;
}
if (port_clr_state_sts->get_clr_state_sts.status != RETURN_OK) {
HIFC_TRACE(
UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR,
"Port(0x%x) Receive mailbox type(0x%x) status incorrect. Status: 0x%x, state 0x%x.",
v_hba->port_cfg.port_id,
port_clr_state_sts->get_clr_state_sts.header.cmnd_type,
port_clr_state_sts->get_clr_state_sts.status,
port_clr_state_sts->get_clr_state_sts.state);
goto exit;
}
if (port_clr_state_sts->get_clr_state_sts.header.cmnd_type !=
HIFC_MBOX_GET_CLEAR_STATE_STS) {
HIFC_TRACE(
UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR,
"Port(0x%x) recv mailbox type(0x%x) incorrect.",
v_hba->port_cfg.port_id,
port_clr_state_sts->get_clr_state_sts.header.cmnd_type);
goto exit;
}
HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_EVENT, UNF_MAJOR,
"Port(0x%x) get port clear state 0x%x.",
v_hba->port_cfg.port_id,
port_clr_state_sts->get_clr_state_sts.state);
*v_clear_state = port_clr_state_sts->get_clr_state_sts.state;
ret = RETURN_OK;
exit:
kfree(port_clr_state_sts);
return ret;
}
unsigned int hifc_mbx_set_fec(struct hifc_hba_s *v_hba,
unsigned int v_fec_opcode)
{
struct hifc_inmbox_config_fec_s cfg_fec;
union hifc_outmbox_generic_u *port_fec_state_sts = NULL;
unsigned char op_code = 0;
unsigned int ret = UNF_RETURN_ERROR;
HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR);
memset(&cfg_fec, 0, sizeof(cfg_fec));
port_fec_state_sts = kmalloc(sizeof(union hifc_outmbox_generic_u),
GFP_ATOMIC);
if (!port_fec_state_sts) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed");
return UNF_RETURN_ERROR;
}
memset(port_fec_state_sts, 0, sizeof(union hifc_outmbox_generic_u));
op_code = (unsigned char)v_fec_opcode;
cfg_fec.header.cmnd_type = HIFC_MBOX_CONFIG_FEC;
cfg_fec.header.length =
HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_config_fec_s));
cfg_fec.fec_op_code = op_code;
if (hifc_mb_send_and_wait_mbox(v_hba, &cfg_fec, sizeof(cfg_fec),
port_fec_state_sts) != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"Port(0x%x) hifc can't send and wait mailbox, command type: 0x%x",
v_hba->port_cfg.port_id, cfg_fec.header.cmnd_type);
goto exit;
}
if (port_fec_state_sts->config_fec_sts.status != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR,
"Port(0x%x) Receive mailbox type(0x%x) status incorrect. Status: 0x%x.",
v_hba->port_cfg.port_id,
port_fec_state_sts->config_fec_sts.header.cmnd_type,
port_fec_state_sts->config_fec_sts.status);
goto exit;
}
if (port_fec_state_sts->config_fec_sts.header.cmnd_type !=
HIFC_MBOX_CONFIG_FEC_STS) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR,
"Port(0x%x) recv mailbox type(0x%x) incorrect.",
v_hba->port_cfg.port_id,
port_fec_state_sts->config_fec_sts.header.cmnd_type);
goto exit;
}
v_hba->fec_status = v_fec_opcode;
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EVENT, UNF_MAJOR,
"Port(0x%x) set FEC Status is %u.",
v_hba->port_cfg.port_id, op_code);
ret = RETURN_OK;
exit:
kfree(port_fec_state_sts);
return ret;
}
unsigned int hifc_notify_up_config_timer(struct hifc_hba_s *v_hba, int op_code,
unsigned int user_data)
{
struct hifc_inmbox_config_timer_s time_cfg;
union hifc_outmbox_generic_u *time_cfg_sts = NULL;
unsigned int ret = UNF_RETURN_ERROR;
HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR);
memset(&time_cfg, 0, sizeof(time_cfg));
time_cfg_sts = kmalloc(sizeof(union hifc_outmbox_generic_u),
GFP_ATOMIC);
if (!time_cfg_sts) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed");
return UNF_RETURN_ERROR;
}
memset(time_cfg_sts, 0, sizeof(union hifc_outmbox_generic_u));
time_cfg.header.cmnd_type = HIFC_MBOX_CONFIG_TIMER;
time_cfg.header.length =
HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_config_timer_s));
time_cfg.op_code = (unsigned short)op_code;
time_cfg.fun_id = hifc_global_func_id(v_hba->hw_dev_handle);
time_cfg.user_data = user_data;
if (hifc_mb_send_and_wait_mbox(v_hba, &time_cfg, sizeof(time_cfg),
time_cfg_sts) != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[warn]Port(0x%x) hifc can't send and wait mailbox with command type(0x%x)",
v_hba->port_cfg.port_id, time_cfg.header.cmnd_type);
goto exit;
}
if (time_cfg_sts->timer_config_sts.header.cmnd_type !=
HIFC_MBOX_CONFIG_TIMER_STS) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR,
"[warn]Port(0x%x) recv mailbox type(0x%x) incorrect",
v_hba->port_cfg.port_id,
time_cfg_sts->timer_config_sts.header.cmnd_type);
goto exit;
}
if (time_cfg_sts->timer_config_sts.status != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR,
"[warn]Port(0x%x) Receive mailbox type(0x%x) status(0x%x) incorrect",
v_hba->port_cfg.port_id,
time_cfg_sts->timer_config_sts.header.cmnd_type,
time_cfg_sts->timer_config_sts.status);
goto exit;
}
HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_EVENT, UNF_MAJOR,
"[info]Port(0x%x) notify uP to %s timer success",
v_hba->port_cfg.port_id, op_code ? "open" : "close");
ret = RETURN_OK;
exit:
kfree(time_cfg_sts);
return ret;
}
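/*
 * For hifc_notify_up_config_timer(), a non-zero op_code asks uP to open the
 * timer and zero asks it to close it (see the "open"/"close" trace above);
 * user_data is forwarded unchanged to firmware in the
 * HIFC_MBOX_CONFIG_TIMER inbox.
 */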
unsigned int hifc_get_flash_data(void *v_hba, void *v_flash_data)
{
struct hifc_hba_s *hba = NULL;
struct unf_mbox_flash_data_mgmt_s *flash_data_mgmt = NULL;
union hifc_outmbox_generic_u *flash_data_sts = NULL;
unsigned int ret = UNF_RETURN_ERROR;
HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR);
HIFC_CHECK(INVALID_VALUE32, v_flash_data, return UNF_RETURN_ERROR);
hba = (struct hifc_hba_s *)v_hba;
flash_data_mgmt = kmalloc(sizeof(struct unf_mbox_flash_data_mgmt_s),
GFP_ATOMIC);
if (!flash_data_mgmt) {
		HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_REG_ATT, UNF_KEVENT,
			   "can't malloc buff for get flash data.");
		return ret;
	}
	/* allocate the full generic out-mailbox so the response can't overrun */
	flash_data_sts = kmalloc(sizeof(union hifc_outmbox_generic_u),
				 GFP_ATOMIC);
	if (!flash_data_sts) {
		HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_REG_ATT, UNF_KEVENT,
			   "can't malloc buff for get flash data sts.");
		kfree(flash_data_mgmt);
		return ret;
	}
	memset(flash_data_mgmt, 0, sizeof(struct unf_mbox_flash_data_mgmt_s));
	memset(flash_data_sts, 0, sizeof(union hifc_outmbox_generic_u));
flash_data_mgmt->mbox_head.cmnd_type = HIFC_MBOX_FLASH_DATA_MGMT;
flash_data_mgmt->mbox_head.length = 1; /* not used */
flash_data_mgmt->mbox_head.op_code = 0; /* read config */
if (hifc_mb_send_and_wait_mbox(
hba, flash_data_mgmt,
sizeof(struct unf_mbox_flash_data_mgmt_s),
flash_data_sts) != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"hifc can't send and wait mailbox, command type: 0x%x.",
flash_data_mgmt->mbox_head.cmnd_type);
goto exit;
}
if (flash_data_sts->flash_data_sts.mbox_head.status != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"Port(0x%x) mailbox status incorrect status(0x%x) .",
hba->port_cfg.port_id,
flash_data_sts->flash_data_sts.mbox_head.status);
goto exit;
}
if (flash_data_sts->flash_data_sts.mbox_head.cmnd_type !=
HIFC_MBOX_FLASH_DATA_MGMT_STS) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"Port(0x%x) receive mailbox type incorrect type: 0x%x.",
hba->port_cfg.port_id,
flash_data_sts->flash_data_sts.mbox_head.cmnd_type);
goto exit;
}
memcpy((unsigned char *)v_flash_data,
(unsigned char *)&flash_data_sts->flash_data_sts.flash_data,
sizeof(struct unf_flash_data_s));
ret = RETURN_OK;
exit:
kfree(flash_data_mgmt);
kfree(flash_data_sts);
return ret;
}
unsigned int hifc_set_flash_data(void *v_hba, void *v_flash_data)
{
struct hifc_hba_s *hba = NULL;
struct unf_mbox_flash_data_mgmt_s *flash_data_mgmt = NULL;
union hifc_outmbox_generic_u *flash_data_sts = NULL;
unsigned int ret = UNF_RETURN_ERROR;
HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR);
HIFC_CHECK(INVALID_VALUE32, v_flash_data, return UNF_RETURN_ERROR);
hba = (struct hifc_hba_s *)v_hba;
flash_data_mgmt = kmalloc(sizeof(struct unf_mbox_flash_data_mgmt_s),
GFP_ATOMIC);
if (!flash_data_mgmt) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_KEVENT,
"can't malloc buff for set flashData.");
return ret;
}
flash_data_sts = kmalloc(sizeof(union hifc_outmbox_generic_u),
GFP_ATOMIC);
if (!flash_data_sts) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_KEVENT,
"can't malloc buff for set flashData sts.");
kfree(flash_data_mgmt);
return ret;
}
memset(flash_data_sts, 0, sizeof(union hifc_outmbox_generic_u));
memset(flash_data_mgmt, 0, sizeof(struct unf_mbox_flash_data_mgmt_s));
flash_data_mgmt->mbox_head.cmnd_type = HIFC_MBOX_FLASH_DATA_MGMT;
flash_data_mgmt->mbox_head.length = 1; /* not used */
flash_data_mgmt->mbox_head.op_code = 2; /* flash config */
if (hifc_mb_send_and_wait_mbox(
hba, flash_data_mgmt,
sizeof(struct unf_mbox_flash_data_mgmt_s),
flash_data_sts) != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_KEVENT,
"hifc can't send and wait mailbox, command type: 0x%x.",
flash_data_sts->flash_data_sts.mbox_head.cmnd_type);
goto END;
}
if (flash_data_sts->flash_data_sts.mbox_head.status != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_KEVENT,
"Port(0x%x) mailbox status incorrect status(0x%x) .",
hba->port_cfg.port_id,
flash_data_sts->flash_data_sts.mbox_head.status);
goto END;
}
if (flash_data_sts->flash_data_sts.mbox_head.cmnd_type !=
HIFC_MBOX_FLASH_DATA_MGMT_STS) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_KEVENT,
"Port(0x%x) receive mailbox type incorrect type: 0x%x.",
hba->port_cfg.port_id,
flash_data_sts->flash_data_sts.mbox_head.cmnd_type);
goto END;
}
flash_data_mgmt->mbox_head.cmnd_type = HIFC_MBOX_FLASH_DATA_MGMT;
flash_data_mgmt->mbox_head.length = 1; /* not used */
flash_data_mgmt->mbox_head.op_code = 1; /* write config */
memcpy(&flash_data_mgmt->flash_data,
(unsigned char *)v_flash_data, sizeof(struct unf_flash_data_s));
if (hifc_mb_send_and_wait_mbox(
hba, flash_data_mgmt,
sizeof(struct unf_mbox_flash_data_mgmt_s),
flash_data_sts) != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"hifc can't send and wait mailbox, command type: 0x%x.",
flash_data_sts->flash_data_sts.mbox_head.cmnd_type);
goto END;
}
if (flash_data_sts->flash_data_sts.mbox_head.status != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_KEVENT,
"Port(0x%x) mailbox status incorrect status(0x%x) .",
hba->port_cfg.port_id,
flash_data_sts->flash_data_sts.mbox_head.status);
goto END;
}
if (flash_data_sts->flash_data_sts.mbox_head.cmnd_type !=
HIFC_MBOX_FLASH_DATA_MGMT_STS) {
HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_KEVENT,
"Port(0x%x) receive mailbox type incorrect type: 0x%x.",
hba->port_cfg.port_id,
flash_data_sts->flash_data_sts.mbox_head.cmnd_type);
goto END;
}
ret = RETURN_OK;
END:
kfree(flash_data_mgmt);
kfree(flash_data_sts);
return ret;
}
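/*
 * Flash data management multiplexes one mailbox type over an op_code
 * selector: judging from the inline comments above, 0 reads the config,
 * 1 writes it and 2 flushes ("flash") it. hifc_set_flash_data() therefore
 * issues a flush of the current config (op 2) before writing the caller's
 * unf_flash_data_s payload (op 1).
 */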
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __HIFC_CHIPITF_H__
#define __HIFC_CHIPITF_H__
#include "unf_log.h"
#include "hifc_utils.h"
#include "hifc_module.h"
#include "hifc_service.h"
/* CONF_API_CMND */
#define HIFC_MBOX_CONFIG_API 0x00
#define HIFC_MBOX_CONFIG_API_STS 0xA0
/* GET_CHIP_INFO_API_CMD */
#define HIFC_MBOX_GET_CHIP_INFO 0x01
#define HIFC_MBOX_GET_CHIP_INFO_STS 0xA1
/* PORT_RESET */
#define HIFC_MBOX_PORT_RESET 0x02
#define HIFC_MBOX_PORT_RESET_STS 0xA2
/* SFP_SWITCH_API_CMND */
#define HIFC_MBOX_PORT_SWITCH 0x03
#define HIFC_MBOX_PORT_SWITCH_STS 0xA3
/* GET_SFP_INFO */
#define HIFC_MBOX_GET_SFP_INFO 0x04
#define HIFC_MBOX_GET_SFP_INFO_STS 0xA4
/* CONF_AF_LOGIN_API_CMND */
#define HIFC_MBOX_CONFIG_LOGIN_API 0x06
#define HIFC_MBOX_CONFIG_LOGIN_API_STS 0xA6
/* BUFFER_CLEAR_DONE_CMND */
#define HIFC_MBOX_BUFFER_CLEAR_DONE 0x07
#define HIFC_MBOX_BUFFER_CLEAR_DONE_STS 0xA7
#define HIFC_MBOX_GET_ERR_CODE 0x08
#define HIFC_MBOX_GET_ERR_CODE_STS 0xA8
#define HIFC_MBOX_GET_UP_STATE 0x09
#define HIFC_MBOX_GET_UP_STATE_STS 0xA9
/* LOOPBACK MODE */
#define HIFC_MBOX_LOOPBACK_MODE 0x0A
#define HIFC_MBOX_LOOPBACK_MODE_STS 0xAA
/* REG RW MODE */
#define HIFC_MBOX_REG_RW_MODE 0x0B
#define HIFC_MBOX_REG_RW_MODE_STS 0xAB
/* GET CLEAR DONE STATE */
#define HIFC_MBOX_GET_CLEAR_STATE 0x0E
#define HIFC_MBOX_GET_CLEAR_STATE_STS 0xAE
/* GET UP & UCODE VER */
#define HIFC_MBOX_GET_FW_VERSION 0x0F
#define HIFC_MBOX_GET_FW_VERSION_STS 0xAF
/* CONFIG TIMER */
#define HIFC_MBOX_CONFIG_TIMER 0x10
#define HIFC_MBOX_CONFIG_TIMER_STS 0xB0
/* CONFIG SRQC */
#define HIFC_MBOX_CONFIG_SRQC 0x11
#define HIFC_MBOX_CONFIG_SRQC_STS 0xB1
/* Led Test */
#define HIFC_MBOX_LED_TEST 0x12
#define HIFC_MBOX_LED_TEST_STS 0xB2
/* set esch */
#define HIFC_MBOX_SET_ESCH 0x13
#define HIFC_MBOX_SET_ESCH_STS 0xB3
/* set get tx serdes */
#define HIFC_MBOX_SET_GET_SERDES_TX 0x14
#define HIFC_MBOX_SET_GET_SERDES_TX_STS 0xB4
/* get rx serdes */
#define HIFC_MBOX_GET_SERDES_RX 0x15
#define HIFC_MBOX_GET_SERDES_RX_STS 0xB5
/* i2c read write */
#define HIFC_MBOX_I2C_WR_RD 0x16
#define HIFC_MBOX_I2C_WR_RD_STS 0xB6
/* Set FEC Enable */
#define HIFC_MBOX_CONFIG_FEC 0x17
#define HIFC_MBOX_CONFIG_FEC_STS 0xB7
/* GET UCODE STATS CMD */
#define HIFC_MBOX_GET_UCODE_STAT 0x18
#define HIFC_MBOX_GET_UCODE_STAT_STS 0xB8
/* gpio read write */
#define HIFC_MBOX_GPIO_WR_RD 0x19
#define HIFC_MBOX_GPIO_WR_RD_STS 0xB9
/* GET PORT INFO CMD */
#define HIFC_MBOX_GET_PORT_INFO 0x20
#define HIFC_MBOX_GET_PORT_INFO_STS 0xC0
/* save hba info CMD */
#define HIFC_MBOX_SAVE_HBA_INFO 0x24
#define HIFC_MBOX_SAVE_HBA_INFO_STS 0xc4
#define HIFC_MBOX_FLASH_DATA_MGMT 0x25
#define HIFC_MBOX_FLASH_DATA_MGMT_STS 0xc5
/* FCOE: DRV->UP */
#define HIFC_MBOX_SEND_ELS_CMD 0x2A
#define HIFC_MBOX_SEND_VPORT_INFO 0x2B
/* FC: UP->DRV */
#define HIFC_MBOX_RECV_FC_LINKUP 0x40
#define HIFC_MBOX_RECV_FC_LINKDOWN 0x41
#define HIFC_MBOX_RECV_FC_DELCMD 0x42
#define HIFC_MBOX_RECV_FC_ERROR 0x43
#define LOOP_MAP_VALID 1
#define LOOP_MAP_INVALID 0
#define HIFC_MBOX_SIZE 1024
#define HIFC_MBOX_HEADER_SIZE 4
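/*
 * HIFC_MBOX_SIZE and HIFC_MBOX_HEADER_SIZE size the rsvd padding of
 * union hifc_outmbox_generic_u below, so a single 1 KB allocation can
 * receive any of the *_sts response layouts.
 */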
#define ATUOSPEED 1
#define FIXEDSPEED 0
#define UNDEFINEOPCODE 0
#define VALUEMASK_L 0x00000000FFFFFFFF
#define VALUEMASK_H 0xFFFFFFFF00000000
#define STATUS_OK 0
#define STATUS_FAIL 1
enum hifc_drv_2_up_unblock_msg_cmd_code_e {
HIFC_SEND_ELS_CMD,
HIFC_SEND_ELS_CMD_FAIL,
HIFC_RCV_ELS_CMD_RSP,
HIFC_SEND_CONFIG_LOGINAPI,
HIFC_SEND_CONFIG_LOGINAPI_FAIL,
HIFC_RCV_CONFIG_LOGIN_API_RSP,
HIFC_SEND_CLEAR_DONE,
HIFC_SEND_CLEAR_DONE_FAIL,
HIFC_RCV_CLEAR_DONE_RSP,
HIFC_SEND_VPORT_INFO_DONE,
HIFC_SEND_VPORT_INFO_FAIL,
HIFC_SEND_VPORT_INFO_RSP,
HIFC_MBOX_CMD_BUTT
};
/* uP-to-driver message handler template */
struct hifc_up_2_drv_msg_handle_s {
unsigned char cmd;
unsigned int (*pfn_hifc_msg_up2drv_handler)(struct hifc_hba_s *v_hba,
void *v_buf_in);
};
/* Mbox Common Header */
struct hifc_mbox_header_s {
unsigned char cmnd_type;
unsigned char length;
unsigned char port_id;
unsigned char reserved;
};
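/*
 * Every inbox/outbox layout starts with this 4-byte header. cmnd_type is
 * echoed back in the response (the mailbox callers reject any mismatch)
 * and length counts dwords, which is why senders fill it with
 * HIFC_BYTES_TO_DW_NUM.
 */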
/* open or close the sfp */
struct hifc_inbox_port_switch_s {
struct hifc_mbox_header_s header;
unsigned char op_code;
unsigned char port_type;
unsigned short reserved;
unsigned char host_id;
unsigned char pf_id;
unsigned char fcoe_mode;
unsigned char reserved2;
unsigned short conf_vlan;
unsigned short reserved3;
unsigned long long sys_port_wwn;
unsigned long long sys_node_name;
};
struct hifc_outbox_port_switch_sts_s {
struct hifc_mbox_header_s header;
unsigned short reserved;
unsigned char reserved2;
unsigned char status;
};
/* config API */
struct hifc_inbox_config_api_s {
struct hifc_mbox_header_s header;
unsigned int op_code : 8;
unsigned int reserved1 : 24;
unsigned char topy_mode;
unsigned char sfp_speed;
unsigned char max_speed;
unsigned char hard_alpa;
unsigned char port_name[UNF_WWN_LEN];
unsigned int slave : 1;
unsigned int auto_sneg : 1;
unsigned int reserved2 : 30;
unsigned int rx_bbcredit_32g : 16; /* 160 */
unsigned int rx_bbcredit_16g : 16; /* 80 */
unsigned int rx_bbcredit_842g : 16; /* 50 */
unsigned int rdy_cnt_bf_fst_frm : 16; /* 8 */
unsigned int esch_value_32g;
unsigned int esch_value_16g;
unsigned int esch_value_8g;
unsigned int esch_value_4g;
unsigned int esch_value_2g;
unsigned int esch_bust_size;
};
struct hifc_outbox_config_api_sts_s {
struct hifc_mbox_header_s header;
unsigned short reserved;
unsigned char reserved2;
unsigned char status;
};
/* Get chip info */
struct hifc_inbox_get_chip_info_s {
struct hifc_mbox_header_s header;
};
struct hifc_outbox_get_chip_info_sts_s {
struct hifc_mbox_header_s header;
unsigned char status;
unsigned char board_type;
unsigned char rvsd;
unsigned char tape_support : 1;
unsigned char reserved : 7;
unsigned long long wwpn;
unsigned long long wwnn;
unsigned long long sys_mac;
};
/* Get reg info */
struct hifc_inmbox_get_reg_info_s {
struct hifc_mbox_header_s header;
unsigned int op_code : 1;
unsigned int reg_len : 8;
unsigned int rsvd : 23;
unsigned int reg_addr;
unsigned int reg_value_l32;
unsigned int reg_value_h32;
unsigned int rvsd[27];
};
/* Get reg info sts */
struct hifc_outmbox_get_reg_info_sts_s {
struct hifc_mbox_header_s header;
unsigned short rvsd0;
unsigned char rvsd1;
unsigned char status;
unsigned int reg_value_l32;
unsigned int reg_value_h32;
unsigned int rvsd[28];
};
/* Config login API */
struct hifc_inmbox_config_login_s {
struct hifc_mbox_header_s header;
unsigned int op_code : 8;
unsigned int reserved1 : 24;
unsigned short tx_bb_credit;
unsigned short reserved2;
unsigned int rtov;
unsigned int etov;
unsigned int rt_tov_tag : 1;
unsigned int ed_tov_tag : 1;
unsigned int bb_credit : 6;
unsigned int bbscn : 8;
unsigned int lr_flag : 16;
};
struct hifc_outmbox_config_login_sts_s {
struct hifc_mbox_header_s header;
unsigned short reserved;
unsigned char reserved2;
unsigned char status;
};
/* port reset */
#define HIFC_MBOX_SUBTYPE_LIGHT_RESET 0x0
#define HIFC_MBOX_SUBTYPE_HEAVY_RESET 0x1
struct hifc_inmbox_port_reset_s {
struct hifc_mbox_header_s header;
unsigned int op_code : 8;
unsigned int reserved1 : 24;
};
struct hifc_outmbox_port_reset_sts_s {
struct hifc_mbox_header_s header;
unsigned short reserved;
unsigned char reserved2;
unsigned char status;
};
struct hifc_inmbox_get_sfp_info_s {
struct hifc_mbox_header_s header;
};
struct hifc_outmbox_get_sfp_info_sts_s {
struct hifc_mbox_header_s header;
unsigned int rcvd : 8;
unsigned int length : 16;
unsigned int status : 8;
};
/* get and clear error code */
struct hifc_inmbox_get_err_code_s {
struct hifc_mbox_header_s header;
};
struct hifc_outmbox_get_err_code_sts_s {
struct hifc_mbox_header_s header;
unsigned short rsvd;
unsigned char rsvd2;
unsigned char status;
unsigned int err_code[8];
};
/* uP-->Driver async event API */
struct hifc_link_event_s {
struct hifc_mbox_header_s header;
unsigned char link_event;
unsigned char reason;
unsigned char speed;
unsigned char top_type;
unsigned char alpa_value;
unsigned char reserved1;
unsigned short paticpate : 1;
unsigned short acled : 1;
unsigned short yellow_speed_led : 1;
unsigned short green_speed_led : 1;
unsigned short reserved : 12;
unsigned char loop_map_info[128];
};
enum hifc_up_err_type_e {
HIFC_UP_ERR_DRV_PARA = 0,
HIFC_UP_ERR_SFP = 1,
HIFC_UP_ERR_32G_PUB = 2,
HIFC_UP_ERR_32G_UA = 3,
HIFC_UP_ERR_32G_MAC = 4,
HIFC_UP_ERR_NON32G_DFX = 5,
HIFC_UP_ERR_NON32G_MAC = 6,
HIFC_UP_ERR_BUTT
};
enum hifc_up_err_value_e {
/* ERR type 0 */
HIFC_DRV_2_UP_PARA_ERR = 0,
/* ERR type 1 */
HIFC_SFP_SPEED_ERR,
/* ERR type 2 */
HIFC_32GPUB_UA_RXESCH_FIFO_OF,
HIFC_32GPUB_UA_RXESCH_FIFO_UCERR,
/* ERR type 3 */
HIFC_32G_UA_UATX_LEN_ABN,
HIFC_32G_UA_RXAFIFO_OF,
HIFC_32G_UA_TXAFIFO_OF,
HIFC_32G_UA_RXAFIFO_UCERR,
HIFC_32G_UA_TXAFIFO_UCERR,
/* ERR type 4 */
HIFC_32G_MAC_RX_BBC_FATAL,
HIFC_32G_MAC_TX_BBC_FATAL,
HIFC_32G_MAC_TXFIFO_UF,
HIFC_32G_MAC_PCS_TXFIFO_UF,
HIFC_32G_MAC_RXBBC_CRDT_TO,
HIFC_32G_MAC_PCS_RXAFIFO_OF,
HIFC_32G_MAC_PCS_TXFIFO_OF,
HIFC_32G_MAC_FC2P_RXFIFO_OF,
HIFC_32G_MAC_FC2P_TXFIFO_OF,
HIFC_32G_MAC_FC2P_CAFIFO_OF,
HIFC_32G_MAC_PCS_RXRSFECM_UCEER,
HIFC_32G_MAC_PCS_RXAFIFO_UCEER,
HIFC_32G_MAC_PCS_TXFIFO_UCEER,
HIFC_32G_MAC_FC2P_RXFIFO_UCEER,
HIFC_32G_MAC_FC2P_TXFIFO_UCEER,
/* ERR type 5 */
HIFC_NON32G_DFX_FC1_DFX_BF_FIFO,
HIFC_NON32G_DFX_FC1_DFX_BP_FIFO,
HIFC_NON32G_DFX_FC1_DFX_RX_AFIFO_ERR,
HIFC_NON32G_DFX_FC1_DFX_TX_AFIFO_ERR,
HIFC_NON32G_DFX_FC1_DFX_DIRQ_RXBUF_FIFO1,
HIFC_NON32G_DFX_FC1_DFX_DIRQ_RXBBC_TO,
HIFC_NON32G_DFX_FC1_DFX_DIRQ_TXDAT_FIFO,
HIFC_NON32G_DFX_FC1_DFX_DIRQ_TXCMD_FIFO,
HIFC_NON32G_DFX_FC1_ERR_R_RDY,
/* ERR type 6 */
HIFC_NON32G_MAC_FC1_FAIRNESS_ERROR,
HIFC_ERR_VALUE_BUTT
};
struct hifc_up_error_event_s {
struct hifc_mbox_header_s header;
unsigned char link_event;
unsigned char error_level;
unsigned char error_type;
unsigned char error_value;
};
struct hifc_inmbx_clear_node_s {
struct hifc_mbox_header_s header;
};
struct hifc_inmbox_get_clear_state_s {
struct hifc_mbox_header_s header;
unsigned int resvd[31];
};
struct hifc_outmbox_get_clear_state_sts_s {
struct hifc_mbox_header_s header;
unsigned short rsvd;
	unsigned char state;	/* 1: clear in progress, 0: clear done */
	unsigned char status;	/* 0: ok, non-zero: fail */
unsigned int resvd[30];
};
#define HIFC_FIP_MODE_VN2VF 0
#define HIFC_FIP_MODE_VN2VN 1
/* get port state */
struct hifc_inmbox_get_port_info_s {
struct hifc_mbox_header_s header;
};
/* save hba info */
struct hifc_inmbox_save_hba_info_s {
struct hifc_mbox_header_s header;
unsigned int hba_save_info[254];
};
struct hifc_outmbox_get_port_info_sts_s {
struct hifc_mbox_header_s header;
unsigned int status : 8;
unsigned int fec_vis_tts_16g : 8;
unsigned int bbscn : 8;
unsigned int loop_credit : 8;
unsigned int non_loop_rx_credit : 8;
unsigned int non_loop_tx_credit : 8;
unsigned int sfp_speed : 8;
unsigned int present : 8;
};
struct hifc_outmbox_save_hba_info_sts_s {
struct hifc_mbox_header_s header;
unsigned short rsvd1;
unsigned char rsvd2;
unsigned char status;
unsigned int rsvd3;
unsigned int save_hba_info[252];
};
#define HIFC_VER_ADDR_OFFSET (8)
struct hifc_inmbox_get_fw_version_s {
struct hifc_mbox_header_s header;
};
struct hifc_outmbox_get_fw_version_sts_s {
struct hifc_mbox_header_s header;
unsigned char status;
unsigned char rsv[3];
unsigned char ucode_ver[HIFC_VER_LEN];
unsigned char ucode_compile_time[HIFC_COMPILE_TIME_LEN];
unsigned char up_ver[HIFC_VER_LEN];
unsigned char up_compile_time[HIFC_COMPILE_TIME_LEN];
unsigned char boot_ver[HIFC_VER_LEN];
unsigned char boot_compile_time[HIFC_COMPILE_TIME_LEN];
};
/* Set Fec Enable */
struct hifc_inmbox_config_fec_s {
struct hifc_mbox_header_s header;
unsigned char fec_op_code;
unsigned char rsv0;
unsigned short rsv1;
};
struct hifc_outmbox_config_fec_sts_s {
struct hifc_mbox_header_s header;
unsigned short usrsv0;
unsigned char ucrsv1;
unsigned char status;
};
struct hifc_inmbox_config_timer_s {
struct hifc_mbox_header_s header;
unsigned short op_code;
unsigned short fun_id;
unsigned int user_data;
};
struct hifc_outmbox_config_timer_sts_s {
struct hifc_mbox_header_s header;
unsigned char status;
unsigned char rsv[3];
};
union hifc_outmbox_generic_u {
struct {
struct hifc_mbox_header_s header;
unsigned int rsvd[(HIFC_MBOX_SIZE - HIFC_MBOX_HEADER_SIZE) /
sizeof(unsigned int)];
} generic;
struct hifc_outbox_port_switch_sts_s port_switch_sts;
struct hifc_outbox_config_api_sts_s config_api_sts;
struct hifc_outbox_get_chip_info_sts_s get_chip_info_sts;
struct hifc_outmbox_get_reg_info_sts_s get_reg_info_sts;
struct hifc_outmbox_config_login_sts_s config_login_sts;
struct hifc_outmbox_port_reset_sts_s port_reset_sts;
struct hifc_outmbox_get_sfp_info_sts_s get_sfp_info_sts;
struct hifc_outmbox_get_err_code_sts_s get_err_code_sts;
struct hifc_outmbox_get_clear_state_sts_s get_clr_state_sts;
struct hifc_outmbox_get_fw_version_sts_s get_fw_ver_sts;
struct hifc_outmbox_config_fec_sts_s config_fec_sts;
struct hifc_outmbox_config_timer_sts_s timer_config_sts;
struct hifc_outmbox_get_port_info_sts_s get_port_info_sts;
struct unf_flash_data_mgmt_sts_s flash_data_sts;
};
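/*
 * Design note: hifc_mb_send_and_wait_mbox() takes this generic union as
 * its response buffer, so each command path allocates one
 * sizeof(union hifc_outmbox_generic_u) block and then reads it through the
 * member matching the *_STS type it expects.
 */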
unsigned int hifc_get_chip_msg(void *v_hba, void *v_mac);
unsigned int hifc_config_port_table(struct hifc_hba_s *v_hba);
unsigned int hifc_port_switch(struct hifc_hba_s *v_hba, int turn_on);
unsigned int hifc_get_speed_act(void *v_hba, void *v_speed_act);
unsigned int hifc_get_speed_cfg(void *v_hba, void *v_speed_cfg);
unsigned int hifc_get_loop_map(void *v_hba, void *v_buf);
unsigned int hifc_get_firmware_version(void *v_fc_port, void *v_ver);
unsigned int hifc_get_work_bale_bbcredit(void *v_hba, void *v_bb_credit);
unsigned int hifc_get_work_bale_bbscn(void *v_hba, void *v_bbscn);
unsigned int hifc_get_and_clear_port_error_code(void *v_hba, void *v_err_code);
unsigned int hifc_get_port_current_info(void *v_hba, void *v_port_info);
unsigned int hifc_get_port_fec(void *v_hba, void *v_para_out);
unsigned int hifc_get_software_version(void *v_fc_port, void *v_ver);
unsigned int hifc_get_port_info(void *v_hba);
unsigned int hifc_rw_reg(void *v_hba, void *v_params);
unsigned int hifc_clear_port_error_code(void *v_hba, void *v_err_code);
unsigned int hifc_get_sfp_info(void *v_fc_port, void *v_sfp_info);
unsigned int hifc_get_hardware_version(void *v_fc_port, void *v_ver);
unsigned int hifc_get_lport_led(void *v_hba, void *v_led_state);
unsigned int hifc_get_loop_alpa(void *v_hba, void *v_alpa);
unsigned int hifc_get_topo_act(void *v_hba, void *v_topo_act);
unsigned int hifc_get_topo_cfg(void *v_hba, void *v_topo_cfg);
unsigned int hifc_config_login_api(
struct hifc_hba_s *v_hba,
struct unf_port_login_parms_s *v_login_parms);
unsigned int hifc_mb_send_and_wait_mbox(struct hifc_hba_s *v_hba,
const void *v_in_mbox,
unsigned short in_size,
union hifc_outmbox_generic_u
*v_out_mbox);
void hifc_up_msg_2_driver_proc(void *v_hwdev_handle,
void *v_pri_handle,
unsigned char v_cmd,
void *v_buf_in,
unsigned short v_in_size,
void *v_buf_out,
unsigned short *v_out_size);
unsigned int hifc_mbox_reset_chip(struct hifc_hba_s *v_hba,
unsigned char v_sub_type);
unsigned int hifc_clear_sq_wqe_done(struct hifc_hba_s *v_hba);
unsigned int hifc_update_fabric_param(void *v_hba, void *v_para_in);
unsigned int hifc_update_port_param(void *v_hba, void *v_para_in);
unsigned int hifc_mbx_get_fw_clear_stat(struct hifc_hba_s *v_hba,
unsigned int *v_clear_state);
unsigned short hifc_get_global_base_qpn(void *v_handle);
unsigned int hifc_mbx_set_fec(struct hifc_hba_s *v_hba,
unsigned int v_fec_opcode);
unsigned int hifc_notify_up_config_timer(struct hifc_hba_s *v_hba,
int v_opcode,
unsigned int v_user_data);
unsigned int hifc_save_hba_info(void *v_hba, void *v_para_in);
unsigned int hifc_get_chip_capability(void *hw_dev_handle,
struct hifc_chip_info_s *v_chip_info);
unsigned int hifc_get_flash_data(void *v_hba, void *v_flash_data);
unsigned int hifc_set_flash_data(void *v_hba, void *v_flash_data);
#endif
// SPDX-License-Identifier: GPL-2.0
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#include "hifc_module.h"
#include "hifc_chipitf.h"
#include "hifc_io.h"
#include "hifc_portmng.h"
#include "hifc_lld.h"
#include "hifc_cqm_object.h"
#include "hifc_cqm_main.h"
#include "hifc_mgmt.h"
#include "hifc_hba.h"
struct hifc_hba_s *hifc_hba[HIFC_HBA_PORT_MAX_NUM];
unsigned long probe_bit_map[HIFC_MAX_PROBE_PORT_NUM / HIFC_PORT_NUM_PER_TABLE];
static unsigned long card_num_bit_map[HIFC_MAX_PROBE_PORT_NUM /
HIFC_PORT_NUM_PER_TABLE];
static struct hifc_card_num_manage_s card_num_manage[HIFC_MAX_CARD_NUM];
/* probe global lock */
spinlock_t probe_spin_lock;
unsigned int max_parent_qpc_num;
static unsigned int hifc_port_config_set(void *v_hba,
enum unf_port_config_set_op_e op_code,
void *v_var_in);
static unsigned int hifc_port_config_get(void *v_hba,
enum unf_port_config_get_op_e op_code,
void *param_out);
static unsigned int hifc_sfp_switch(void *v_hba, void *v_para_in);
static unsigned int hifc_get_hba_pcie_link_state(void *v_hba,
void *v_link_state);
struct service_register_template_s service_cqm_temp = {
.scq_ctx_size = HIFC_SCQ_CNTX_SIZE,
/* srq, scq context_size configuration */
.srq_ctx_size = HIFC_SRQ_CNTX_SIZE,
/* the API of asynchronous event from TILE to driver */
.aeq_callback = hifc_process_aeqe,
};
/* default configuration: auto speed, auto topology, INI+TGT */
static struct unf_cfg_item_s hifc_port_cfg_parm[] = {
{ "port_id", 0, 0x110000, 0xffffff},
/* port mode:INI(0x20), TGT(0x10), BOTH(0x30) */
{ "port_mode", 0, 0x20, 0xff},
	/* port topology, 0x3: loop, 0xc: p2p, 0xf: auto, 0x10: vn2vn */
{ "port_topology", 0, 0xf, 0x20},
/* alpa address of port */
{ "port_alpa", 0, 0xdead, 0xffff},
/* queue depth of originator registered to SCSI midlayer */
{ "max_queue_depth", 0, 512, 512},
{ "sest_num", 0, 4096, 4096},
{ "max_login", 0, 2048, 2048},
	/* node name, bits 63:32 */
	{ "node_name_high", 0, 0x1000286e, 0xffffffff},
	/* node name, bits 31:0 */
	{ "node_name_low", 0, 0xd4bbf12f, 0xffffffff},
	/* port name, bits 63:32 */
	{ "port_name_high", 0, 0x2000286e, 0xffffffff},
	/* port name, bits 31:0 */
	{ "port_name_low", 0, 0xd4bbf12f, 0xffffffff},
/* port speed 0:auto 1:1Gbps 2:2Gbps 3:4Gbps 4:8Gbps 5:16Gbps */
{ "port_speed", 0, 0, 32},
/* unit: us */
{ "interrupt_delay", 0, 0, 100},
{ "tape_support", 0, 0, 1}, /* tape support */
{ "End", 0, 0, 0}
};
struct unf_low_level_function_op_s hifc_fun_op = {
.low_level_type = UNF_HIFC_FC,
.name = "HIFC",
/* XID allocated from CM level */
.xchg_mgr_type = UNF_LOW_LEVEL_MGR_TYPE_PASSTIVE,
.abts_xchg = UNF_NO_EXTRA_ABTS_XCHG,
.pass_through_flag = UNF_LOW_LEVEL_PASS_THROUGH_PORT_LOGIN,
.support_max_npiv_num = UNF_HIFC_MAXNPIV_NUM,
.chip_id = 0,
.support_max_speed = UNF_PORT_SPEED_32_G,
.support_max_rport = UNF_HIFC_MAXRPORT_NUM,
.sfp_type = UNF_PORT_TYPE_FC_SFP,
.rport_release_type = UNF_LOW_LEVEL_RELEASE_RPORT_ASYNC,
.sirt_page_mode = UNF_LOW_LEVEL_SIRT_PAGE_MODE_XCHG,
/* Link service */
.service_op = {
.pfn_unf_els_send = hifc_send_els_cmnd,
.pfn_unf_bls_send = hifc_send_bls_cmnd,
.pfn_unf_gs_send = hifc_send_gs_cmnd,
.pfn_unf_cmnd_send = hifc_send_scsi_cmnd,
.pfn_unf_release_rport_res = hifc_free_parent_resource,
.pfn_unf_flush_ini_resp_que = hifc_flush_ini_resp_queue,
.pfn_unf_alloc_rport_res = hifc_alloc_parent_resource,
.pfn_unf_rport_session_rst = hifc_rport_session_rst,
},
/* Port Mgr */
.port_mgr_op = {
.pfn_ll_port_config_set = hifc_port_config_set,
.pfn_ll_port_config_get = hifc_port_config_get,
.pfn_ll_port_diagnose = hifc_port_diagnose,
}
};
struct hifc_port_config_op_s {
enum unf_port_config_set_op_e op_code;
unsigned int (*pfn_hifc_operation)(void *v_hba, void *v_para_in);
};
struct hifc_port_config_op_s hifc_config_set_op[] = {
{ UNF_PORT_CFG_SET_SPEED, hifc_set_port_speed },
{ UNF_PORT_CFG_SET_TOPO, hifc_set_port_topo },
{ UNF_PORT_CFG_SET_BBSCN, hifc_set_port_bbscn },
{ UNF_PORT_CFG_SET_SFP_SWITCH, hifc_sfp_switch },
{ UNF_PORT_CFG_SET_PORT_SWITCH, hifc_sfp_switch },
{ UNF_PORT_CFG_SET_PORT_STATE, hifc_set_port_state },
{ UNF_PORT_CFG_UPDATE_WWN, NULL },
{ UNF_PORT_CFG_SET_FCP_CONF, hifc_set_port_fcp_conf },
{ UNF_PORT_CFG_SET_LOOP_ROLE, hifc_set_loop_role },
{ UNF_PORT_CFG_SET_MAX_SUPPORT_SPEED, hifc_set_max_support_speed },
{ UNF_PORT_CFG_UPDATE_FABRIC_PARAM, hifc_update_fabric_param },
{ UNF_PORT_CFG_UPDATE_PLOGI_PARAM, hifc_update_port_param },
{ UNF_PORT_CFG_UPDATE_FDISC_PARAM, NULL },
{ UNF_PORT_CFG_SAVE_HBA_INFO, hifc_save_hba_info },
{ UNF_PORT_CFG_SET_HBA_BASE_INFO, hifc_set_hba_base_info },
{ UNF_PORT_CFG_SET_FLASH_DATA_INFO, hifc_set_flash_data },
{ UNF_PORT_CFG_SET_BUTT, NULL }
};
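/*
 * Port configuration is dispatched through these opcode->handler tables;
 * a NULL handler (e.g. UNF_PORT_CFG_UPDATE_WWN) marks an operation that
 * the enum defines but this driver does not support, which
 * hifc_port_config_set()/hifc_port_config_get() below report as a warning.
 */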
struct hifc_port_cfg_get_op_s {
enum unf_port_config_get_op_e op_code;
unsigned int (*pfn_hifc_operation)(void *v_hba, void *param_out);
};
struct hifc_port_cfg_get_op_s hifc_config_get_op[] = {
{ UNF_PORT_CFG_GET_SPEED_CFG, hifc_get_speed_cfg },
{ UNF_PORT_CFG_GET_SPEED_ACT, hifc_get_speed_act },
{ UNF_PORT_CFG_GET_TOPO_CFG, hifc_get_topo_cfg },
{ UNF_PORT_CFG_GET_TOPO_ACT, hifc_get_topo_act },
{ UNF_PORT_CFG_GET_LOOP_MAP, hifc_get_loop_map },
{ UNF_PORT_CFG_GET_SFP_PRESENT, NULL },
{ UNF_PORT_CFG_GET_SFP_INFO, hifc_get_sfp_info },
{ UNF_PORT_CFG_GET_FW_VER, hifc_get_firmware_version },
{ UNF_PORT_CFG_GET_HW_VER, hifc_get_hardware_version },
{ UNF_PORT_CFG_GET_WORKBALE_BBCREDIT, hifc_get_work_bale_bbcredit },
{ UNF_PORT_CFG_GET_WORKBALE_BBSCN, hifc_get_work_bale_bbscn },
{ UNF_PORT_CFG_GET_LOOP_ALPA, hifc_get_loop_alpa },
{ UNF_PORT_CFG_GET_MAC_ADDR, hifc_get_chip_msg },
{ UNF_PORT_CFG_CLR_LESB, hifc_clear_port_error_code },
{ UNF_PORT_CFG_GET_LESB_THEN_CLR, hifc_get_and_clear_port_error_code},
{ UNF_PORT_CFG_GET_PORT_INFO, hifc_get_port_current_info },
{ UNF_PORT_CFG_GET_LED_STATE, hifc_get_lport_led },
{ UNF_PORT_CFG_GET_FEC, hifc_get_port_fec },
{ UNF_PORT_CFG_GET_PCIE_LINK_STATE, hifc_get_hba_pcie_link_state },
{ UNF_PORT_CFG_GET_FLASH_DATA_INFO, hifc_get_flash_data },
{ UNF_PORT_CFG_GET_BUTT, NULL }
};
static unsigned int hifc_port_config_set(void *v_phba,
enum unf_port_config_set_op_e op_code,
void *v_var_in)
{
unsigned int op_idx = 0;
HIFC_CHECK(INVALID_VALUE32, v_phba, return UNF_RETURN_ERROR);
for (op_idx = 0;
op_idx < sizeof(hifc_config_set_op) /
sizeof(struct hifc_port_config_op_s);
op_idx++) {
if (op_code == hifc_config_set_op[op_idx].op_code) {
if (!hifc_config_set_op[op_idx].pfn_hifc_operation) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN,
UNF_LOG_REG_ATT, UNF_WARN,
"[warn]Null operation for configuration, opcode(0x%x), operation ID(0x%x)",
op_code, op_idx);
return UNF_RETURN_ERROR;
} else {
return hifc_config_set_op[op_idx].pfn_hifc_operation(v_phba, v_var_in);
}
}
}
HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[warn]No operation code for configuration, opcode(0x%x)",
op_code);
return UNF_RETURN_ERROR;
}
static unsigned int hifc_port_config_get(void *v_phba,
enum unf_port_config_get_op_e op_code,
void *v_para_out)
{
unsigned int op_idx = 0;
HIFC_CHECK(INVALID_VALUE32, v_phba, return UNF_RETURN_ERROR);
for (op_idx = 0;
op_idx < sizeof(hifc_config_get_op) /
sizeof(struct hifc_port_cfg_get_op_s);
op_idx++) {
if (op_code == hifc_config_get_op[op_idx].op_code) {
if (!hifc_config_get_op[op_idx].pfn_hifc_operation) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN,
UNF_LOG_REG_ATT, UNF_WARN,
"[warn]Null operation to get configuration, opcode(0x%x), operation ID(0x%x)",
op_code, op_idx);
return UNF_RETURN_ERROR;
} else {
return hifc_config_get_op[op_idx].pfn_hifc_operation(v_phba, v_para_out);
}
}
}
HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[warn]No operation to get configuration, opcode(0x%x)",
op_code);
return UNF_RETURN_ERROR;
}
static unsigned int hifc_check_port_cfg(
const struct hifc_port_cfg_s *v_port_cfg)
{
int topo_condition, speed_condition;
/* About Work Topology */
topo_condition = ((v_port_cfg->port_topology != UNF_TOP_LOOP_MASK) &&
(v_port_cfg->port_topology != UNF_TOP_P2P_MASK) &&
(v_port_cfg->port_topology != UNF_TOP_AUTO_MASK));
if (topo_condition) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Configured port topology(0x%x) is incorrect",
v_port_cfg->port_topology);
return UNF_RETURN_ERROR;
}
/* About Work Mode */
if (v_port_cfg->port_mode != UNF_PORT_MODE_INI) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Configured port mode(0x%x) is incorrect",
v_port_cfg->port_mode);
return UNF_RETURN_ERROR;
}
/* About Work Speed */
speed_condition = ((v_port_cfg->port_speed != UNF_PORT_SPEED_AUTO) &&
(v_port_cfg->port_speed != UNF_PORT_SPEED_2_G) &&
(v_port_cfg->port_speed != UNF_PORT_SPEED_4_G) &&
(v_port_cfg->port_speed != UNF_PORT_SPEED_8_G) &&
(v_port_cfg->port_speed != UNF_PORT_SPEED_16_G) &&
(v_port_cfg->port_speed != UNF_PORT_SPEED_32_G));
if (speed_condition) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Configured port speed(0x%x) is incorrect",
v_port_cfg->port_speed);
return UNF_RETURN_ERROR;
}
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO,
"[info]Check port configuration OK");
return RETURN_OK;
}
static unsigned int hifc_get_port_cfg(struct hifc_hba_s *v_hba,
struct hifc_chip_info_s *v_chip_info,
unsigned char v_card_num)
{
#define UNF_CONFIG_ITEM_LEN 15
/*
* Maximum length of a configuration item name, including the end
* character
*/
#define UNF_MAX_ITEM_NAME_LEN (32 + 1)
/* Get and check parameters */
char cfg_item[UNF_MAX_ITEM_NAME_LEN];
unsigned int ret = UNF_RETURN_ERROR;
struct hifc_hba_s *hba = v_hba;
int iret = RETURN_ERROR_S32;
HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR);
memset((void *)cfg_item, 0, sizeof(cfg_item));
hba->card_info.func_num =
(hifc_global_func_id(v_hba->hw_dev_handle)) & UNF_FUN_ID_MASK;
hba->card_info.card_num = v_card_num;
	/* On an FC adapter the PF range is PF1 to PF2 */
iret = snprintf(cfg_item, UNF_CONFIG_ITEM_LEN, "hifc_cfg_%1u",
(hba->card_info.func_num));
UNF_FUNCTION_RETURN_CHECK(iret, UNF_CONFIG_ITEM_LEN);
cfg_item[UNF_MAX_ITEM_NAME_LEN - 1] = 0;
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO,
"[info]Get port configuration: %s", cfg_item);
/* Get configuration parameters from file */
UNF_LOWLEVEL_GET_CFG_PARMS(ret, cfg_item, &hifc_port_cfg_parm[0],
(unsigned int *)(void *)&hba->port_cfg,
sizeof(hifc_port_cfg_parm) /
sizeof(struct unf_cfg_item_s));
if (ret != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Port(0x%x) can't get configuration",
hba->port_cfg.port_id);
return ret;
}
if (max_parent_qpc_num <= 2048) {
hba->port_cfg.sest_num = 2048;
hba->port_cfg.max_login = 2048;
}
hba->port_cfg.port_id &= 0xff0000;
hba->port_cfg.port_id |= hba->card_info.card_num << 8;
hba->port_cfg.port_id |= hba->card_info.func_num;
hba->port_cfg.tape_support = (unsigned int)v_chip_info->tape_support;
/* Parameters check */
ret = hifc_check_port_cfg(&hba->port_cfg);
if (ret != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Port(0x%x) check configuration incorrect",
hba->port_cfg.port_id);
return ret;
}
/* Set configuration which is got from file */
hba->port_speed_cfg = hba->port_cfg.port_speed;
hba->port_topo_cfg = hba->port_cfg.port_topology;
return ret;
}
void hifc_flush_root_ctx(struct hifc_hba_s *v_hba)
{
int ret = 0;
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return);
ret = hifc_func_rx_tx_flush(v_hba->hw_dev_handle);
if (ret) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]chipif_func_rx_tx_flush failed with return value(0x%x)",
ret);
}
}
static unsigned int hifc_delete_srqc_via_cmdq_sync(struct hifc_hba_s *v_hba,
unsigned long long sqrc_gpa)
{
/* Via CMND Queue */
#define HIFC_DEL_SRQC_TIMEOUT 3000
int ret;
struct hifcoe_cmdqe_delete_srqc_s del_srqc_cmd;
struct hifc_cmd_buf *cmdq_in_buf;
/* Alloc Cmnd buffer */
cmdq_in_buf = hifc_alloc_cmd_buf(v_hba->hw_dev_handle);
if (!cmdq_in_buf) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]cmdq in_cmd_buf allocate failed");
HIFC_ERR_IO_STAT(v_hba, HIFCOE_TASK_T_DEL_SRQC);
return UNF_RETURN_ERROR;
}
/* Build & Send Cmnd */
memset(&del_srqc_cmd, 0, sizeof(del_srqc_cmd));
del_srqc_cmd.wd0.task_type = HIFCOE_TASK_T_DEL_SRQC;
del_srqc_cmd.srqc_gpa_h = HIFC_HIGH_32_BITS(sqrc_gpa);
del_srqc_cmd.srqc_gpa_l = HIFC_LOW_32_BITS(sqrc_gpa);
hifc_cpu_to_big32(&del_srqc_cmd, sizeof(del_srqc_cmd));
memcpy(cmdq_in_buf->buf, &del_srqc_cmd, sizeof(del_srqc_cmd));
cmdq_in_buf->size = sizeof(del_srqc_cmd);
ret = hifc_cmdq_detail_resp(v_hba->hw_dev_handle, HIFC_ACK_TYPE_CMDQ,
HIFC_MOD_FCOE, 0,
cmdq_in_buf, NULL, HIFC_DEL_SRQC_TIMEOUT);
/* Free Cmnd Buffer */
hifc_free_cmd_buf(v_hba->hw_dev_handle, cmdq_in_buf);
if (ret) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]Send del srqc via cmdq failed, ret=0x%x", ret);
HIFC_ERR_IO_STAT(v_hba, HIFCOE_TASK_T_DEL_SRQC);
return UNF_RETURN_ERROR;
}
HIFC_IO_STAT(v_hba, HIFCOE_TASK_T_DEL_SRQC);
return RETURN_OK;
}
void hifc_flush_srq_ctx(struct hifc_hba_s *v_hba)
{
struct hifc_srq_info_s *srq_info = NULL;
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Start destroy ELS SRQC");
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return);
/* Check state to avoid to flush SRQC again */
srq_info = &v_hba->els_srq_info;
if (srq_info->srq_type == HIFC_SRQ_ELS &&
srq_info->enable == UNF_TRUE) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"[event]HBA(0x%x) flush ELS SRQC",
v_hba->port_index);
(void)hifc_delete_srqc_via_cmdq_sync(
v_hba,
srq_info->cqm_srq_info->q_ctx_paddr);
}
}
static unsigned int hifc_create_queues(struct hifc_hba_s *v_hba)
{
unsigned int ret = UNF_RETURN_ERROR;
ret = hifc_create_root_queues(v_hba);
if (ret != RETURN_OK)
		goto out_create_root_queue_fail;
/* Initialize shared resources of SCQ and SRQ in parent queue */
ret = hifc_create_common_share_queues(v_hba);
if (ret != RETURN_OK)
goto out_create_common_queue_fail;
/* Initialize parent queue manager resources */
ret = hifc_alloc_parent_queue_mgr(v_hba);
if (ret != RETURN_OK)
goto out_free_share_queue_resource;
/* Initialize shared WQE page pool in parent SQ */
ret = hifc_alloc_parent_sq_wqe_page_pool(v_hba);
if (ret != RETURN_OK)
goto out_free_parent_queue_resource;
	/*
	 * Notice: in FC, the SQ configuration and the default QID
	 * (default_sq_id) must stay consistent
	 */
v_hba->next_clearing_sq = 0;
v_hba->default_sq_id = HIFC_QID_SQ;
return RETURN_OK;
out_free_parent_queue_resource:
hifc_free_parent_queue_mgr(v_hba);
out_free_share_queue_resource:
hifc_flush_scq_ctx(v_hba);
hifc_flush_srq_ctx(v_hba);
hifc_destroy_common_share_queues(v_hba);
out_create_common_queue_fail:
hifc_destroy_root_queues(v_hba);
out_create_root_queue_fail:
hifc_flush_root_ctx(v_hba);
return ret;
}
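/*
 * The error ladder above unwinds in strict reverse order of creation
 * (WQE page pool -> parent queue manager -> shared SCQ/SRQ -> root
 * queues), flushing the SCQ/SRQ/root contexts before destroying them so
 * the chip no longer references memory that is about to be freed.
 */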
static void hifc_destroy_queues(struct hifc_hba_s *v_hba)
{
/* Free parent queue resource */
hifc_free_parent_queues(v_hba);
/* Free queue manager resource */
hifc_free_parent_queue_mgr(v_hba);
/* Free linked List SQ and WQE page pool resource */
hifc_free_parent_sq_wqe_page_pool(v_hba);
/* Free shared SRQ and SCQ queue resource */
hifc_destroy_common_share_queues(v_hba);
/* Free root queue resource */
hifc_destroy_root_queues(v_hba);
}
static unsigned int hifc_notify_up_open_timer(struct hifc_hba_s *v_hba)
{
int op_code = UNF_TRUE;
unsigned int cmd_scq_bit_map = 0;
unsigned int scq_index = 0;
unsigned int ret;
	for (scq_index = 0; scq_index < HIFC_TOTAL_SCQ_NUM; scq_index++) {
		if (HIFC_SCQ_IS_CMD(scq_index))
			cmd_scq_bit_map |= 1 << scq_index;
	}
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Port(0x%x) open timer, cmdscq bitmap:0x%x",
v_hba->port_cfg.port_id, cmd_scq_bit_map);
ret = hifc_notify_up_config_timer(v_hba, op_code, cmd_scq_bit_map);
return ret;
}
static unsigned int hifc_notify_up_close_timer(struct hifc_hba_s *v_hba)
{
int op_code = UNF_FALSE;
unsigned int cmd_scq_bit_map = 0;
unsigned int scq_index = 0;
unsigned int ret;
	for (scq_index = 0; scq_index < HIFC_TOTAL_SCQ_NUM; scq_index++) {
		if (HIFC_SCQ_IS_CMD(scq_index))
			cmd_scq_bit_map |= 1 << scq_index;
	}
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Port(0x%x) close timer with cmd_scq bitmap(0x%x)",
v_hba->port_cfg.port_id, cmd_scq_bit_map);
ret = hifc_notify_up_config_timer(v_hba, op_code, cmd_scq_bit_map);
return ret;
}
static unsigned int hifc_initial_chip_access(struct hifc_hba_s *v_hba)
{
int ret = RETURN_OK;
	/* 1.
	 * Initialize cqm access related to scq, emb cq, aeq (ucode-->driver)
	 */
service_cqm_temp.service_handle = v_hba;
ret = cqm_service_register(v_hba->hw_dev_handle, &service_cqm_temp);
if (ret != CQM_SUCCESS)
return UNF_RETURN_ERROR;
/* 2. Initialize mailbox(driver-->up), aeq(up--->driver) access */
ret = hifc_register_mgmt_msg_cb(v_hba->hw_dev_handle,
HIFC_MOD_FC, v_hba,
hifc_up_msg_2_driver_proc);
if (ret != CQM_SUCCESS)
goto out_unreg_cqm;
return RETURN_OK;
out_unreg_cqm:
cqm_service_unregister(v_hba->hw_dev_handle);
return UNF_RETURN_ERROR;
}
static void hifc_release_chip_access(struct hifc_hba_s *v_hba)
{
HIFC_CHECK(INVALID_VALUE32, v_hba->hw_dev_handle, return);
hifc_unregister_mgmt_msg_cb(v_hba->hw_dev_handle, HIFC_MOD_FC);
cqm_service_unregister(v_hba->hw_dev_handle);
}
static void hifc_get_chip_info(struct hifc_hba_s *v_hba)
{
unsigned int exi_base = 0;
unsigned int fun_index = 0;
v_hba->vpid_start = v_hba->fc_service_cap.dev_fc_cap.vp_id_start;
v_hba->vpid_end = v_hba->fc_service_cap.dev_fc_cap.vp_id_end;
fun_index = hifc_global_func_id(v_hba->hw_dev_handle);
exi_base = 0;
exi_base += (fun_index * HIFC_EXIT_STRIDE);
v_hba->exit_base = HIFC_LSW(exi_base);
v_hba->exit_count = HIFC_EXIT_STRIDE;
v_hba->image_count = UNF_HIFC_MAXRPORT_NUM;
v_hba->max_support_speed = max_speed;
v_hba->port_index = HIFC_LSB(fun_index);
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Port(0x%x) base information: PortIndex=0x%x, ImgCount=0x%x, ExiBase=0x%x, ExiCount=0x%x, VpIdStart=0x%x, VpIdEnd=0x%x, MaxSpeed=0x%x, Speed=0x%x, Topo=0x%x",
v_hba->port_cfg.port_id, v_hba->port_index,
v_hba->image_count, v_hba->exit_base,
v_hba->exit_count, v_hba->vpid_start,
v_hba->vpid_end, v_hba->max_support_speed,
v_hba->port_speed_cfg, v_hba->port_topo_cfg);
}
static unsigned int hifc_init_host_res(struct hifc_hba_s *v_hba)
{
unsigned int ret = RETURN_OK;
struct hifc_hba_s *hba = v_hba;
HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR);
/* Initialize spin lock */
spin_lock_init(&hba->hba_lock);
spin_lock_init(&hba->flush_state_lock);
spin_lock_init(&hba->delay_info.srq_lock);
/* Initialize init_completion */
init_completion(&hba->hba_init_complete);
init_completion(&hba->mbox_complete);
/* Step-1: initialize the communication channel between driver and uP */
ret = hifc_initial_chip_access(hba);
if (ret != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]HIFC port(0x%x) can't initialize chip access",
hba->port_cfg.port_id);
goto out_unmap_memory;
}
/* Step-2: get chip configuration information before creating
* queue resources
*/
hifc_get_chip_info(hba);
/* Step-3: create queue resources */
ret = hifc_create_queues(hba);
if (ret != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]HIFC port(0x%x) can't create queues",
hba->port_cfg.port_id);
goto out_release_chip_access;
}
/* Initialize status parameters */
hba->active_port_speed = UNF_PORT_SPEED_UNKNOWN;
hba->active_topo = UNF_ACT_TOP_UNKNOWN;
hba->sfp_on = UNF_FALSE;
hba->port_loop_role = UNF_LOOP_ROLE_MASTER_OR_SLAVE;
hba->phy_link = UNF_PORT_LINK_DOWN;
hba->q_set_stage = HIFC_QUEUE_SET_STAGE_INIT;
/* Initialize parameters referring to the lowlevel */
hba->remote_rttov_tag = 0;
hba->port_bbscn_cfg = HIFC_LOWLEVEL_DEFAULT_BB_SCN;
/* Initialize timer, and the unit of E_D_TOV is ms */
hba->remote_edtov_tag = 0;
hba->remote_bbcredit = 0;
hba->compared_bbscn = 0;
hba->compared_edtov_val = UNF_DEFAULT_EDTOV;
hba->compared_ratov_val = UNF_DEFAULT_RATOV;
hba->removing = UNF_FALSE;
hba->dev_present = UNF_TRUE;
/* Initialize parameters about cos */
hba->cos_bit_map = cos_bit_map;
memset(hba->cos_rport_cnt, 0, HIFC_MAX_COS_NUM * sizeof(atomic_t));
/* Mailbox access completion */
complete(&hba->mbox_complete);
/* Notify uP to open timer after creating scq */
ret = hifc_notify_up_open_timer(hba);
if (ret != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]HIFC port(0x%x) can't open timer",
hba->port_cfg.port_id);
goto out_destroy_queues;
}
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]HIFC port(0x%x) initialize host resources succeeded",
hba->port_cfg.port_id);
return ret;
out_destroy_queues:
hifc_flush_scq_ctx(hba);
hifc_flush_srq_ctx(hba);
hifc_flush_root_ctx(hba);
hifc_destroy_queues(hba);
out_release_chip_access:
hifc_release_chip_access(hba);
out_unmap_memory:
return ret;
}
static void hifc_update_lport_config(
struct hifc_hba_s *v_hba,
struct unf_low_level_function_op_s *v_low_level_fun)
{
#define HIFC_MULTI_CONF_NONSUPPORT 0
struct unf_lport_cfg_item_s *lport_cfg_items = NULL;
lport_cfg_items = &v_low_level_fun->lport_cfg_items;
if (v_hba->port_cfg.max_login < v_low_level_fun->support_max_rport)
lport_cfg_items->max_login = v_hba->port_cfg.max_login;
else
lport_cfg_items->max_login = v_low_level_fun->support_max_rport;
if ((v_hba->port_cfg.sest_num / 2) < UNF_RESERVE_SFS_XCHG)
lport_cfg_items->max_io = v_hba->port_cfg.sest_num;
else
lport_cfg_items->max_io = v_hba->port_cfg.sest_num -
UNF_RESERVE_SFS_XCHG;
lport_cfg_items->max_sfs_xchg = UNF_MAX_SFS_XCHG;
lport_cfg_items->port_id = v_hba->port_cfg.port_id;
lport_cfg_items->port_mode = v_hba->port_cfg.port_mode;
lport_cfg_items->port_topology = v_hba->port_cfg.port_topology;
lport_cfg_items->max_queue_depth = v_hba->port_cfg.max_queue_depth;
lport_cfg_items->port_speed = v_hba->port_cfg.port_speed;
lport_cfg_items->tape_support = v_hba->port_cfg.tape_support;
lport_cfg_items->res_mgmt_enabled = UNF_FALSE;
v_low_level_fun->sys_port_name =
*(unsigned long long *)v_hba->sys_port_name;
v_low_level_fun->sys_node_name =
*(unsigned long long *)v_hba->sys_node_name;
/* Update chip information */
v_low_level_fun->dev = v_hba->pci_dev;
v_low_level_fun->chip_info.chip_work_mode = v_hba->work_mode;
v_low_level_fun->chip_info.chip_type = v_hba->chip_type;
v_low_level_fun->chip_info.disable_err_flag = 0;
v_low_level_fun->support_max_speed = v_hba->max_support_speed;
v_low_level_fun->chip_id = 0;
v_low_level_fun->sfp_type = UNF_PORT_TYPE_FC_SFP;
v_low_level_fun->multi_conf_support = HIFC_MULTI_CONF_NONSUPPORT;
v_low_level_fun->support_max_xid_range = v_hba->port_cfg.sest_num;
v_low_level_fun->update_fw_reset_active =
UNF_PORT_UNGRADE_FW_RESET_INACTIVE;
v_low_level_fun->port_type = DRV_PORT_ENTITY_TYPE_PHYSICAL;
if ((lport_cfg_items->port_id & UNF_FIRST_LPORT_ID_MASK) ==
lport_cfg_items->port_id) {
v_low_level_fun->support_upgrade_report =
UNF_PORT_SUPPORT_UPGRADE_REPORT;
} else {
v_low_level_fun->support_upgrade_report =
UNF_PORT_UNSUPPORT_UPGRADE_REPORT;
}
v_low_level_fun->low_level_type |= UNF_FC_PROTOCOL_TYPE;
}
static unsigned int hifc_create_lport(struct hifc_hba_s *v_hba)
{
void *lport = NULL;
struct unf_low_level_function_op_s low_level_fun;
HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR);
hifc_fun_op.dev = v_hba->pci_dev;
memcpy(&low_level_fun, &hifc_fun_op,
sizeof(struct unf_low_level_function_op_s));
/* Update port configuration table */
hifc_update_lport_config(v_hba, &low_level_fun);
/* Apply for lport resources */
UNF_LOWLEVEL_ALLOC_LPORT(lport, v_hba, &low_level_fun);
if (!lport) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Port(0x%x) can't allocate Lport",
v_hba->port_cfg.port_id);
return UNF_RETURN_ERROR;
}
v_hba->lport = lport;
return RETURN_OK;
}
void hifc_release_probe_index(unsigned int probe_index)
{
if (probe_index >= HIFC_MAX_PROBE_PORT_NUM) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[warn]Probe index(0x%x) is invalid", probe_index);
return;
}
spin_lock(&probe_spin_lock);
if (!test_bit((int)probe_index, (const unsigned long *)probe_bit_map)) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[warn]Probe index(0x%x) is not probed",
probe_index);
spin_unlock(&probe_spin_lock);
return;
}
clear_bit((int)probe_index, probe_bit_map);
spin_unlock(&probe_spin_lock);
}
static void hifc_release_host_res(struct hifc_hba_s *v_hba)
{
hifc_destroy_queues(v_hba);
hifc_release_chip_access(v_hba);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"[info]Port(0x%x) release low level resource done",
v_hba->port_cfg.port_id);
}
static struct hifc_hba_s *hifc_init_hba(struct pci_dev *v_dev,
void *v_hwdev_handle,
struct hifc_chip_info_s *v_chip_info,
unsigned char v_card_num)
{
unsigned int ret = RETURN_OK;
struct hifc_hba_s *hba = NULL;
/* Allocate HBA */
hba = kmalloc(sizeof(*hba), GFP_ATOMIC);
HIFC_CHECK(INVALID_VALUE32, hba, return NULL);
memset(hba, 0, sizeof(struct hifc_hba_s));
/* Heartbeat default */
hba->heart_status = 1;
/* Private data in pciDev */
hba->pci_dev = v_dev; /* PCI device */
hba->hw_dev_handle = v_hwdev_handle;
/* Work mode */
hba->work_mode = v_chip_info->work_mode;
/* Create work queue */
hba->work_queue = create_singlethread_workqueue("hifc");
if (!hba->work_queue) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[err]Hifc creat workqueue failed");
goto out_free_hba;
}
/* Init delay work */
INIT_DELAYED_WORK(&hba->delay_info.del_work,
hifc_rcvd_els_from_srq_time_out);
/* Notice: Only use FC features */
(void)hifc_support_fc(v_hwdev_handle, &hba->fc_service_cap);
/* Check parent context available */
if (hba->fc_service_cap.dev_fc_cap.max_parent_qpc_num == 0) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]FC parent context is not allocated in this function");
goto out_destroy_workqueue;
}
max_parent_qpc_num = hba->fc_service_cap.dev_fc_cap.max_parent_qpc_num;
/* Get port configuration */
ret = hifc_get_port_cfg(hba, v_chip_info, v_card_num);
if (ret != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[err]Can't get port configuration");
goto out_destroy_workqueue;
}
/* Get WWN */
*(unsigned long long *)hba->sys_node_name = v_chip_info->wwnn;
*(unsigned long long *)hba->sys_port_name = v_chip_info->wwpn;
/* Initialize host resources */
ret = hifc_init_host_res(hba);
if (ret != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]HIFC port(0x%x) can't initialize host resource",
hba->port_cfg.port_id);
goto out_destroy_workqueue;
}
/* Local Port create */
ret = hifc_create_lport(hba);
if (ret != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]HIFC port(0x%x) can't create lport",
hba->port_cfg.port_id);
goto out_release_host_res;
}
complete(&hba->hba_init_complete);
/* Print reference count */
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT,
"[info]Port(0x%x) probe succeeded.",
hba->port_cfg.port_id);
return hba;
out_release_host_res:
hifc_flush_scq_ctx(hba);
hifc_flush_srq_ctx(hba);
hifc_flush_root_ctx(hba);
hifc_release_host_res(hba);
out_destroy_workqueue:
flush_workqueue(hba->work_queue);
destroy_workqueue(hba->work_queue);
hba->work_queue = NULL;
out_free_hba:
kfree(hba);
return NULL;
}
void hifc_get_total_probed_num(unsigned int *v_probe_cnt)
{
unsigned int i = 0;
unsigned int count = 0;
spin_lock(&probe_spin_lock);
for (i = 0; i < HIFC_MAX_PROBE_PORT_NUM; i++) {
if (test_bit((int)i, (const unsigned long *)probe_bit_map))
count++;
}
*v_probe_cnt = count;
spin_unlock(&probe_spin_lock);
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO,
"[info]Probed port total number is 0x%x", count);
}
static unsigned int hifc_assign_card_num(struct hifc_lld_dev *lld_dev,
struct hifc_chip_info_s *v_chip_info,
unsigned char *v_card_num)
{
unsigned char i = 0;
unsigned long long card_index = 0;
card_index = (!pci_is_root_bus(lld_dev->pdev->bus)) ?
lld_dev->pdev->bus->parent->number :
lld_dev->pdev->bus->number;
spin_lock(&probe_spin_lock);
for (i = 0; i < HIFC_MAX_CARD_NUM; i++) {
if (test_bit((int)i, (const unsigned long *)card_num_bit_map)) {
if ((card_num_manage[i].card_number == card_index) &&
(card_num_manage[i].is_removing == UNF_FALSE)) {
card_num_manage[i].port_count++;
*v_card_num = i;
spin_unlock(&probe_spin_lock);
return RETURN_OK;
}
}
}
for (i = 0; i < HIFC_MAX_CARD_NUM; i++) {
if (!test_bit((int)i,
(const unsigned long *)card_num_bit_map)) {
card_num_manage[i].card_number = card_index;
card_num_manage[i].port_count = 1;
card_num_manage[i].is_removing = UNF_FALSE;
*v_card_num = i;
set_bit(i, card_num_bit_map);
spin_unlock(&probe_spin_lock);
return RETURN_OK;
}
}
spin_unlock(&probe_spin_lock);
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Have probe more than 0x%x port, probe failed", i);
return UNF_RETURN_ERROR;
}
static void hifc_dec_and_free_card_num(unsigned char v_card_num)
{
/* 2 ports per card */
if (v_card_num >= HIFC_MAX_CARD_NUM) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Card number(0x%x) is invalid", v_card_num);
return;
}
spin_lock(&probe_spin_lock);
if (test_bit((int)v_card_num,
(const unsigned long *)card_num_bit_map)) {
card_num_manage[v_card_num].port_count--;
card_num_manage[v_card_num].is_removing = UNF_TRUE;
if (card_num_manage[v_card_num].port_count == 0) {
card_num_manage[v_card_num].card_number = 0;
card_num_manage[v_card_num].is_removing = UNF_FALSE;
clear_bit((int)v_card_num, card_num_bit_map);
}
} else {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Can not find card number(0x%x)", v_card_num);
}
spin_unlock(&probe_spin_lock);
}
unsigned int hifc_assign_probe_index(unsigned int *v_probe_index)
{
unsigned int i = 0;
spin_lock(&probe_spin_lock);
for (i = 0; i < HIFC_MAX_PROBE_PORT_NUM; i++) {
if (!test_bit((int)i, (const unsigned long *)probe_bit_map)) {
*v_probe_index = i;
set_bit(i, probe_bit_map);
spin_unlock(&probe_spin_lock);
return RETURN_OK;
}
}
spin_unlock(&probe_spin_lock);
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Have probe more than 0x%x port, probe failed", i);
return UNF_RETURN_ERROR;
}
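/*
 * probe_bit_map doubles as the ID allocator for probed ports: the first
 * clear bit becomes the new probe index, hifc_release_probe_index()
 * returns it, and hifc_get_total_probed_num() is simply a popcount of the
 * map, all serialized by probe_spin_lock.
 */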
int hifc_probe(struct hifc_lld_dev *lld_dev, void **uld_dev, char *uld_dev_name)
{
struct pci_dev *dev = NULL;
struct hifc_hba_s *hba = NULL;
unsigned int ret = UNF_RETURN_ERROR;
unsigned int probe_index = 0;
unsigned int probe_total_num = 0;
unsigned char card_num = INVALID_VALUE8;
struct hifc_chip_info_s chip_info;
HIFC_CHECK(INVALID_VALUE32, lld_dev, return UNF_RETURN_ERROR_S32);
HIFC_CHECK(INVALID_VALUE32, lld_dev->hwdev,
return UNF_RETURN_ERROR_S32);
HIFC_CHECK(INVALID_VALUE32, lld_dev->pdev, return UNF_RETURN_ERROR_S32);
HIFC_CHECK(INVALID_VALUE32, uld_dev, return UNF_RETURN_ERROR_S32);
HIFC_CHECK(INVALID_VALUE32, uld_dev_name, return UNF_RETURN_ERROR_S32);
dev = lld_dev->pdev; /* pcie device */
memset(&chip_info, 0, sizeof(struct hifc_chip_info_s));
/* 1. Get & check Total_Probed_number */
hifc_get_total_probed_num(&probe_total_num);
if (probe_total_num >= HIFC_MAX_PORT_NUM) {
		HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
			   "[err]Total probe num(0x%x) is larger than the allowed max(0x%x)",
			   probe_total_num, HIFC_MAX_PORT_NUM);
return UNF_RETURN_ERROR_S32;
}
/* 2. Check device work mode */
if (hifc_support_fc(lld_dev->hwdev, NULL)) {
chip_info.work_mode = HIFC_SMARTIO_WORK_MODE_FC;
} else {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Port work mode is not FC");
return UNF_RETURN_ERROR_S32;
}
	/* 3. Assign & Get new Probe index */
ret = hifc_assign_probe_index(&probe_index);
if (ret != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]AssignProbeIndex fail");
return UNF_RETURN_ERROR_S32;
}
ret = hifc_get_chip_capability((void *)lld_dev->hwdev, &chip_info);
	if (ret != RETURN_OK) {
		HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
			   "[err]GetChipCapability fail");
		hifc_release_probe_index(probe_index);
		return UNF_RETURN_ERROR_S32;
	}
/* Assign & Get new Card number */
ret = hifc_assign_card_num(lld_dev, &chip_info, &card_num);
if (ret != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]hifc_assign_card_num fail");
hifc_release_probe_index(probe_index);
return UNF_RETURN_ERROR_S32;
}
/* Init HBA resource */
hba = hifc_init_hba(dev, lld_dev->hwdev, &chip_info, card_num);
if (!hba) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Probe HBA(0x%x) failed.", probe_index);
hifc_release_probe_index(probe_index);
hifc_dec_and_free_card_num(card_num);
return UNF_RETURN_ERROR_S32;
}
/* Name by the order of probe */
*uld_dev = hba;
snprintf(uld_dev_name, HIFC_PORT_NAME_STR_LEN, "%s%02x%02x",
HIFC_PORT_NAME_LABEL,
hba->card_info.card_num, hba->card_info.func_num);
memcpy(hba->port_name, uld_dev_name, HIFC_PORT_NAME_STR_LEN);
hba->probe_index = probe_index;
hifc_hba[probe_index] = hba;
return RETURN_OK;
}
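/*
 * Example (illustrative): assuming HIFC_PORT_NAME_LABEL is "hifc", card
 * number 0 and PCI function 1 would yield the ULD device name "hifc0001"
 * from the snprintf() above; the name is also cached in hba->port_name.
 */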
static unsigned int hifc_port_check_fw_ready(struct hifc_hba_s *v_hba)
{
#define HIFC_PORT_CLEAR_DONE 0
#define HIFC_PORT_CLEAR_DOING 1
unsigned int clear_state = HIFC_PORT_CLEAR_DOING;
unsigned int ret = RETURN_OK;
unsigned int wait_time_out = 0;
do {
msleep(1000);
wait_time_out += 1000;
ret = hifc_mbx_get_fw_clear_stat(v_hba, &clear_state);
if (ret != RETURN_OK)
return UNF_RETURN_ERROR;
		/* Fail if clearing has not completed within 30 seconds */
if ((wait_time_out > 30000) &&
(clear_state != HIFC_PORT_CLEAR_DONE))
return UNF_RETURN_ERROR;
} while (clear_state != HIFC_PORT_CLEAR_DONE);
return RETURN_OK;
}
static unsigned int hifc_sfp_switch(void *v_hba, void *v_para_in)
{
struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba;
int turn_on = UNF_FALSE;
unsigned int ret = RETURN_OK;
HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR);
HIFC_CHECK(INVALID_VALUE32, v_para_in, return UNF_RETURN_ERROR);
/* Redundancy check */
turn_on = *((int *)v_para_in);
if (turn_on == hba->sfp_on) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO,
"[info]Port(0x%x) FC physical port is already %s",
hba->port_cfg.port_id, (turn_on) ? "on" : "off");
return ret;
}
if (turn_on == UNF_TRUE) {
ret = hifc_port_check_fw_ready(hba);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT,
UNF_WARN,
"[warn]Get port(0x%x) clear state failed, turn on fail",
hba->port_cfg.port_id);
return ret;
}
/* At first, configure port table info if necessary */
ret = hifc_config_port_table(hba);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT,
UNF_ERR,
"[err]Port(0x%x) can't configurate port table",
hba->port_cfg.port_id);
return ret;
}
}
/* Switch physical port */
ret = hifc_port_switch(hba, turn_on);
if (ret != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[err]Port(0x%x) switch failed",
hba->port_cfg.port_id);
return ret;
}
/* Update HBA's sfp state */
hba->sfp_on = turn_on;
return ret;
}
static unsigned int hifc_destroy_lport(struct hifc_hba_s *v_hba)
{
unsigned int ret = UNF_RETURN_ERROR;
UNF_LOWLEVEL_RELEASE_LOCAL_PORT(ret, v_hba->lport);
v_hba->lport = NULL;
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"[info]Port(0x%x) destroy L_Port done",
v_hba->port_cfg.port_id);
return ret;
}
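/*
* Light-reset sequence: close the SFP, wait for the firmware clear state,
* flush the queues, issue a light chip reset via mailbox, re-enable queue
* dispatch and finally restore the SFP to its pre-reset state.
*/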
unsigned int hifc_port_reset(struct hifc_hba_s *v_hba)
{
unsigned int ret = RETURN_OK;
unsigned long time_out = 0;
int sfp_before_reset = UNF_FALSE;
int off_para_in = UNF_FALSE;
struct pci_dev *dev = NULL;
struct hifc_hba_s *hba = v_hba;
HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR);
dev = hba->pci_dev;
HIFC_CHECK(INVALID_VALUE32, dev, return UNF_RETURN_ERROR);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT,
"[event]Port(0x%x) reset HBA begin",
hba->port_cfg.port_id);
/* Wait for last init/reset completion */
time_out = wait_for_completion_timeout(
&hba->hba_init_complete,
(unsigned long)HIFC_PORT_INIT_TIME_SEC_MAX * HZ);
if (time_out == UNF_ZERO) {
UNF_TRACE(INVALID_VALUE32, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Last HBA initialize/reset timeout: %d second",
HIFC_PORT_INIT_TIME_SEC_MAX);
return UNF_RETURN_ERROR;
}
/* Save current port state */
sfp_before_reset = hba->sfp_on;
/* Inform the reset event to CM level before beginning */
UNF_LOWLEVEL_PORT_EVENT(ret, hba->lport, UNF_PORT_RESET_START, NULL);
hba->reset_time = jiffies;
/* Close SFP */
ret = hifc_sfp_switch(hba, &off_para_in);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Port(0x%x) can't close SFP",
hba->port_cfg.port_id);
hba->sfp_on = sfp_before_reset;
complete(&hba->hba_init_complete);
return ret;
}
ret = hifc_port_check_fw_ready(hba);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Get port(0x%x) clear state failed, hang port and report chip error",
hba->port_cfg.port_id);
complete(&hba->hba_init_complete);
return ret;
}
hifc_queue_pre_process(hba, UNF_FALSE);
ret = hifc_mbox_reset_chip(hba, HIFC_MBOX_SUBTYPE_LIGHT_RESET);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]HIFC port(0x%x) can't reset chip mailbox",
hba->port_cfg.port_id);
UNF_LOWLEVEL_PORT_EVENT(ret, hba->lport,
UNF_PORT_GET_FWLOG, NULL);
UNF_LOWLEVEL_PORT_EVENT(ret, hba->lport,
UNF_PORT_DEBUG_DUMP, NULL);
}
/* Inform the success to CM level */
UNF_LOWLEVEL_PORT_EVENT(ret, hba->lport, UNF_PORT_RESET_END, NULL);
/* Queue open */
hifc_enable_queues_dispatch(hba);
/* Open SFP */
(void)hifc_sfp_switch(hba, &sfp_before_reset);
complete(&hba->hba_init_complete);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[event]Port(0x%x) reset HBA done",
hba->port_cfg.port_id);
return ret;
#undef HIFC_WAIT_LINKDOWN_EVENT_MS
}
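/*
* Synchronously delete one SCQ context: build a HIFCOE_TASK_T_DEL_SCQC
* command, convert it to big-endian, send it through the command queue and
* wait up to HIFC_DEL_SCQC_TIMEOUT ms for the response.
*/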
static unsigned int hifc_delete_scqc_via_cmdq_sync(struct hifc_hba_s *v_hba,
unsigned int scqn)
{
/* Via CMND Queue */
#define HIFC_DEL_SCQC_TIMEOUT 3000
int ret;
struct hifcoe_cmdqe_delete_scqc_s del_scqc_cmd;
struct hifc_cmd_buf *cmdq_in_buf;
/* Alloc cmd buffer */
cmdq_in_buf = hifc_alloc_cmd_buf(v_hba->hw_dev_handle);
if (!cmdq_in_buf) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]cmdq in_cmd_buf alloc failed");
HIFC_ERR_IO_STAT(v_hba, HIFCOE_TASK_T_DEL_SCQC);
return UNF_RETURN_ERROR;
}
/* Build & Send Cmnd */
memset(&del_scqc_cmd, 0, sizeof(del_scqc_cmd));
del_scqc_cmd.wd0.task_type = HIFCOE_TASK_T_DEL_SCQC;
del_scqc_cmd.wd1.scqn = HIFC_LSW(scqn);
hifc_cpu_to_big32(&del_scqc_cmd, sizeof(del_scqc_cmd));
memcpy(cmdq_in_buf->buf, &del_scqc_cmd, sizeof(del_scqc_cmd));
cmdq_in_buf->size = sizeof(del_scqc_cmd);
ret = hifc_cmdq_detail_resp(v_hba->hw_dev_handle, HIFC_ACK_TYPE_CMDQ,
HIFC_MOD_FCOE, 0,
cmdq_in_buf, NULL, HIFC_DEL_SCQC_TIMEOUT);
/* Free cmnd buffer */
hifc_free_cmd_buf(v_hba->hw_dev_handle, cmdq_in_buf);
if (ret) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]Send del scqc via cmdq failed, ret=0x%x", ret);
HIFC_ERR_IO_STAT(v_hba, HIFCOE_TASK_T_DEL_SCQC);
return UNF_RETURN_ERROR;
}
HIFC_IO_STAT(v_hba, HIFCOE_TASK_T_DEL_SCQC);
return RETURN_OK;
}
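/*
* Note: only one delete command (SCQN 0) is sent below although the trace
* reports HIFC_TOTAL_SCQ_NUM contexts; the assumption here is that the
* firmware tears down the remaining SCQ contexts as part of this command.
*/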
void hifc_flush_scq_ctx(struct hifc_hba_s *v_hba)
{
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Start destroy total 0x%x SCQC", HIFC_TOTAL_SCQ_NUM);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return);
(void)hifc_delete_scqc_via_cmdq_sync(v_hba, 0);
}
void hifc_set_hba_flush_state(struct hifc_hba_s *v_hba, int in_flush)
{
unsigned long flag = 0;
spin_lock_irqsave(&v_hba->flush_state_lock, flag);
v_hba->in_flushing = in_flush;
spin_unlock_irqrestore(&v_hba->flush_state_lock, flag);
}
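/*
* Read the vendor id back from PCI config space: a failed read or a
* mismatched id means the device was surprise-removed.
*/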
static int hifc_hba_is_present(struct hifc_hba_s *v_hba)
{
int ret = RETURN_OK;
int present = UNF_FALSE;
unsigned int vendor_id = 0;
ret = pci_read_config_dword(v_hba->pci_dev, 0, &vendor_id);
vendor_id &= HIFC_PCI_VENDOR_ID_MASK;
if ((ret == RETURN_OK) && (vendor_id == HIFC_PCI_VENDOR_ID)) {
present = UNF_TRUE;
} else {
present = UNF_FALSE;
v_hba->dev_present = UNF_FALSE;
}
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT,
"[info]Port %s remove: vender_id=0x%x, ret=0x%x",
present ? "normal" : "surprise", vendor_id, ret);
return present;
}
static void hifc_exit(struct pci_dev *v_dev, struct hifc_hba_s *v_hba)
{
unsigned int ret = UNF_RETURN_ERROR;
int sfp_switch = UNF_FALSE;
int present = UNF_TRUE;
v_hba->removing = UNF_TRUE;
/* 1. Check HBA present or not */
present = hifc_hba_is_present(v_hba);
if (present == UNF_TRUE) {
if (v_hba->phy_link == UNF_PORT_LINK_DOWN)
v_hba->q_set_stage = HIFC_QUEUE_SET_STAGE_FLUSHDONE;
/* At first, close sfp */
sfp_switch = UNF_FALSE;
(void)hifc_sfp_switch((void *)v_hba, (void *)&sfp_switch);
}
/* 2. Report COM with HBA removing: delete route timer delay work */
UNF_LOWLEVEL_PORT_EVENT(ret, v_hba->lport, UNF_PORT_BEGIN_REMOVE, NULL);
/* 3. Report HBA Nop to COM; COM releases I/O(s) & R_Port(s) forcibly */
UNF_LOWLEVEL_PORT_EVENT(ret, v_hba->lport, UNF_PORT_NOP, NULL);
if (ret != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]PCI device(%p) remove port(0x%x) failed",
v_dev, v_hba->port_index);
}
if (present == UNF_TRUE) {
/* 4.1 Wait for all SQ empty, free SRQ buffer & SRQC */
hifc_queue_pre_process(v_hba, UNF_TRUE);
}
/* 5. Destroy L_Port */
(void)hifc_destroy_lport(v_hba);
/* 6. When the HBA is present */
if (present == UNF_TRUE) {
/* Enable Queues dispatch */
hifc_enable_queues_dispatch(v_hba);
/* Need reset port if necessary */
(void)hifc_mbox_reset_chip(v_hba,
HIFC_MBOX_SUBTYPE_HEAVY_RESET);
/* Flush SCQ context */
hifc_flush_scq_ctx(v_hba);
/* Flush SRQ context */
hifc_flush_srq_ctx(v_hba);
/* Flush Root context in order to prevent DMA */
hifc_flush_root_ctx(v_hba);
/*
* NOTE: while flushing txrx, the hash bucket will be cached out in
* UP. Wait for the resources to be cleared completely
*/
msleep(1000);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"[info]Port(0x%x) flush scq & srq & root context done",
v_hba->port_cfg.port_id);
}
/* 7. Notify uP to close the timer before deleting the SCQ */
ret = hifc_notify_up_close_timer(v_hba);
if (ret != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[err]HIFC port(0x%x) can't close timer",
v_hba->port_cfg.port_id);
}
/* 8. Release host resources */
hifc_release_host_res(v_hba);
/* 9. Destroy FC work queue */
if (v_hba->work_queue) {
flush_workqueue(v_hba->work_queue);
destroy_workqueue(v_hba->work_queue);
v_hba->work_queue = NULL;
}
/* 10. Release Probe index & Decrease card number */
hifc_release_probe_index(v_hba->probe_index);
hifc_dec_and_free_card_num((unsigned char)v_hba->card_info.card_num);
/* 11. Free HBA memory */
kfree(v_hba);
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[event]PCI device(%p) remove succeed", v_dev);
}
void hifc_remove(struct hifc_lld_dev *lld_dev, void *uld_dev)
{
struct pci_dev *dev = NULL;
struct hifc_hba_s *hba = (struct hifc_hba_s *)uld_dev;
unsigned int probe_total_num = 0;
unsigned int probe_index = 0;
HIFC_CHECK(INVALID_VALUE32, NULL != lld_dev, return);
HIFC_CHECK(INVALID_VALUE32, NULL != uld_dev, return);
HIFC_CHECK(INVALID_VALUE32, NULL != lld_dev->hwdev, return);
HIFC_CHECK(INVALID_VALUE32, NULL != lld_dev->pdev, return);
dev = hba->pci_dev;
/* Get total probed port number */
hifc_get_total_probed_num(&probe_total_num);
if (probe_total_num < 1) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[warn]Port manager is empty and no need to remove");
return;
}
/* check pci vendor id */
if (dev->vendor != HIFC_PCI_VENDOR_ID_HUAWEI) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[warn]Wrong vendor id(0x%x) and exit", dev->vendor);
return;
}
/* Check function ability */
if (!(hifc_support_fc(lld_dev->hwdev, NULL))) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]FC is not enable in this function");
return;
}
/* Get probe index */
probe_index = hba->probe_index;
/* Parent context allocation check */
if (hba->fc_service_cap.dev_fc_cap.max_parent_qpc_num == 0) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]FC parent context not allocate in this function");
return;
}
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]HBA(0x%x) start removing...", hba->port_index);
/* HBA removing... */
hifc_exit(dev, hba);
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT,
"[event]Port(0x%x) pci device removed, vendorid(0x%04x) devid(0x%04x)",
probe_index, dev->vendor, dev->device);
/* Probe index check */
if (probe_index < HIFC_HBA_PORT_MAX_NUM) {
hifc_hba[probe_index] = NULL;
} else {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Probe index(0x%x) is invalid and remove failed",
probe_index);
}
hifc_get_total_probed_num(&probe_total_num);
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[event]Removed index=%u, RemainNum=%u",
probe_index, probe_total_num);
}
void hifc_event(struct hifc_lld_dev *lld_dev, void *uld_dev,
struct hifc_event_info *event)
{
struct hifc_hba_s *hba = uld_dev;
HIFC_CHECK(INVALID_VALUE32, NULL != lld_dev, return);
HIFC_CHECK(INVALID_VALUE32, NULL != lld_dev->hwdev, return);
HIFC_CHECK(INVALID_VALUE32, NULL != lld_dev->pdev, return);
HIFC_CHECK(INVALID_VALUE32, NULL != hba, return);
HIFC_CHECK(INVALID_VALUE32, NULL != event, return);
switch (event->type) {
case HIFC_EVENT_HEART_LOST:
hba->heart_status = 0;
HIFC_COM_UP_ERR_EVENT_STAT(hba, HIFC_EVENT_HEART_LOST);
break;
default:
break;
}
}
static unsigned int hifc_get_hba_pcie_link_state(void *v_hba,
void *v_link_state)
{
int *link_state = v_link_state;
int present = UNF_TRUE;
struct hifc_hba_s *hba = v_hba;
int ret;
int last_dev_state = UNF_TRUE;
int cur_dev_state = UNF_TRUE;
HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR);
HIFC_CHECK(INVALID_VALUE32, v_link_state, return UNF_RETURN_ERROR);
last_dev_state = hba->dev_present;
ret = hifc_get_card_present_state(hba->hw_dev_handle, (bool *)&present);
if (ret || present != UNF_TRUE) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT,
"[event]port(0x%x) is not present,ret:%d, present:%d",
hba->port_cfg.port_id, ret, present);
cur_dev_state = UNF_FALSE;
} else {
cur_dev_state = UNF_TRUE;
}
hba->dev_present = cur_dev_state;
/* The heartbeat is considered lost only when the PCIe link is detected
* down twice in a row.
*/
if ((last_dev_state == UNF_FALSE) && (cur_dev_state == UNF_FALSE))
hba->heart_status = UNF_FALSE;
*link_state = hba->dev_present;
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_ABNORMAL, UNF_INFO,
"Port:0x%x,get dev present:%d", hba->port_cfg.port_id,
*link_state);
return RETURN_OK;
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __HIFC_HBA_H__
#define __HIFC_HBA_H__
#include "unf_common.h"
#include "hifc_queue.h"
#include "hifc_api_cmd.h"
#include "hifc_mgmt.h"
#define HIFC_PCI_VENDOR_ID_MASK (0xffff)
#define HIFC_LOWLEVEL_DEFAULT_LOOP_BB_CREDIT 8
#define HIFC_LOWLEVEL_DEFAULT_32G_BB_CREDIT 255
#define HIFC_LOWLEVEL_DEFAULT_16G_BB_CREDIT 255
#define HIFC_LOWLEVEL_DEFAULT_842G_BB_CREDIT 255
#define HIFC_LOWLEVEL_DEFAULT_BB_SCN 0
#define HIFC_LOWLEVEL_DEFAULT_32G_ESCH_VALUE 28081
#define HIFC_LOWLEVEL_DEFAULT_16G_ESCH_VALUE 14100
#define HIFC_LOWLEVEL_DEFAULT_842G_ESCH_VALUE 7000
#define HIFC_LOWLEVEL_DEFAULT_ESCH_BUS_SIZE 0x2000
#define HIFC_SMARTIO_WORK_MODE_FC 0x1
#define UNF_FUN_ID_MASK 0x07
#define UNF_HIFC_FC 0x01
#define UNF_HIFC_MAXNPIV_NUM 64
#define HIFC_MAX_COS_NUM 8
#define HIFC_PCI_VENDOR_ID_HUAWEI 0x19e5
#define HIFC_SCQ_CNTX_SIZE 32
#define HIFC_SRQ_CNTX_SIZE 64
#define HIFC_PORT_INIT_TIME_SEC_MAX 1
#define HIFC_PORT_NAME_LABEL "hifc"
#define HIFC_PORT_NAME_STR_LEN 16
#define HIFC_MAX_PROBE_PORT_NUM 64
#define HIFC_PORT_NUM_PER_TABLE 64
#define HIFC_MAX_CARD_NUM 32
#define HIFC_HBA_PORT_MAX_NUM HIFC_MAX_PROBE_PORT_NUM
/* Heart Lost Flag */
#define HIFC_EVENT_HEART_LOST 0
#define HIFC_GET_HBA_PORT_ID(__hba) ((__hba)->port_index)
#define HIFC_HBA_NOT_PRESENT(__hba) ((__hba)->dev_present == UNF_FALSE)
struct hifc_port_cfg_s {
unsigned int port_id; /* Port ID */
unsigned int port_mode; /* Port mode:INI(0x20) TGT(0x10) BOTH(0x30) */
unsigned int port_topology; /* Port topo:0x3:loop,0xc:p2p,0xf:auto */
unsigned int port_alpa; /* Port ALPA */
unsigned int max_queue_depth;/* Max Queue depth Registration to SCSI */
unsigned int sest_num; /* IO burst num:512-4096 */
unsigned int max_login; /* Max Login Session. */
unsigned int node_name_hi; /* nodename high 32 bits */
unsigned int node_name_lo; /* nodename low 32 bits */
unsigned int port_name_hi; /* portname high 32 bits */
unsigned int port_name_lo; /* portname low 32 bits */
/* Port speed 0:auto 4:4Gbps 8:8Gbps 16:16Gbps */
unsigned int port_speed;
unsigned int interrupt_delay; /* Delay times(ms) in interrupt */
unsigned int tape_support; /* tape support */
};
#define HIFC_VER_INFO_SIZE 128
struct hifc_drv_version_s {
char ver[HIFC_VER_INFO_SIZE];
};
struct hifc_card_info_s {
unsigned int card_num : 8;
unsigned int func_num : 8;
unsigned int base_func : 8;
/*
* Card type: UNF_FC_SERVER_BOARD_32_G(6) for 32G mode,
* UNF_FC_SERVER_BOARD_16_G(7) for 16G mode
*/
unsigned int card_type : 8;
};
struct hifc_card_num_manage_s {
int is_removing;
unsigned int port_count;
unsigned long long card_number;
};
struct hifc_led_state_s {
unsigned char green_speed_led;
unsigned char yellow_speed_led;
unsigned char ac_led;
unsigned char reserved;
};
enum hifc_queue_set_stage_e {
HIFC_QUEUE_SET_STAGE_INIT = 0,
HIFC_QUEUE_SET_STAGE_SCANNING,
HIFC_QUEUE_SET_STAGE_FLUSHING,
HIFC_QUEUE_SET_STAGE_FLUSHDONE,
HIFC_QUEUE_SET_STAGE_BUTT
};
struct hifc_srq_delay_info_s {
unsigned char srq_delay_flag; /* Check whether a delay is needed */
unsigned char root_rq_rcvd_flag;
unsigned short rsd;
spinlock_t srq_lock;
struct unf_frame_pkg_s pkg;
struct delayed_work del_work;
};
struct hifc_fw_ver_detail_s {
unsigned char ucode_ver[HIFC_VER_LEN];
unsigned char ucode_compile_time[HIFC_COMPILE_TIME_LEN];
unsigned char up_ver[HIFC_VER_LEN];
unsigned char up_compile_time[HIFC_COMPILE_TIME_LEN];
unsigned char boot_ver[HIFC_VER_LEN];
unsigned char boot_compile_time[HIFC_COMPILE_TIME_LEN];
};
/* get wwpn and wwnn */
struct hifc_chip_info_s {
unsigned char work_mode;
unsigned char tape_support;
unsigned long long wwpn;
unsigned long long wwnn;
};
struct hifc_hba_s {
struct pci_dev *pci_dev;
void *hw_dev_handle;
struct fc_service_cap fc_service_cap;
struct hifc_scq_info_s scq_info[HIFC_TOTAL_SCQ_NUM];
struct hifc_srq_info_s els_srq_info;
/* PCI IO Memory */
void __iomem *bar0;
unsigned int bar0_len;
struct hifc_root_info_s root_info;
struct hifc_parent_queue_mgr_s *parent_queue_mgr;
/* Link list Sq WqePage Pool */
struct hifc_sq_wqe_page_pool_s sq_wpg_pool;
enum hifc_queue_set_stage_e q_set_stage;
unsigned int next_clearing_sq;
unsigned int default_sq_id;
/* Port parameters, Obtained through firmware */
unsigned short q_s_max_count;
unsigned char port_type; /* FC Port */
unsigned char port_index; /* Phy Port */
unsigned int default_scqn;
unsigned char chip_type; /* chiptype:Smart or fc */
unsigned char work_mode;
struct hifc_card_info_s card_info;
char port_name[HIFC_PORT_NAME_STR_LEN];
unsigned int probe_index;
unsigned short exit_base;
unsigned short exit_count;
unsigned short image_count;
unsigned char vpid_start;
unsigned char vpid_end;
spinlock_t flush_state_lock;
int in_flushing;
struct hifc_port_cfg_s port_cfg; /* Obtained through Config */
void *lport; /* Used in UNF level */
unsigned char sys_node_name[UNF_WWN_LEN];
unsigned char sys_port_name[UNF_WWN_LEN];
struct completion hba_init_complete;
struct completion mbox_complete;
unsigned short removing;
int sfp_on;
int dev_present;
int heart_status;
spinlock_t hba_lock;
unsigned int port_topo_cfg;
unsigned int port_bbscn_cfg;
unsigned int port_loop_role;
unsigned int port_speed_cfg;
unsigned int max_support_speed;
unsigned char remote_rttov_tag;
unsigned char remote_edtov_tag;
unsigned short compared_bbscn;
unsigned short remote_bbcredit;
unsigned int compared_edtov_val;
unsigned int compared_ratov_val;
enum unf_act_topo_e active_topo;
unsigned int active_port_speed;
unsigned int active_rx_bb_credit;
unsigned int active_bb_scn;
unsigned int phy_link;
unsigned int fcp_conf_cfg;
/* loop */
unsigned char active_al_pa;
unsigned char loop_map_valid;
unsigned char loop_map[UNF_LOOPMAP_COUNT];
unsigned int cos_bit_map;
atomic_t cos_rport_cnt[HIFC_MAX_COS_NUM];
struct hifc_led_state_s led_states;
unsigned int fec_status;
struct workqueue_struct *work_queue;
unsigned long long reset_time;
struct hifc_srq_delay_info_s delay_info;
};
enum drv_port_entity_type_e {
DRV_PORT_ENTITY_TYPE_PHYSICAL = 0,
DRV_PORT_ENTITY_TYPE_VIRTUAL = 1,
DRV_PORT_ENTITY_TYPE_BUTT
};
extern struct hifc_hba_s *hifc_hba[HIFC_HBA_PORT_MAX_NUM];
extern spinlock_t probe_spin_lock;
extern unsigned long probe_bit_map[HIFC_MAX_PROBE_PORT_NUM /
HIFC_PORT_NUM_PER_TABLE];
unsigned int hifc_port_reset(struct hifc_hba_s *v_hba);
void hifc_flush_scq_ctx(struct hifc_hba_s *v_hba);
void hifc_set_hba_flush_state(struct hifc_hba_s *v_hba, int in_flush);
void hifc_get_total_probed_num(unsigned int *v_probe_cnt);
#endif
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __HIFC_QUEUE_H__
#define __HIFC_QUEUE_H__
#include "hifc_wqe.h"
#include "hifc_hw.h"
#include "hifc_hwif.h"
#include "hifc_cqm_main.h"
#define WQE_MARKER_0 0x0
#define WQE_MARKER_6B 0x6b
#define HIFC_SQE_SIZE 128
#define HIFC_MIN_WP_NUM 2
/* Counter */
#define HIFC_STAT_SESSION_IO
/*************** PARENT SQ&Context defines *******************************/
#define HIFC_MAX_MSN (65535)
#define HIFC_MSN_MASK (0xffff000000000000LL)
#define HIFC_SQE_TS_SIZE (72)
#define HIFC_SQE_FIRST_OBIT_DW_POS (0)
#define HIFC_SQE_SECOND_OBIT_DW_POS (30)
#define HIFC_SQE_OBIT_SET_MASK_BE (0x80)
#define HIFC_SQE_OBIT_CLEAR_MASK_BE (0xffffff7f)
#define HIFC_MAX_SQ_TASK_TYPE_CNT (128)
/*
* Note: if the location of the flush done bit changes, the definitions
* below must be modified accordingly
*/
#define HIFC_CTXT_FLUSH_DONE_DW_POS (58)
#define HIFC_CTXT_FLUSH_DONE_MASK_BE (0x4000)
#define HIFC_GET_SQ_HEAD(v_sq) \
list_entry((&(v_sq)->list_linked_list_sq)->next,\
struct hifc_sq_wqe_page_s, entry_wpg)
#define HIFC_GET_SQ_TAIL(v_sq) \
list_entry((&(v_sq)->list_linked_list_sq)->prev, \
struct hifc_sq_wqe_page_s, entry_wpg)
#ifdef HIFC_STAT_SESSION_IO
#define HIFC_SQ_IO_STAT(v_sq, io_type) \
(atomic_inc(&(v_sq)->io_stat[io_type]))
#define HIFC_SQ_IO_STAT_READ(v_sq, io_type) \
(atomic_read(&(v_sq)->io_stat[io_type]))
#endif
#define HIFC_GET_QUEUE_CMSN(v_sq)\
((unsigned int)(be64_to_cpu(((((v_sq)->queue_header)->ci_record) \
& HIFC_MSN_MASK))))
#define HIFC_GET_WP_END_CMSN(head_start_cmsn, wqe_num_per_buf) \
(unsigned short)(((unsigned int)(head_start_cmsn) +\
(unsigned int)(wqe_num_per_buf) - 1) % (HIFC_MAX_MSN + 1))
#define HIFC_MSN_INC(msn) (((HIFC_MAX_MSN) == (msn)) ? 0 : ((msn) + 1))
#define HIFC_MSN_DEC(msn) ((0 == (msn)) ? (HIFC_MAX_MSN) : ((msn) - 1))
#define HIFC_QUEUE_MSN_OFFSET(start_cmsn, end_cmsn) \
(unsigned int)((((unsigned int)(end_cmsn) + (HIFC_MAX_MSN)) - \
(unsigned int)(start_cmsn)) % (HIFC_MAX_MSN + 1))
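/*
* MSN arithmetic wraps modulo (HIFC_MAX_MSN + 1), e.g.
* HIFC_MSN_INC(HIFC_MAX_MSN) == 0, HIFC_MSN_DEC(0) == HIFC_MAX_MSN and
* HIFC_GET_WP_END_CMSN(65530, 10) == (65530 + 10 - 1) % 65536 == 3.
*/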
/******************* ROOT SQ&RQ defines ***********************************/
#define HIFC_ROOT_Q_CTX_SIZE (48)
#define HIFC_ROOT_Q_CTX_CI_WQE_HI_SHIFT (44)
#define HIFC_ROOT_Q_CTX_CI_WQE_LOW_SHIFT (12)
#define HIFC_ROOT_Q_CTX_CLA_HI_SHIFT (41)
#define HIFC_ROOT_Q_CTX_CLA_LOW_SHIFT (9)
#define HIFC_ROOT_TSO_LRO_SPACE (0)
#define HIFC_ROOT_CTX_WQE_PREFETCH_MAX (3)
#define HIFC_ROOT_CTX_WQE_PREFETCH_MIN (1)
#define HIFC_ROOT_CTX_WQE_PRERETCH_THRESHOLD (2)
#define HIFC_CI_WQE_PAGE_HIGH_ADDR(x) \
(unsigned int)(((x) >> HIFC_ROOT_Q_CTX_CI_WQE_HI_SHIFT) & 0xffffffff)
#define HIFC_CI_WQE_PAGE_LOW_ADDR(x) \
(unsigned int)(((x) >> HIFC_ROOT_Q_CTX_CI_WQE_LOW_SHIFT) & 0xffffffff)
#define HIFC_CLA_HIGH_ADDR(x)\
(unsigned int)(((x) >> HIFC_ROOT_Q_CTX_CLA_HI_SHIFT) & 0xffffffff)
#define HIFC_CLA_LOW_ADDR(x) \
(unsigned int)(((x) >> HIFC_ROOT_Q_CTX_CLA_LOW_SHIFT) & 0xffffffff)
/*********************** ROOT SQ defines ***********************************/
#define HIFC_ROOT_SQ_NUM (1)
#define HIFC_ROOT_SQ_DEPTH (2048)
#define HIFC_ROOT_SQ_WQEBB (64)
#define HIFC_ROOT_SQ_CI_TABLE_STEP_BYTE (4)
#define HIFC_ROOT_SQ_LOOP_OWNER (1)
#define HIFC_ROOT_SQ_CI_ATTRIBUTE_ADDRESS_SHIFT (2)
#define HIFC_DOORBELL_SQ_TYPE (1)
#define HIFC_DOORBELL_SQ_PI_HIGH_BITS_SHIFT (8)
#define HIFC_DOORBELL_SQ_PI_LOW_BITS_MASK (0xFF)
#define HIFC_INT_NUM_PER_QUEUE (1)
#define HIFC_INT_ENABLE (1)
#define HIFC_ROOT_CFG_SQ_NUM_MAX (42)
#define HIFC_CMDQ_QUEUE_TYPE_SQ (0)
#define HIFC_GET_ROOT_SQ_CI_ADDR(addr, index) \
((addr) + (unsigned int)((index) * HIFC_ROOT_SQ_CI_TABLE_STEP_BYTE))
#define HIFC_ROOT_SQ_CTX_OFFSET(q_num, q_id) \
((HIFC_ROOT_TSO_LRO_SPACE * 2 * (q_num) +\
HIFC_ROOT_Q_CTX_SIZE * (q_id)) / 16)
/********************** ROOT RQ defines ***********************************/
#define HIFC_ROOT_RQ_NUM (1)
#define HIFC_ROOT_RQ_DEPTH (1024)
#define HIFC_ROOT_RQ_WQEBB (32)
#define HIFC_ROOT_RQ_PI_TABLE_STEP_BYTE (4)
#define HIFC_ROOT_RQ_LOOP_OWNER (1)
#define HIFC_ROOT_RQ_RECV_BUFF_SIZE (1024)
#define HIFC_ROOT_Q_INT_ID_MAX (1024) /* 10bit */
#define HIFC_ROOT_CFG_RQ_NUM_MAX (42)
#define HIFC_CMDQ_QUEUE_TYPE_RQ (1)
#define HIFC_RQE_MAX_PROCESS_NUM_PER_INTR (128)
#define HIFC_ROOT_RQ_CTX_OFFSET(q_num, q_id)\
(((HIFC_ROOT_TSO_LRO_SPACE * 2 + HIFC_ROOT_Q_CTX_SIZE) * (q_num) +\
HIFC_ROOT_Q_CTX_SIZE * (q_id)) / 16)
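/*
* With HIFC_ROOT_TSO_LRO_SPACE == 0 the offsets above reduce to 16-byte
* units: SQ ctx offset = (48 * q_id) / 16 and
* RQ ctx offset = (48 * q_num + 48 * q_id) / 16.
*/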
/************************** SCQ defines ***********************************/
#define HIFC_SCQ_INT_ID_MAX (2048) /* 11BIT */
#define HIFC_SCQE_SIZE (64)
#define HIFC_CQE_GPA_SHIFT (4)
#define HIFC_NEXT_CQE_GPA_SHIFT (12)
/* PMSN CI type: 0 - CI updated from host, 1 - CI updated from ucode */
#define HIFC_PMSN_CI_TYPE_FROM_HOST (0)
#define HIFC_PMSN_CI_TYPE_FROM_UCODE (1)
#define HIFC_ARMQ_IDLE (0)
#define HIFC_CQ_INT_MODE (2)
#define HIFC_CQ_HEADER_OWNER_SHIFT (15)
/*
* SCQC_CQ_DEPTH encoding: 0-256, 1-512, 2-1k, 3-2k, 4-4k, 5-8k, 6-16k,
* 7-32k; the depth includes the link WQE
*/
#define HIFC_CMD_SCQ_DEPTH (4096)
#define HIFC_STS_SCQ_DEPTH (8192)
#define HIFC_CMD_SCQC_CQ_DEPTH (hifc_log2n(HIFC_CMD_SCQ_DEPTH >> 8))
#define HIFC_STS_SCQC_CQ_DEPTH (hifc_log2n(HIFC_STS_SCQ_DEPTH >> 8))
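/*
* e.g. the 4096-entry command SCQ encodes as depth code 4 (4k) and the
* 8192-entry status SCQ as depth code 5 (8k).
*/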
#define HIFC_STS_SCQ_CI_TYPE HIFC_PMSN_CI_TYPE_FROM_HOST
#define HIFC_CMD_SCQ_CI_TYPE HIFC_PMSN_CI_TYPE_FROM_UCODE
#define HIFC_SCQ_INTR_LOW_LATENCY_MODE 0
#define HIFC_SCQ_INTR_POLLING_MODE 1
#define HIFC_CQE_MAX_PROCESS_NUM_PER_INTR (128)
#define HIFC_SESSION_SCQ_NUM (16)
/*
* SCQ[0, 2, 4 ...] are CMD SCQs, SCQ[1, 3, 5 ...] are STS SCQs and
* SCQ[HIFC_TOTAL_SCQ_NUM - 1] is the default SCQ
*/
#define HIFC_CMD_SCQN_START (0)
#define HIFC_STS_SCQN_START (1)
#define HIFC_SCQS_PER_SESSION (2)
#define HIFC_TOTAL_SCQ_NUM (HIFC_SESSION_SCQ_NUM + 1)
#define HIFC_SCQ_IS_STS(scq_index) \
(((scq_index) % HIFC_SCQS_PER_SESSION) || \
((scq_index) == HIFC_SESSION_SCQ_NUM))
#define HIFC_SCQ_IS_CMD(scq_index)\
(!HIFC_SCQ_IS_STS(scq_index))
#define HIFC_RPORTID_TO_CMD_SCQN(rport_index) \
(((rport_index) * HIFC_SCQS_PER_SESSION) % HIFC_SESSION_SCQ_NUM)
#define HIFC_RPORTID_TO_STS_SCQN(rport_index) \
((((rport_index) * HIFC_SCQS_PER_SESSION) + 1) % HIFC_SESSION_SCQ_NUM)
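/*
* e.g. R_Port 0 maps to SCQ 0 (cmd) / SCQ 1 (sts), R_Port 7 to SCQ 14/15,
* and R_Port 8 wraps back around to SCQ 0/1.
*/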
/************************** SRQ defines ***********************************/
#define HIFC_SRQE_SIZE (32)
#define HIFC_SRQ_INIT_LOOP_O (1)
#define HIFC_QUEUE_RING (1)
#define HIFC_SRQ_ELS_DATA_NUM (1)
#define HIFC_SRQ_ELS_SGE_LEN (256)
#define HIFC_SRQ_ELS_DATA_DEPTH (4096)
#define HIFC_IRQ_NAME_MAX (30)
/* Support 2048 sessions(xid) */
#define HIFC_CQM_XID_MASK (0x7ff)
#define HIFC_QUEUE_FLUSH_DOING (0)
#define HIFC_QUEUE_FLUSH_DONE (1)
#define HIFC_QUEUE_FLUSH_WAIT_TIMEOUT_MS (2000)
#define HIFC_QUEUE_FLUSH_WAIT_MS (2)
/************************* RPort defines ***********************************/
#define HIFC_EXIT_STRIDE (4096)
#define UNF_HIFC_MAXRPORT_NUM (2048)
#define HIFC_RPORT_OFFLOADED(prnt_qinfo) \
((prnt_qinfo)->offload_state == HIFC_QUEUE_STATE_OFFLOADED)
#define HIFC_RPORT_NOT_OFFLOADED(prnt_qinfo) \
((prnt_qinfo)->offload_state != HIFC_QUEUE_STATE_OFFLOADED)
#define HIFC_RPORT_FLUSH_NOT_NEEDED(prnt_qinfo)\
(((prnt_qinfo)->offload_state == HIFC_QUEUE_STATE_INITIALIZED) || \
((prnt_qinfo)->offload_state == HIFC_QUEUE_STATE_OFFLOADING) || \
((prnt_qinfo)->offload_state == HIFC_QUEUE_STATE_FREE))
#define HIFC_CHECK_XID_MATCHED(sq_xid, sqe_xid) \
(((sq_xid) & HIFC_CQM_XID_MASK) == ((sqe_xid) & HIFC_CQM_XID_MASK))
#define HIFC_PORT_MODE_TGT (0) /* Port mode */
#define HIFC_PORT_MODE_INI (1)
#define HIFC_PORT_MODE_BOTH (2)
/********** Hardware Reserved Queue Info defines ***************************/
#define HIFC_HRQI_SEQ_ID_MAX (255)
#define HIFC_HRQI_SEQ_INDEX_MAX (64)
#define HIFC_HRQI_SEQ_INDEX_SHIFT (6)
#define HIFC_HRQI_SEQ_SEPCIAL_ID (3)
#define HIFC_HRQI_SEQ_INVALID_ID (~0LL)
/************************* OQID defines ***********************************/
#define HIFC_OQID_HOST_XID_OFFSET (5)
#define HIFC_OQID_HOST_RW_OFFSET (4)
#define HIFC_OQID_HOST_ST_OFFSET (2)
#define HIFC_OQID_HOST_OQID_LEN (11)
#define HIFC_OQID_HOST_READ_FROM_HOST (0UL)
#define HIFC_OQID_HOST_WRITE_TO_HOST (1)
#define HIFC_CPI_CHNL_ID_XOE_READ (1UL)
#define HIFC_CPI_CHNL_ID_XOE_WRITE (3UL)
#define HIFC_SERVICE_TYPE_FC_FCOE (2)
/********************* sdk config defines ***********************************/
#define HIFC_CNTX_SIZE_256B 256
#define HIFC_QUEUE_LINK_STYLE 0
#define HIFC_PACKET_COS_FC_CMD 0
#define HIFC_PACKET_COS_FC_DATA 1
#define HIFC_DB_ARM_DISABLE 0
#define HIFC_DMA_ATTR_OFST 0
#define HIFC_PCIE_TEMPLATE 0
#define HIFC_PCIE_RELAXED_ORDERING 1
#define HIFC_OWNER_DRIVER_PRODUCT 1
#define HIFC_CMDQE_BUFF_LEN_MAX 2040
#define HIFC_CNTX_SIZE_T_256B 0
#define HIFC_OQID_IO_HOST_SET(xid, rw, cidx, vf_id, m, oqid) \
{ \
oqid = (unsigned short)(((unsigned short)\
((xid) << HIFC_OQID_HOST_XID_OFFSET)) \
| ((unsigned short)((rw) << HIFC_OQID_HOST_RW_OFFSET)) \
| ((unsigned short)(HIFC_SERVICE_TYPE_FC_FCOE << \
HIFC_OQID_HOST_ST_OFFSET)) | (cidx)); \
oqid = (unsigned short)\
(((unsigned short)(oqid & (0x7ff >> (m))))\
| ((unsigned short)((vf_id) << \
(HIFC_OQID_HOST_OQID_LEN - (m))))); \
}
#define HIFC_OQID_RD(xid, vf_id, m, oq_id) \
HIFC_OQID_IO_HOST_SET(xid, HIFC_OQID_HOST_READ_FROM_HOST,\
HIFC_CPI_CHNL_ID_XOE_READ, vf_id, m, oq_id)
#define HIFC_OQID_WR(xid, vf_id, m, oq_id) \
HIFC_OQID_IO_HOST_SET(xid, HIFC_OQID_HOST_WRITE_TO_HOST,\
HIFC_CPI_CHNL_ID_XOE_WRITE, vf_id, m, oq_id)
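/*
* OQID layout as built by the macro above (low to high bits): channel id
* [1:0], service type [3:2], read/write flag [4], xid from bit 5 up; the
* top m bits are then overwritten with vf_id.
*/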
enum hifc_session_reset_mode_e {
HIFC_SESS_RST_DELETE_IO_ONLY = 1,
HIFC_SESS_RST_DELETE_CONN_ONLY = 2,
HIFC_SESS_RST_DELETE_IO_CONN_BOTH = 3,
HIFC_SESS_RST_MODE_BUTT
};
/* linkwqe */
#define CQM_LINK_WQE_CTRLSL_VALUE 2
#define CQM_LINK_WQE_LP_VALID 1
#define CQM_LINK_WQE_LP_INVALID 0
/****************** ROOT SQ&RQ&CTX defines ****************************/
struct nic_tx_doorbell {
union {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 srv_type : 5;
u32 cos : 3;
u32 c_flag : 1;
u32 rsvd0 : 5;
u32 queue_id : 10;
u32 pi_high : 8;
#else
u32 pi_high : 8;
u32 queue_id : 10;
u32 rsvd0 : 5;
u32 c_flag : 1;
u32 cos : 3;
u32 srv_type : 5;
#endif
} bs0;
u32 dw0;
};
u32 rsvd1;
};
struct hifc_qp_ctxt_header {
u16 num_queues;
u16 queue_type;
u32 addr_offset;
};
/*
* nic_sq_ctx_1822 table define
*/
struct hifc_sq_ctxt {
union {
struct sq_ctx_dw0 {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
/* whether generate CEQ */
u32 ceq_arm : 1;
u32 rsvd1 : 7;
/* whether enable CEQ */
u32 ceq_en : 1;
u32 global_sq_id : 10;
u32 ceq_num : 5;
u32 pkt_template : 6;
u32 rsvd2 : 2;
#else
u32 rsvd2 : 2;
u32 pkt_template : 6;
u32 ceq_num : 5;
u32 global_sq_id : 10;
/* whether enable CEQ */
u32 ceq_en : 1;
u32 rsvd1 : 7;
/* whether generate CEQ */
u32 ceq_arm : 1;
#endif
} sq_ctx_dw0;
u32 dw0;
};
union {
struct sq_ctx_dw1 {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 wqe_template : 6;
u32 rsvd3 : 2;
u32 owner : 1;
/* consumer index */
u32 ci : 12;
u32 tso_doing : 1;
/* indicates how many SGEs are left in the current TSO WQE */
u32 sge_num_left : 6;
/* number of SGEs being processed */
u32 processing_sge : 3;
u32 rsvd4 : 1;
#else
u32 rsvd4 : 1;
/* number of SGEs being processed */
u32 processing_sge : 3;
/* indicates how many SGEs are left in the current TSO WQE */
u32 sge_num_left : 6;
u32 tso_doing : 1;
/* consumer index */
u32 ci : 12;
u32 owner : 1;
u32 rsvd3 : 2;
u32 wqe_template : 6;
#endif
} sq_ctx_dw1;
u32 dw1;
};
union {
struct sq_ctx_dw2 {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd5 : 12;
/* the WQE page address that the current CI points to */
u32 ci_wqe_page_addr_hi : 20;
#else
/* the WQE page address that the current CI points to */
u32 ci_wqe_page_addr_hi : 20;
u32 rsvd5 : 12;
#endif
} sq_ctx_dw2;
u32 dw2;
};
u32 ci_wqe_page_addr_lo;
union {
struct sq_ctx_dw4 {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
/*
* The minimum prefetch WQE cacheline number of this SQ
*/
u32 prefetch_min : 7;
/*
* The maximum prefetch WQE cacheline number of this SQ
*/
u32 prefetch_max : 11;
u32 prefetch_cache_threshold : 14;
#else
u32 prefetch_cache_threshold : 14;
/*
* The maximum prefetch WQE cacheline number of this SQ
*/
u32 prefetch_max : 11;
/*
* The minimum prefetch WQE cacheline number of this SQ
*/
u32 prefetch_min : 7;
#endif
} sq_ctx_dw4;
u32 dw4;
};
union {
struct sq_ctx_dw5 {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd6 : 31;
u32 prefetch_owner : 1;
#else
u32 prefetch_owner : 1;
u32 rsvd6 : 31;
#endif
} sq_ctx_dw5;
u32 dw5;
};
union {
struct sq_ctx_dw6 {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 prefetch_ci : 12;
u32 prefetch_ci_wqe_addr_hi : 20;
#else
u32 prefetch_ci_wqe_addr_hi : 20;
u32 prefetch_ci : 12;
#endif
} sq_ctx_dw6;
u32 dw6;
};
u32 prefetch_ci_wqe_addr_lo;
union {
struct sq_ctx_dw8 {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
/* processed length of current seg */
u32 processed_seg_len : 16;
u32 rsvd7 : 16;
#else
u32 rsvd7 : 16;
/* processed length of current seg */
u32 processed_seg_len : 16;
#endif
} sq_ctx_dw8;
u32 dw8;
};
u32 qsf;
union {
struct sq_ctx_dw10 {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd8 : 9;
/* CI CLA table address */
u32 cla_addr_hi : 23;
#else
/* CI CLA table address */
u32 cla_addr_hi : 23;
u32 rsvd8 : 9;
#endif
} sq_ctx_dw10;
u32 dw10;
};
u32 cla_addr_lo;
};
struct hifc_sq_ctxt_block {
struct hifc_qp_ctxt_header cmdq_hdr;
struct hifc_sq_ctxt sq_ctx[HIFC_ROOT_CFG_SQ_NUM_MAX];
};
/*
* nic_rq_ctx_1822 table define
*/
struct hifc_rq_ctxt {
union {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 max_count : 10;
u32 cqe_tmpl : 6;
u32 pkt_tmpl : 6;
u32 wqe_tmpl : 6;
u32 psge_valid : 1;
u32 rsvd1 : 1;
u32 owner : 1;
u32 ceq_en : 1;
#else
u32 ceq_en : 1;
u32 owner : 1;
u32 rsvd1 : 1;
u32 psge_valid : 1;
u32 wqe_tmpl : 6;
u32 pkt_tmpl : 6;
u32 cqe_tmpl : 6;
u32 max_count : 10;
#endif
} bs;
u32 dw0;
};
union {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
/*
* Interrupt number the L2NIC engine reports to SW when it generates
* an interrupt instead of a CEQ
*/
u32 int_num : 10;
u32 ceq_count : 10;
/* producer index */
u32 pi : 12;
#else
/* producer index */
u32 pi : 12;
u32 ceq_count : 10;
/*
* Interrupt number the L2NIC engine reports to SW when it generates
* an interrupt instead of a CEQ
*/
u32 int_num : 10;
#endif
} bs0;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
/*
* CEQ arm: the L2NIC engine clears it after sending a CEQ; the
* driver should set it via CMDQ after receiving all packets.
*/
u32 ceq_arm : 1;
u32 eq_id : 5;
u32 rsvd2 : 4;
u32 ceq_count : 10;
/* producer index */
u32 pi : 12;
#else
/* producer index */
u32 pi : 12;
u32 ceq_count : 10;
u32 rsvd2 : 4;
u32 eq_id : 5;
/* CEQ arm: the L2NIC engine clears it after sending a CEQ; the
* driver should set it via CMDQ after receiving all packets.
*/
u32 ceq_arm : 1;
#endif
} bs1;
u32 dw1;
};
union {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
/* consumer index */
u32 ci : 12;
/* WQE page address the current CI points to, high part */
u32 ci_wqe_page_addr_hi : 20;
#else
/* WQE page address the current CI points to, high part */
u32 ci_wqe_page_addr_hi : 20;
/* consumer index */
u32 ci : 12;
#endif
} bs2;
u32 dw2;
};
/* WQE page address the current CI points to, low part */
u32 ci_wqe_page_addr_lo;
union {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 prefetch_min : 7;
u32 prefetch_max : 11;
u32 prefetch_cache_threshold : 14;
#else
u32 prefetch_cache_threshold : 14;
u32 prefetch_max : 11;
u32 prefetch_min : 7;
#endif
} bs3;
u32 dw3;
};
union {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd3 : 31;
/* ownership of WQE */
u32 prefetch_owner : 1;
#else
/* ownership of WQE */
u32 prefetch_owner : 1;
u32 rsvd3 : 31;
#endif
} bs4;
u32 dw4;
};
union {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 prefetch_ci : 12;
/* high part */
u32 prefetch_ci_wqe_page_addr_hi : 20;
#else
/* high part */
u32 prefetch_ci_wqe_page_addr_hi : 20;
u32 prefetch_ci : 12;
#endif
} bs5;
u32 dw5;
};
/* low part */
u32 prefetch_ci_wqe_page_addr_lo;
/* host mem GPA, high part */
u32 pi_gpa_hi;
/* host mem GPA, low part */
u32 pi_gpa_lo;
union {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd4 : 9;
u32 ci_cla_tbl_addr_hi : 23;
#else
u32 ci_cla_tbl_addr_hi : 23;
u32 rsvd4 : 9;
#endif
} bs6;
u32 dw6;
};
u32 ci_cla_tbl_addr_lo;
};
struct hifc_rq_ctxt_block {
struct hifc_qp_ctxt_header cmdq_hdr;
struct hifc_rq_ctxt rq_ctx[HIFC_ROOT_CFG_RQ_NUM_MAX];
};
struct hifc_root_qsf_s {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
/* packet priority, the engine passes pri to ucode */
u32 pri : 3;
/* unicast flag, the engine passes uc to ucode */
u32 uc : 1;
/* sctp packet, the engine passes sctp to ucode */
u32 sctp : 1;
/* mss */
u32 mss : 14;
/* when set, hi1822 calculates the tcp/udp checksum of the packet */
u32 tcp_udp_cs : 1;
/*
* transmit segmentation offload is activated when the tso flag is set
*/
u32 tso : 1;
/* for a udp packet, the engine reads the whole packet from the host in
* one dma read; ipsu calculates the udp checksum and ucode does the ip
* segmentation
*/
u32 ufo : 1;
/* payload offset. it is the start position to calculate tcp/udp
* checksum or sctp crc
*/
u32 payload_offset : 8;
/* reserved */
u32 route_to_ucode : 2;
#else
/* reserved */
u32 route_to_ucode : 2;
/*
* payload offset. it is the start position to calculate tcp/udp
* checksum or sctp crc
*/
u32 payload_offset : 8;
/*
* for a udp packet, the engine reads the whole packet from the host in
* one dma read; ipsu calculates the udp checksum and ucode does the ip
* segmentation
*/
u32 ufo : 1;
/*
* transmit segmentation offload is activated when the tso flag is set
*/
u32 tso : 1;
/* when set, hi1822 calculates the tcp/udp checksum of the packet */
u32 tcp_udp_cs : 1;
/* mss */
u32 mss : 14;
/* sctp packet, the engine passes sctp to ucode */
u32 sctp : 1;
/* unicast flag, the engine passes uc to ucode */
u32 uc : 1;
/* packet priority, the engine passes pri to ucode */
u32 pri : 3;
#endif
};
struct hifc_root_db_addr_s {
unsigned long long phy_addr;
void __iomem *virt_map_addr;
};
/* send queue management structure */
struct hifc_root_sq_info_s {
spinlock_t root_sq_spin_lock;
unsigned short qid;
unsigned short max_qnum;
unsigned short pi; /* ring buffer Pi */
unsigned short ci; /* ring buffer Ci */
unsigned short owner;
unsigned short hardware_write_back_value;
unsigned short q_depth;
unsigned short wqe_bb_size; /* WQE Basic size */
char irq_name[HIFC_IRQ_NAME_MAX];
unsigned int irq_id;
unsigned short msix_entry_idx;
unsigned short *ci_addr;
dma_addr_t ci_dma_addr;
unsigned long long cla_addr;
void *sq_handle;
struct hifc_root_db_addr_s direct_db;
struct hifc_root_db_addr_s normal_db;
unsigned int db_idx;
unsigned int global_qpn;
int in_flush;
void *root_info;
};
struct hifc_root_rq_info_s {
unsigned short qid;
unsigned short max_qnum;
unsigned short pi;
unsigned short ci;
unsigned short owner;
unsigned short q_depth;
unsigned short q_mask;
unsigned short wqe_bb_size;
char irq_name[HIFC_IRQ_NAME_MAX];
unsigned int irq_id;
unsigned short msix_entry_idx;
unsigned short *pi_vir_addr;
dma_addr_t pi_dma_addr;
/* Root RQ Receive Buffer size and completion buff */
unsigned int rqc_buff_size;
void *rq_completion_buff;
dma_addr_t rq_completion_dma;
unsigned int rq_rcv_buff_size;
void *rq_rcv_buff;
dma_addr_t rq_rcv_dma;
void *rq_handle;
/* for queue context init */
unsigned long long ci_cla_tbl_addr;
unsigned int global_qpn;
struct tasklet_struct tasklet;
atomic_t flush_state;
void *root_info;
};
struct hifc_root_info_s {
void *phba;
unsigned int sq_num;
unsigned int sq_ci_table_size;
void *virt_sq_ci_table_buff;
dma_addr_t sq_ci_table_dma;
void *sq_info;
unsigned int rq_num;
unsigned int rq_pi_table_size;
void *virt_rq_pi_table_buff;
dma_addr_t rq_pi_table_dma;
void *rq_info;
};
/**************************** SCQ defines ********************************/
struct hifc_scq_info_s {
struct cqm_queue_s *cqm_scq_info;
unsigned int wqe_num_per_buf;
unsigned int wqe_size;
/* 0-256, 1-512, 2-1k, 3-2k, 4-4k, 5-8k, 6-16k, 7-32k */
unsigned int scqc_cq_depth;
unsigned short scqc_ci_type;
unsigned short valid_wqe_num; /* SCQ depth, including the link WQE */
unsigned short ci;
unsigned short ci_owner;
unsigned int queue_id;
unsigned int scqn;
char irq_name[HIFC_IRQ_NAME_MAX];
unsigned short msix_entry_idx;
unsigned int irq_id;
struct tasklet_struct tasklet;
atomic_t flush_state;
void *phba;
unsigned int reserved;
struct task_struct *delay_task;
int task_exit;
unsigned int intrmode;
};
/************************* SRQ depth ***********************************/
struct hifc_srq_ctx_s {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
/* DW0 */
unsigned long long last_rq_pmsn : 16;
unsigned long long cur_rqe_msn : 16;
unsigned long long cur_rqe_user_id : 16;
unsigned long long parity : 8;
unsigned long long rsvd0 : 2;
unsigned long long pcie_template : 6;
/* DW1 */
unsigned long long cur_rqe_gpa;
/* DW2 */
unsigned long long cur_sge_v : 1;
unsigned long long cur_sge_l : 1;
unsigned long long int_mode : 2;
unsigned long long ceqn_msix : 11;
unsigned long long cur_sge_remain_len : 17;
unsigned long long cur_sge_id : 4;
unsigned long long consant_sge_len : 17;
unsigned long long cur_wqe : 1;
unsigned long long pmsn_type : 1;
unsigned long long bdsl : 4;
unsigned long long cr : 1;
unsigned long long csl : 2;
unsigned long long cf : 1;
unsigned long long ctrl_sl : 1;
/* DW3 */
unsigned long long cur_sge_gpa;
/* DW4 */
unsigned long long cur_pmsn_gpa;
/* DW5 */
unsigned long long pre_fetch_max_msn : 16;
unsigned long long cqe_max_cnt : 8;
unsigned long long cur_cqe_cnt : 8;
unsigned long long arm_q : 1;
unsigned long long rsvd1 : 7;
unsigned long long cq_so_ro : 2;
unsigned long long cqe_dma_attr_idx : 6;
unsigned long long rq_so_ro : 2;
unsigned long long rqe_dma_attr_idx : 6;
unsigned long long rsvd2 : 1;
unsigned long long loop_o : 1;
unsigned long long ring : 1;
unsigned long long rsvd3 : 5;
#else
/* DW0 */
unsigned long long pcie_template : 6;
unsigned long long rsvd0 : 2;
unsigned long long parity : 8;
unsigned long long cur_rqe_user_id : 16;
unsigned long long cur_rqe_msn : 16;
unsigned long long last_rq_pmsn : 16;
/* DW1 */
unsigned long long cur_rqe_gpa;
/* DW2 */
unsigned long long ctrl_sl : 1;
unsigned long long cf : 1;
unsigned long long csl : 2;
unsigned long long cr : 1;
unsigned long long bdsl : 4;
unsigned long long pmsn_type : 1;
unsigned long long cur_wqe : 1;
unsigned long long consant_sge_len : 17;
unsigned long long cur_sge_id : 4;
unsigned long long cur_sge_remain_len : 17;
unsigned long long ceqn_msix : 11;
unsigned long long int_mode : 2;
unsigned long long cur_sge_l : 1;
unsigned long long cur_sge_v : 1;
/* DW3 */
unsigned long long cur_sge_gpa;
/* DW4 */
unsigned long long cur_pmsn_gpa;
/* DW5 */
unsigned long long rsvd3 : 5;
unsigned long long ring : 1;
unsigned long long loop_o : 1;
unsigned long long rsvd2 : 1;
unsigned long long rqe_dma_attr_idx : 6;
unsigned long long rq_so_ro : 2;
unsigned long long cqe_dma_attr_idx : 6;
unsigned long long cq_so_ro : 2;
unsigned long long rsvd1 : 7;
unsigned long long arm_q : 1;
unsigned long long cur_cqe_cnt : 8;
unsigned long long cqe_max_cnt : 8;
unsigned long long pre_fetch_max_msn : 16;
#endif
/* DW6~DW7 */
unsigned long long rsvd4;
unsigned long long rsvd5;
};
struct hifc_srq_buff_entry_s {
unsigned short buff_id;
void *buff_addr;
dma_addr_t buff_dma;
};
enum hifc_clean_state_e {
HIFC_CLEAN_DONE,
HIFC_CLEAN_DOING,
HIFC_CLEAN_BUTT
};
enum hifc_srq_type_e {
HIFC_SRQ_ELS = 1,
HIFC_SRQ_BUTT
};
struct hifc_srq_info_s {
enum hifc_srq_type_e srq_type;
struct cqm_queue_s *cqm_srq_info;
/* WQE number per buf, doesn't include the link WQE */
unsigned int wqe_num_per_buf;
unsigned int wqe_size;
/* valid WQE number, doesn't include the link WQE */
unsigned int valid_wqe_num;
unsigned short pi;
unsigned short pi_owner;
unsigned short pmsn;
unsigned short ci;
unsigned short cmsn;
unsigned int srqn;
dma_addr_t first_rqe_rcv_dma;
struct hifc_srq_buff_entry_s *els_buff_entry_head;
struct buf_describe_s buff_list;
spinlock_t srq_spin_lock;
int spin_lock_init;
int enable;
enum hifc_clean_state_e state;
struct delayed_work del_work;
unsigned int del_retry_time;
void *phba;
};
/*
* The doorbell record keeps the PI of the WQE that will be produced next.
* The PI is 15 bits wide with 1 o-bit.
*/
struct hifc_db_record {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u64 rsvd0 : 32;
unsigned long long dump_pmsn : 16;
unsigned long long pmsn : 16;
#else
unsigned long long pmsn : 16;
unsigned long long dump_pmsn : 16;
u64 rsvd0 : 32;
#endif
};
/*
* The CI record keeps the CI of the WQE that will be consumed next.
* The CI is 15 bits wide with 1 o-bit.
*/
struct hifc_ci_record_s {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u64 rsvd0 : 32;
unsigned long long dump_cmsn : 16;
unsigned long long cmsn : 16;
#else
unsigned long long cmsn : 16;
unsigned long long dump_cmsn : 16;
u64 rsvd0 : 32;
#endif
};
/* The accumulate data in WQ header */
struct hifc_accumulate {
u64 data_2_uc;
u64 data_2_drv;
};
/* The WQ header structure */
struct hifc_wq_header_s {
struct hifc_db_record db_record;
struct hifc_ci_record_s ci_record;
struct hifc_accumulate soft_data;
};
/* Link list Sq WqePage Pool */
/* queue header struct */
struct hifc_queue_header_s {
unsigned long long doorbell_record;
unsigned long long ci_record;
unsigned long long ulrsv1;
unsigned long long ulrsv2;
};
/* WPG-WQEPAGE, LLSQ-LINKED LIST SQ */
struct hifc_sq_wqe_page_s {
struct list_head entry_wpg;
/* Wqe Page virtual addr */
void *wpg_addr;
/* Wqe Page physical addr */
unsigned long long wpg_phy_addr;
};
struct hifc_sq_wqe_page_pool_s {
unsigned int wpg_cnt;
unsigned int wpg_size;
unsigned int wqe_per_wpg;
/* PCI DMA Pool */
struct dma_pool *wpg_dma_pool;
struct hifc_sq_wqe_page_s *wpg_pool_addr;
struct list_head list_free_wpg_pool;
spinlock_t wpg_pool_lock;
atomic_t wpg_in_use;
};
#define HIFC_SQ_DEL_STAGE_TIMEOUT_MS (3 * 1000)
#define HIFC_SRQ_DEL_STAGE_TIMEOUT_MS (10 * 1000)
#define HIFC_SQ_WAIT_FLUSH_DONE_TIMEOUT_MS (10)
#define HIFC_SQ_WAIT_FLUSH_DONE_TIMEOUT_CNT (3)
#define HIFC_SRQ_PROCESS_DELAY_MS (20)
/* PLOGI parameters */
struct hifc_plogi_coparams_s {
unsigned int seq_cnt : 1;
unsigned int ed_tov : 1;
unsigned int reserved : 14;
unsigned int tx_mfs : 16;
unsigned int ed_tov_timer_val;
};
struct hifc_delay_sqe_ctrl_info_s {
int valid;
unsigned int rport_index;
unsigned int time_out;
unsigned long long start_jiff;
unsigned int sid;
unsigned int did;
struct hifc_root_sqe_s sqe;
};
struct hifc_destroy_ctrl_info_s {
int valid;
unsigned int rport_index;
unsigned int time_out;
unsigned long long start_jiff;
struct unf_rport_info_s rport_info;
};
/* PARENT SQ Info */
struct hifc_parent_sq_info_s {
void *phba;
spinlock_t parent_sq_enqueue_lock;
atomic_t wqe_page_cnt;
unsigned int rport_index;
unsigned int context_id;
/* Fixed value, used for Doorbell */
unsigned int sq_queue_id;
/* When a session is offloaded, the tile returns the CacheId to the
* driver, which is used for the Doorbell
*/
unsigned int cache_id;
/* service type, fc */
unsigned int service_type;
/* OQID */
unsigned short oqid_rd;
unsigned short oqid_wr;
unsigned int max_sqe_num; /* SQ depth */
unsigned int wqe_num_per_buf;
unsigned int wqe_size;
unsigned int wqe_offset;
unsigned short head_start_cmsn;
unsigned short head_end_cmsn;
unsigned short last_pmsn;
unsigned short last_pi_owner;
unsigned int local_port_id;
unsigned int remote_port_id;
int port_in_flush;
int sq_in_sess_rst;
atomic_t sq_valid;
void *queue_header_original;
struct hifc_queue_header_s *queue_header;
dma_addr_t queue_hdr_phy_addr_original;
dma_addr_t queue_hdr_phy_addr;
/* Linked List SQ */
struct list_head list_linked_list_sq;
unsigned char vport_id;
struct delayed_work del_work;
struct delayed_work flush_done_tmo_work;
unsigned long long del_start_jiff;
dma_addr_t srq_ctx_addr;
atomic_t sq_cashed;
atomic_t fush_done_wait_cnt;
struct hifc_plogi_coparams_s plogi_coparams;
/* dif control info for immi */
struct unf_dif_control_info_s sirt_dif_control;
atomic_t sq_dbl_cnt;
atomic_t sq_wqe_cnt;
atomic_t sq_cqe_cnt;
atomic_t sqe_minus_cqe_cnt;
struct hifc_delay_sqe_ctrl_info_s delay_sqe;
struct hifc_destroy_ctrl_info_s destroy_sqe;
atomic_t io_stat[HIFC_MAX_SQ_TASK_TYPE_CNT];
};
/* parent context doorbell */
struct hifc_parent_sq_db_s {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 service_type : 5;
u32 cos : 3;
u32 c : 1;
u32 arm : 1;
u32 cntx_size : 2;
u32 vport : 7;
u32 xid : 13;
#else
u32 xid : 13;
u32 vport : 7;
u32 cntx_size : 2;
u32 arm : 1;
u32 c : 1;
u32 cos : 3;
u32 service_type : 5;
#endif
} wd0;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 qid : 4;
u32 sm_data : 20;
u32 pi_hi : 8;
#else
u32 pi_hi : 8;
u32 sm_data : 20;
u32 qid : 4;
#endif
} wd1;
};
struct hifc_parent_cmd_scq_info_s {
unsigned int cqm_queue_id;
unsigned int local_queue_id;
};
struct hifc_parent_st_scq_info_s {
unsigned int cqm_queue_id;
unsigned int local_queue_id;
};
struct hifc_parent_els_srq_info_s {
unsigned int cqm_queue_id;
unsigned int local_queue_id;
};
enum hifc_parent_queue_state_e {
HIFC_QUEUE_STATE_INITIALIZED = 0,
HIFC_QUEUE_STATE_OFFLOADING = 1,
HIFC_QUEUE_STATE_OFFLOADED = 2,
HIFC_QUEUE_STATE_DESTROYING = 3,
HIFC_QUEUE_STATE_FREE = 4,
HIFC_QUEUE_STATE_BUTT
};
struct hifc_parent_ctx_s {
dma_addr_t parent_ctx;
/* Allocated by the driver, which fills it when a session is offloaded */
void *virt_parent_ctx;
/* Allocated by CQM, used by hardware */
struct cqm_qpc_mpt_s *cqm_parent_ctx_obj;
};
struct hifc_parent_queue_info_s {
spinlock_t parent_queue_state_lock;
struct hifc_parent_ctx_s parent_ctx;
enum hifc_parent_queue_state_e offload_state;
struct hifc_parent_sq_info_s parent_sq_info;
/* Cmd Scq info which is associated with parent queue */
struct hifc_parent_cmd_scq_info_s parent_cmd_scq_info;
/* Sts Scq info which is associated with parent queue */
struct hifc_parent_st_scq_info_s parent_sts_scq_info;
/* ELS Srq info which is associated with parent queue */
unsigned char queue_vport_id;
struct hifc_parent_els_srq_info_s parent_els_srq_info;
unsigned char queue_data_cos;
};
struct hifc_parent_queue_mgr_s {
struct hifc_parent_queue_info_s parent_queues[UNF_HIFC_MAXRPORT_NUM];
struct buf_describe_s parent_sq_buf_list;
};
struct hifc_get_global_base_qpn_s {
/* for new version interface */
unsigned char status;
unsigned char version;
unsigned char rsvd0[6];
unsigned short func_id;
unsigned short base_qpn;
};
#define HIFC_SRQC_BUS_ROW 8
#define HIFC_SRQC_BUS_COL 19
#define HIFC_SQC_BUS_ROW 8
#define HIFC_SQC_BUS_COL 13
#define HIFC_HW_SCQC_BUS_ROW 6
#define HIFC_HW_SCQC_BUS_COL 10
#define HIFC_HW_SRQC_BUS_ROW 4
#define HIFC_HW_SRQC_BUS_COL 15
#define HIFC_SCQC_BUS_ROW 3
#define HIFC_SCQC_BUS_COL 29
#define HIFC_QUEUE_INFO_BUS_NUM 4
struct hifc_queue_info_bus_s {
unsigned long long bus[HIFC_QUEUE_INFO_BUS_NUM];
};
unsigned int hifc_free_parent_resource(void *v_hba,
struct unf_rport_info_s *v_rport_info);
unsigned int hifc_alloc_parent_resource(void *v_hba,
struct unf_rport_info_s *v_rport_info);
unsigned int hifc_create_root_queues(void *v_hba);
void hifc_destroy_root_queues(void *v_hba);
unsigned int hifc_alloc_parent_queue_mgr(void *v_hba);
void hifc_free_parent_queue_mgr(void *v_hba);
unsigned int hifc_create_common_share_queues(void *v_hba);
void hifc_destroy_common_share_queues(void *v_hba);
unsigned int hifc_alloc_parent_sq_wqe_page_pool(void *v_hba);
void hifc_free_parent_sq_wqe_page_pool(void *v_hba);
struct hifc_parent_queue_info_s *hifc_find_parent_queue_info_by_pkg(
void *v_hba,
struct unf_frame_pkg_s *v_pkg);
struct hifc_parent_sq_info_s *hifc_find_parent_sq_by_pkg(
void *v_hba, struct unf_frame_pkg_s *v_pkg);
struct hifc_parent_ctx_s *hifc_get_parnt_ctx_virt_addr_by_pkg(
void *v_hba,
struct unf_frame_pkg_s *v_pkg);
unsigned int hifc_get_parent_ctx_xid_by_pkg(void *v_hba,
struct unf_frame_pkg_s *v_pkg);
unsigned int hifc_root_sq_enqueue(void *v_hba,
struct hifc_root_sqe_s *v_sqe);
void hifc_process_root_rqe(unsigned long v_rq_info);
unsigned int hifc_root_cmdq_enqueue(void *v_hba,
union hifc_cmdqe_u *v_cmd_qe,
unsigned short v_cmd_len);
void hifc_process_scq_cqe(unsigned long scq_info);
unsigned int hifc_process_scq_cqe_entity(unsigned long v_scq_info,
unsigned int proc_cnt);
void hifc_post_els_srq_wqe(struct hifc_srq_info_s *v_srq_info,
unsigned short buf_id);
void hifc_process_aeqe(void *v_srv_handle, unsigned char evt_type, u64 evt_val);
unsigned int hifc_parent_sq_enqueue(struct hifc_parent_sq_info_s *v_sq,
struct hifcoe_sqe_s *v_sqe);
void hifc_free_sq_wqe_page(struct hifc_parent_sq_info_s *v_sq,
unsigned int cur_cmsn);
unsigned int hifc_reclaim_sq_wqe_page(void *v_hba, union hifcoe_scqe_u *v_scqe);
void hifc_set_root_sq_flush_state(void *v_hba, int in_flush);
void hifc_set_rport_flush_state(void *v_hba, int in_flush);
unsigned int hifc_clear_fetched_sq_wqe(void *v_hba);
unsigned int hifc_clear_pending_sq_wqe(void *v_hba);
void hifc_free_parent_queues(void *v_hba);
void hifc_enable_queues_dispatch(void *v_hba);
void hifc_queue_pre_process(void *v_hba, int v_clean);
void hifc_free_parent_queue_info(
void *v_hba,
struct hifc_parent_queue_info_s *v_parent_queue_info);
unsigned int hifc_send_session_rst_cmd(
void *v_hba,
struct hifc_parent_queue_info_s *v_parent_queue_info,
unsigned int v_mode);
void hifc_build_session_rst_wqe(void *v_hba,
struct hifc_parent_sq_info_s *v_sq,
struct hifcoe_sqe_s *v_sqe,
enum hifc_session_reset_mode_e v_mode,
unsigned int scqn);
unsigned int hifc_rport_session_rst(void *v_hba,
struct unf_rport_info_s *v_rport_info);
unsigned int hifc_get_rport_maped_cmd_scqn(void *v_hba,
unsigned int rport_index);
unsigned int hifc_get_rport_maped_sts_scqn(void *v_hba,
unsigned int rport_index);
void hifc_destroy_srq(void *v_hba);
unsigned int hifc_push_delay_sqe(
void *v_hba,
struct hifc_parent_queue_info_s *v_offload_parent_queue,
struct hifc_root_sqe_s *v_sqe,
struct unf_frame_pkg_s *v_pkg);
void hifc_push_destroy_parent_queue_sqe(
void *v_hba,
struct hifc_parent_queue_info_s *v_offload_parent_queue,
struct unf_rport_info_s *v_rport_info);
void hifc_pop_destroy_parent_queue_sqe(
void *v_hba,
struct hifc_destroy_ctrl_info_s *v_destroy_sqe_info);
struct hifc_parent_queue_info_s *hifc_find_offload_parent_queue(
void *v_hba,
unsigned int v_local_id,
unsigned int v_remote_id,
unsigned int v_rport_index);
unsigned int hifc_flush_ini_resp_queue(void *v_hba);
void hifc_rcvd_els_from_srq_time_out(struct work_struct *work);
#endif
// SPDX-License-Identifier: GPL-2.0
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#include "unf_log.h"
#include "unf_common.h"
#include "unf_event.h"
#include "unf_lport.h"
#include "unf_rport.h"
#include "unf_exchg.h"
#include "unf_service.h"
#include "unf_portman.h"
#include "unf_npiv.h"
static void unf_lport_timeout(struct work_struct *work);
void unf_cmmark_dirty_mem(struct unf_lport_s *v_lport,
enum unf_lport_dirty_flag_e v_etype)
{
UNF_CHECK_VALID(0x1801, UNF_TRUE, v_lport, return);
v_lport->dirty_flag |= v_etype;
}
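/*
* Schedule the periodic L_Port route work and take an L_Port reference for
* it; the reference is dropped again in unf_destroy_lport_route().
*/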
unsigned int unf_init_lport_route(struct unf_lport_s *v_lport)
{
int ret = 0;
UNF_CHECK_VALID(0x1802, UNF_TRUE,
v_lport, return UNF_RETURN_ERROR);
/* Init L_Port route work */
INIT_DELAYED_WORK(&v_lport->route_timer_work, unf_lport_route_work);
/* Delay route work */
ret = queue_delayed_work(
unf_work_queue,
&v_lport->route_timer_work,
(unsigned long)msecs_to_jiffies(UNF_LPORT_POLL_TIMER));
if (unlikely(ret == UNF_FALSE)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_WARN,
"[warn]Port(0x%x) schedule route work failed",
v_lport->port_id);
return UNF_RETURN_ERROR;
}
return unf_lport_refinc(v_lport);
}
void unf_destroy_lport_route(struct unf_lport_s *v_lport)
{
unsigned int ret = UNF_RETURN_ERROR;
UNF_CHECK_VALID(0x1803, UNF_TRUE, v_lport, return);
/* Cancel (route timer) delay work */
UNF_DELAYED_WORK_SYNC(ret, v_lport->port_id,
&v_lport->route_timer_work,
"Route Timer work");
if (ret == RETURN_OK) {
/* Corresponding to ADD operation */
unf_lport_ref_dec(v_lport);
}
v_lport->destroy_step = UNF_LPORT_DESTROY_STEP_2_CLOSE_ROUTE;
}
static void unf_lport_config(struct unf_lport_s *v_lport)
{
UNF_CHECK_VALID(0x1816, UNF_TRUE, v_lport, return);
INIT_DELAYED_WORK(&v_lport->retry_work, unf_lport_timeout);
v_lport->max_retry_count = UNF_MAX_RETRY_COUNT; /* 3 */
v_lport->retries = 0;
}
void unf_init_portparms(struct unf_lport_s *v_lport)
{
INIT_LIST_HEAD(&v_lport->list_vports_head);
INIT_LIST_HEAD(&v_lport->list_intergrad_vports);
INIT_LIST_HEAD(&v_lport->list_destroy_vports);
INIT_LIST_HEAD(&v_lport->entry_lport);
spin_lock_init(&v_lport->lport_state_lock);
v_lport->max_frame_size = max_frame_size;
v_lport->ed_tov = UNF_DEFAULT_EDTOV;
v_lport->ra_tov = UNF_DEFAULT_RATOV;
v_lport->rr_tov = UNF_DEFAULT_RRTOV;
v_lport->fabric_node_name = 0;
v_lport->b_priority = UNF_PRIORITY_DISABLE;
v_lport->b_port_dir_exchange = UNF_FALSE;
/* Delay (retry) work init */
unf_lport_config(v_lport);
unf_set_lport_state(v_lport, UNF_LPORT_ST_ONLINE); /* online */
v_lport->link_up = UNF_PORT_LINK_DOWN;
v_lport->b_port_removing = UNF_FALSE;
v_lport->lport_free_completion = NULL;
v_lport->last_tx_fault_jif = 0;
v_lport->enhanced_features = 0;
v_lport->destroy_step = INVALID_VALUE32;
v_lport->dirty_flag = 0;
v_lport->b_switch_state = UNF_FALSE;
v_lport->b_bbscn_support = UNF_FALSE;
v_lport->en_start_work_state = UNF_START_WORK_STOP;
v_lport->sfp_power_fault_count = 0;
v_lport->sfp_9545_fault_count = 0;
atomic_set(&v_lport->port_no_operater_flag, UNF_LPORT_NORMAL);
atomic_set(&v_lport->lport_ref_cnt, 0);
atomic_set(&v_lport->scsi_session_add_success, 0);
atomic_set(&v_lport->scsi_session_add_failed, 0);
atomic_set(&v_lport->scsi_session_del_success, 0);
atomic_set(&v_lport->scsi_session_del_failed, 0);
atomic_set(&v_lport->add_start_work_failed, 0);
atomic_set(&v_lport->add_closing_work_failed, 0);
atomic_set(&v_lport->alloc_scsi_id, 0);
atomic_set(&v_lport->resume_scsi_id, 0);
atomic_set(&v_lport->reuse_scsi_id, 0);
atomic_set(&v_lport->device_alloc, 0);
atomic_set(&v_lport->device_destroy, 0);
atomic_set(&v_lport->session_loss_tmo, 0);
atomic64_set(&v_lport->exchg_index, 1);
atomic_inc(&v_lport->lport_ref_cnt);
atomic_set(&v_lport->err_code_obtain_freq, 0);
memset(&v_lport->link_service_info, 0,
sizeof(struct unf_link_service_collect_s));
memset(&v_lport->err_code_sum, 0, sizeof(struct unf_err_code_s));
}
void unf_reset_lport_params(struct unf_lport_s *v_lport)
{
struct unf_lport_s *lport = v_lport;
UNF_CHECK_VALID(0x1804, UNF_TRUE, v_lport, return);
lport->link_up = UNF_PORT_LINK_DOWN;
lport->nport_id = 0; /* Need to do FLOGI again to clear N_Port_ID */
lport->max_frame_size = max_frame_size;
lport->ed_tov = UNF_DEFAULT_EDTOV;
lport->ra_tov = UNF_DEFAULT_RATOV;
lport->rr_tov = UNF_DEFAULT_RRTOV;
lport->fabric_node_name = 0;
}
static enum unf_lport_login_state_e unf_lport_stat_online(
enum unf_lport_login_state_e old_state,
enum unf_lport_event_e event)
{
enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE;
switch (event) {
case UNF_EVENT_LPORT_LINK_UP:
/* EVENT_LINK_UP --->>> ST_LINK_UP */
next_state = UNF_LPORT_ST_LINK_UP;
break;
case UNF_EVENT_LPORT_NORMAL_ENTER:
/* EVENT_NORMAL_ENTER --->>> ST_INITIAL */
next_state = UNF_LPORT_ST_INITIAL;
break;
default:
next_state = old_state;
break;
}
return next_state;
}
static enum unf_lport_login_state_e unf_lport_stat_initial(
enum unf_lport_login_state_e old_state,
enum unf_lport_event_e event)
{
enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE;
switch (event) {
case UNF_EVENT_LPORT_LINK_UP:
/* EVENT_LINK_UP --->>> ST_LINK_UP */
next_state = UNF_LPORT_ST_LINK_UP;
break;
default:
next_state = old_state;
break;
}
return next_state;
}
static enum unf_lport_login_state_e unf_lport_stat_linkup(
enum unf_lport_login_state_e old_state,
enum unf_lport_event_e event)
{
enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE;
switch (event) {
case UNF_EVENT_LPORT_NORMAL_ENTER:
/* EVENT_NORMAL_ENTER --->>> FLOGI_WAIT */
next_state = UNF_LPORT_ST_FLOGI_WAIT;
break;
case UNF_EVENT_LPORT_READY:
/* EVENT_READY --->>> ST_READY */
next_state = UNF_LPORT_ST_READY;
break;
case UNF_EVENT_LPORT_LINK_DOWN:
/* EVENT_LINK_DOWN --->>> ST_INITIAL */
next_state = UNF_LPORT_ST_INITIAL;
break;
default:
next_state = old_state;
break;
}
return next_state;
}
static enum unf_lport_login_state_e unf_lport_stat_flogi_wait(
enum unf_lport_login_state_e old_state,
enum unf_lport_event_e event)
{
enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE;
switch (event) {
case UNF_EVENT_LPORT_REMOTE_ACC:
/* EVENT_REMOTE_ACC --->>> ST_PLOGI_WAIT */
next_state = UNF_LPORT_ST_PLOGI_WAIT;
break;
case UNF_EVENT_LPORT_READY:
/* EVENT_READY --->>> ST_READY */
next_state = UNF_LPORT_ST_READY;
break;
case UNF_EVENT_LPORT_REMOTE_TIMEOUT:
/* EVENT_REMOTE_TIMEOUT --->>> ST_LOGO */
next_state = UNF_LPORT_ST_LOGO;
break;
case UNF_EVENT_LPORT_LINK_DOWN:
/* EVENT_LINK_DOWN --->>> ST_INITIAL */
next_state = UNF_LPORT_ST_INITIAL;
break;
default:
next_state = old_state;
break;
}
return next_state;
}
static enum unf_lport_login_state_e unf_lport_stat_plogi_wait(
enum unf_lport_login_state_e old_state,
enum unf_lport_event_e event)
{
enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE;
switch (event) {
case UNF_EVENT_LPORT_REMOTE_ACC:
/* EVENT_REMOTE_ACC --->>> ST_RFT_ID_WAIT */
next_state = UNF_LPORT_ST_RFT_ID_WAIT;
break;
case UNF_EVENT_LPORT_REMOTE_TIMEOUT:
/* EVENT_TIMEOUT --->>> ST_LOGO */
next_state = UNF_LPORT_ST_LOGO;
break;
case UNF_EVENT_LPORT_LINK_DOWN:
/* EVENT_LINK_DOWN --->>> ST_INITIAL */
next_state = UNF_LPORT_ST_INITIAL;
break;
default:
next_state = old_state;
break;
}
return next_state;
}
static enum unf_lport_login_state_e unf_lport_stat_rftid_wait(
enum unf_lport_login_state_e old_state,
enum unf_lport_event_e event)
{
enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE;
switch (event) {
case UNF_EVENT_LPORT_REMOTE_ACC:
/* EVENT_REMOTE_ACC --->>> ST_RFF_ID_WAIT */
next_state = UNF_LPORT_ST_RFF_ID_WAIT;
break;
case UNF_EVENT_LPORT_REMOTE_TIMEOUT:
/* EVENT_TIMEOUT --->>> ST_LOGO */
next_state = UNF_LPORT_ST_LOGO;
break;
case UNF_EVENT_LPORT_LINK_DOWN:
/* EVENT_LINK_DOWN --->>> ST_INITIAL */
next_state = UNF_LPORT_ST_INITIAL;
break;
default:
next_state = old_state;
break;
}
return next_state;
}
static enum unf_lport_login_state_e unf_lport_stat_rffid_wait(
enum unf_lport_login_state_e old_state,
enum unf_lport_event_e event)
{
enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE;
switch (event) {
case UNF_EVENT_LPORT_REMOTE_ACC:
/* EVENT_REMOTE_ACC --->>> ST_SCR_WAIT */
next_state = UNF_LPORT_ST_SCR_WAIT;
break;
case UNF_EVENT_LPORT_REMOTE_TIMEOUT:
/* EVENT_TIMEOUT --->>> ST_LOGO */
next_state = UNF_LPORT_ST_LOGO;
break;
case UNF_EVENT_LPORT_LINK_DOWN:
/* EVENT_LINK_DOWN --->>> ST_INITIAL */
next_state = UNF_LPORT_ST_INITIAL;
break;
default:
next_state = old_state;
break;
}
return next_state;
}
static enum unf_lport_login_state_e unf_lport_state_scr_wait(
enum unf_lport_login_state_e old_state,
enum unf_lport_event_e event)
{
enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE;
switch (event) {
case UNF_EVENT_LPORT_REMOTE_ACC:
/* EVENT_REMOTE_ACC --->>> ST_READY */
next_state = UNF_LPORT_ST_READY;
break;
case UNF_EVENT_LPORT_REMOTE_TIMEOUT:
/* EVENT_TIMEOUT --->>> ST_LOGO */
next_state = UNF_LPORT_ST_LOGO;
break;
case UNF_EVENT_LPORT_LINK_DOWN:
/* EVENT_LINK_DOWN --->>> ST_INITIAL */
next_state = UNF_LPORT_ST_INITIAL;
break;
default:
next_state = old_state;
break;
}
return next_state;
}
static enum unf_lport_login_state_e unf_lport_state_logo(
enum unf_lport_login_state_e old_state,
enum unf_lport_event_e event)
{
enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE;
switch (event) {
case UNF_EVENT_LPORT_NORMAL_ENTER:
/* EVENT_NORMAL_ENTER --->>> ST_OFFLINE */
next_state = UNF_LPORT_ST_OFFLINE;
break;
case UNF_EVENT_LPORT_LINK_DOWN:
/* EVENT_LINK_DOWN --->>> ST_INITIAL */
next_state = UNF_LPORT_ST_INITIAL;
break;
default:
next_state = old_state;
break;
}
return next_state;
}
static enum unf_lport_login_state_e unf_lport_state_offline(
enum unf_lport_login_state_e old_state,
enum unf_lport_event_e event)
{
enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE;
switch (event) {
case UNF_EVENT_LPORT_ONLINE:
/* EVENT_ONLINE --->>> ST_ONLINE */
next_state = UNF_LPORT_ST_ONLINE;
break;
case UNF_EVENT_LPORT_RESET:
/* EVENT_RESET --->>> ST_RESET */
next_state = UNF_LPORT_ST_RESET;
break;
case UNF_EVENT_LPORT_LINK_DOWN:
/* EVENT_LINK_DOWN --->>> ST_INITIAL */
next_state = UNF_LPORT_ST_INITIAL;
break;
default:
next_state = old_state;
break;
}
return next_state;
}
static enum unf_lport_login_state_e unf_lport_state_reset(
enum unf_lport_login_state_e old_state,
enum unf_lport_event_e event)
{
enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE;
switch (event) {
case UNF_EVENT_LPORT_NORMAL_ENTER:
/* EVENT_NORMAL_ENTER --->>> ST_INITIAL */
next_state = UNF_LPORT_ST_INITIAL;
break;
default:
next_state = old_state;
break;
}
return next_state;
}
static enum unf_lport_login_state_e unf_lport_state_ready(
enum unf_lport_login_state_e old_state,
enum unf_lport_event_e event)
{
enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE;
switch (event) {
case UNF_EVENT_LPORT_LINK_DOWN:
/* EVENT_LINK_DOWN --->>> ST_INITIAL */
next_state = UNF_LPORT_ST_INITIAL;
break;
case UNF_EVENT_LPORT_RESET:
/* EVENT_RESET --->>> ST_RESET */
next_state = UNF_LPORT_ST_RESET;
break;
case UNF_EVENT_LPORT_OFFLINE:
/* EVENT_OFFLINE --->>> ST_LOGO */
next_state = UNF_LPORT_ST_LOGO;
break;
default:
next_state = old_state;
break;
}
return next_state;
}
void unf_lport_stat_ma(struct unf_lport_s *v_lport,
enum unf_lport_event_e event)
{
enum unf_lport_login_state_e old_state = UNF_LPORT_ST_ONLINE;
enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE;
UNF_CHECK_VALID(0x1805, UNF_TRUE, v_lport, return);
old_state = v_lport->en_states;
switch (v_lport->en_states) {
case UNF_LPORT_ST_ONLINE:
next_state = unf_lport_stat_online(old_state, event);
break;
case UNF_LPORT_ST_INITIAL:
next_state = unf_lport_stat_initial(old_state, event);
break;
case UNF_LPORT_ST_LINK_UP:
next_state = unf_lport_stat_linkup(old_state, event);
break;
case UNF_LPORT_ST_FLOGI_WAIT:
next_state = unf_lport_stat_flogi_wait(old_state, event);
break;
case UNF_LPORT_ST_PLOGI_WAIT:
next_state = unf_lport_stat_plogi_wait(old_state, event);
break;
case UNF_LPORT_ST_RFT_ID_WAIT:
next_state = unf_lport_stat_rftid_wait(old_state, event);
break;
case UNF_LPORT_ST_RFF_ID_WAIT:
next_state = unf_lport_stat_rffid_wait(old_state, event);
break;
case UNF_LPORT_ST_SCR_WAIT:
next_state = unf_lport_state_scr_wait(old_state, event);
break;
case UNF_LPORT_ST_LOGO:
next_state = unf_lport_state_logo(old_state, event);
break;
case UNF_LPORT_ST_OFFLINE:
next_state = unf_lport_state_offline(old_state, event);
break;
case UNF_LPORT_ST_RESET:
next_state = unf_lport_state_reset(old_state, event);
break;
case UNF_LPORT_ST_READY:
next_state = unf_lport_state_ready(old_state, event);
break;
default:
next_state = old_state;
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"[info]Port(0x%x) hold state(0x%x)",
v_lport->port_id, v_lport->en_states);
break;
}
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO,
"[info]Port(0x%x) with old state(0x%x) event(0x%x) next state(0x%x)",
v_lport->port_id, old_state, event, next_state);
unf_set_lport_state(v_lport, next_state);
}
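/*
 * Summary sketch of the login state machine dispatched above (derived
 * from the unf_lport_stat_*()/unf_lport_state_*() handlers; see those
 * handlers for the authoritative transition table):
 *
 *   ONLINE      --NORMAL_ENTER--> INITIAL     --LINK_UP-----> LINK_UP
 *   LINK_UP     --NORMAL_ENTER--> FLOGI_WAIT  --REMOTE_ACC--> PLOGI_WAIT
 *   PLOGI_WAIT  --REMOTE_ACC----> RFT_ID_WAIT --REMOTE_ACC--> RFF_ID_WAIT
 *   RFF_ID_WAIT --REMOTE_ACC----> SCR_WAIT    --REMOTE_ACC--> READY
 *   any *_WAIT  --REMOTE_TIMEOUT--> LOGO --NORMAL_ENTER--> OFFLINE
 *   most states --LINK_DOWN-----> INITIAL
 *
 * Unhandled events keep the old state.
 */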
unsigned int unf_init_lport_mgr_temp(struct unf_lport_s *v_lport)
{
UNF_CHECK_VALID(0x1806, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
v_lport->lport_mgr_temp.pfn_unf_vport_get_free_and_init = NULL;
v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_vp_index =
unf_lookup_vport_by_vp_index;
v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_port_id =
unf_lookup_vport_by_port_id;
v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_did =
unf_lookup_vport_by_did;
v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_wwpn =
unf_lookup_vport_by_wwpn;
v_lport->lport_mgr_temp.pfn_unf_vport_remove = unf_vport_remove;
return RETURN_OK;
}
void unf_release_lport_mgr_temp(struct unf_lport_s *v_lport)
{
UNF_CHECK_VALID(0x1807, UNF_TRUE, v_lport, return);
memset(&v_lport->lport_mgr_temp, 0,
sizeof(struct unf_cm_lport_template_s));
v_lport->destroy_step = UNF_LPORT_DESTROY_STEP_9_DESTROY_LPORT_MG_TMP;
}
unsigned int unf_lport_retry_flogi(struct unf_lport_s *v_lport)
{
struct unf_rport_s *rport = NULL;
unsigned int ret = UNF_RETURN_ERROR;
unsigned long flag = 0;
UNF_CHECK_VALID(0x1808, UNF_TRUE,
v_lport, return UNF_RETURN_ERROR);
/* Get (new) R_Port */
rport = unf_get_rport_by_nport_id(v_lport, UNF_FC_FID_FLOGI);
rport = unf_get_safe_rport(v_lport, rport,
UNF_RPORT_REUSE_ONLY, UNF_FC_FID_FLOGI);
if (unlikely(!rport)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x) allocate RPort failed",
v_lport->port_id);
return UNF_RETURN_ERROR;
}
/* Check L_Port state */
spin_lock_irqsave(&v_lport->lport_state_lock, flag);
if (v_lport->en_states != UNF_LPORT_ST_FLOGI_WAIT) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x) no need to retry FLOGI with state(0x%x)",
v_lport->port_id, v_lport->en_states);
spin_unlock_irqrestore(&v_lport->lport_state_lock, flag);
return RETURN_OK;
}
spin_unlock_irqrestore(&v_lport->lport_state_lock, flag);
spin_lock_irqsave(&rport->rport_state_lock, flag);
rport->nport_id = UNF_FC_FID_FLOGI;
spin_unlock_irqrestore(&rport->rport_state_lock, flag);
/* Send FLOGI or FDISC */
if (v_lport != v_lport->root_lport) {
ret = unf_send_fdisc(v_lport, rport);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN,
UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]LOGIN: Port(0x%x) send FDISC failed",
v_lport->port_id);
/* Do L_Port recovery */
unf_lport_error_recovery(v_lport);
}
} else {
ret = unf_send_flogi(v_lport, rport);
if (ret != RETURN_OK) {
UNF_TRACE(
UNF_EVTLOG_DRIVER_WARN,
UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]LOGIN: Port(0x%x) send FLOGI failed\n",
v_lport->port_id);
/* Do L_Port recovery */
unf_lport_error_recovery(v_lport);
}
}
return ret;
}
unsigned int unf_lport_name_server_register(
struct unf_lport_s *v_lport,
enum unf_lport_login_state_e states)
{
struct unf_rport_s *rport = NULL;
unsigned long flag = 0;
unsigned int ret = UNF_RETURN_ERROR;
UNF_CHECK_VALID(0x1809, UNF_TRUE,
v_lport, return UNF_RETURN_ERROR);
/* Get (safe) R_Port 0xfffffc */
rport = unf_get_rport_by_nport_id(v_lport, UNF_FC_FID_DIR_SERV);
rport = unf_get_safe_rport(v_lport, rport, UNF_RPORT_REUSE_ONLY,
UNF_FC_FID_DIR_SERV);
if (!rport) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x) allocate RPort failed",
v_lport->port_id);
return UNF_RETURN_ERROR;
}
/* Update R_Port & L_Port state */
spin_lock_irqsave(&rport->rport_state_lock, flag);
rport->nport_id = UNF_FC_FID_DIR_SERV; /* 0xfffffc */
spin_unlock_irqrestore(&rport->rport_state_lock, flag);
spin_lock_irqsave(&v_lport->lport_state_lock, flag);
unf_lport_stat_ma(v_lport, UNF_EVENT_LPORT_NORMAL_ENTER);
spin_unlock_irqrestore(&v_lport->lport_state_lock, flag);
switch (states) {
/* RFT_ID */
case UNF_LPORT_ST_RFT_ID_WAIT:
ret = unf_send_rft_id(v_lport, rport);
break;
/* RFF_ID */
case UNF_LPORT_ST_RFF_ID_WAIT:
ret = unf_send_rff_id(v_lport, rport);
break;
/* SCR */
case UNF_LPORT_ST_SCR_WAIT:
ret = unf_send_scr(v_lport, NULL);
break;
/* PLOGI */
case UNF_LPORT_ST_PLOGI_WAIT:
default:
spin_lock_irqsave(&rport->rport_state_lock, flag);
unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI);
spin_unlock_irqrestore(&rport->rport_state_lock, flag);
ret = unf_send_plogi(v_lport, rport);
break;
}
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]LOGIN: Port(0x%x) register fabric(0xfffffc) failed",
v_lport->nport_id);
/* Do L_Port recovery */
unf_lport_error_recovery(v_lport);
}
return ret;
}
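/*
 * Fabric registration sequence (sketch; derived from the states handled
 * above): once FLOGI completes, the L_Port logs in to the directory
 * server at well-known address 0xfffffc and registers step by step:
 *
 *   PLOGI(0xfffffc) -> RFT_ID -> RFF_ID -> SCR -> READY
 *
 * On a timeout, unf_lport_timeout() re-enters this function with the
 * current *_WAIT state so that the corresponding request is retried.
 */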
unsigned int unf_lport_enter_sns_logo(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport)
{
struct unf_rport_s *rport = NULL;
unsigned long flag = 0;
unsigned int ret = UNF_RETURN_ERROR;
UNF_CHECK_VALID(0x1810, UNF_TRUE,
v_lport, return UNF_RETURN_ERROR);
if (!v_rport) {
rport = unf_get_rport_by_nport_id(v_lport,
UNF_FC_FID_DIR_SERV);
} else {
rport = v_rport;
}
if (!rport) {
spin_lock_irqsave(&v_lport->lport_state_lock, flag);
unf_lport_stat_ma(v_lport, UNF_EVENT_LPORT_NORMAL_ENTER);
spin_unlock_irqrestore(&v_lport->lport_state_lock, flag);
return RETURN_OK;
}
/* Update L_Port & R_Port state */
spin_lock_irqsave(&v_lport->lport_state_lock, flag);
unf_lport_stat_ma(v_lport, UNF_EVENT_LPORT_NORMAL_ENTER);
spin_unlock_irqrestore(&v_lport->lport_state_lock, flag);
spin_lock_irqsave(&rport->rport_state_lock, flag);
unf_rport_state_ma(rport, UNF_EVENT_RPORT_LOGO);
spin_unlock_irqrestore(&rport->rport_state_lock, flag);
/* Do R_Port LOGO state */
unf_rport_enter_logo(v_lport, rport);
return ret;
}
void unf_lport_enter_sns_plogi(struct unf_lport_s *v_lport)
{
/* Fabric or Public Loop Mode: Login with Name server */
struct unf_lport_s *lport = v_lport;
struct unf_rport_s *rport = NULL;
unsigned long flag = 0;
unsigned int ret = UNF_RETURN_ERROR;
UNF_CHECK_VALID(0x1811, UNF_TRUE, v_lport, return);
/* Get (safe) R_Port 0xfffffc */
rport = unf_get_rport_by_nport_id(lport, UNF_FC_FID_DIR_SERV);
if (rport) {
/* for port swap: Delete old R_Port if necessary */
if (rport->local_nport_id != v_lport->nport_id) {
unf_rport_immediate_linkdown(v_lport, rport);
rport = NULL;
}
}
rport = unf_get_safe_rport(v_lport, rport,
UNF_RPORT_REUSE_ONLY, UNF_FC_FID_DIR_SERV);
if (!rport) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x) allocate RPort failed",
v_lport->port_id);
unf_lport_error_recovery(lport);
return;
}
spin_lock_irqsave(&rport->rport_state_lock, flag);
rport->nport_id = UNF_FC_FID_DIR_SERV; /* 0xfffffc */
spin_unlock_irqrestore(&rport->rport_state_lock, flag);
/* Send PLOGI to Fabric(0xfffffc) */
ret = unf_send_plogi(lport, rport);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]LOGIN: Port(0x%x) send PLOGI to name server failed",
v_lport->port_id);
unf_lport_error_recovery(lport);
}
}
int unf_get_port_params(void *v_argin, void *v_argout)
{
struct unf_lport_s *lport = (struct unf_lport_s *)v_argin;
struct unf_low_level_port_mgr_op_s *port_mg = NULL;
struct unf_port_params_s port_params = { 0 };
int ret = RETURN_OK;
UNF_REFERNCE_VAR(v_argout);
UNF_CHECK_VALID(0x1812, UNF_TRUE,
v_argin, return UNF_RETURN_ERROR);
port_mg = &lport->low_level_func.port_mgr_op;
if (!port_mg->pfn_ll_port_config_get) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_WARN,
"[warn]Port(0x%x) low level port_config_get function is NULL",
lport->port_id);
return UNF_RETURN_ERROR;
}
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_INFO,
"[warn]Port(0x%x) get parameters with default:R_A_TOV(%d) E_D_TOV(%d)",
lport->port_id, UNF_DEFAULT_FABRIC_RATOV, UNF_DEFAULT_EDTOV);
port_params.ra_tov = UNF_DEFAULT_FABRIC_RATOV;
port_params.ed_tov = UNF_DEFAULT_EDTOV;
/* Update parameters with Fabric mode */
if ((lport->en_act_topo == UNF_ACT_TOP_PUBLIC_LOOP) ||
(lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC)) {
lport->ra_tov = port_params.ra_tov;
lport->ed_tov = port_params.ed_tov;
}
return ret;
}
unsigned int unf_lport_enter_flogi(struct unf_lport_s *v_lport)
{
struct unf_rport_s *rport = NULL;
struct unf_cm_event_report *event = NULL;
unsigned long flag = 0;
unsigned int ret = UNF_RETURN_ERROR;
unsigned int nport_id = 0;
UNF_CHECK_VALID(0x1813, UNF_TRUE,
v_lport, return UNF_RETURN_ERROR);
/* Get (safe) R_Port */
nport_id = UNF_FC_FID_FLOGI; /* 0xfffffe */
rport = unf_get_rport_by_nport_id(v_lport, UNF_FC_FID_FLOGI);
rport = unf_get_safe_rport(v_lport, rport,
UNF_RPORT_REUSE_ONLY, nport_id);
if (!rport) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x) allocate RPort failed",
v_lport->port_id);
return UNF_RETURN_ERROR;
}
/* Update L_Port state */
spin_lock_irqsave(&v_lport->lport_state_lock, flag);
/* LPort: LINK UP --> FLOGI WAIT */
unf_lport_stat_ma(v_lport, UNF_EVENT_LPORT_NORMAL_ENTER);
spin_unlock_irqrestore(&v_lport->lport_state_lock, flag);
/* Update R_Port N_Port_ID */
spin_lock_irqsave(&rport->rport_state_lock, flag);
rport->nport_id = UNF_FC_FID_FLOGI; /* 0xfffffe */
spin_unlock_irqrestore(&rport->rport_state_lock, flag);
event = unf_get_one_event_node(v_lport);
if (event) {
event->lport = v_lport;
event->event_asy_flag = UNF_EVENT_ASYN;
/* NULL for timer */
event->pfn_unf_event_task = unf_get_port_params;
event->para_in = (void *)v_lport;
unf_post_one_event_node(v_lport, event);
}
if (v_lport != v_lport->root_lport) {
/* for NPIV */
ret = unf_send_fdisc(v_lport, rport);
if (ret != RETURN_OK)
/* Do L_Port recovery */
unf_lport_error_recovery(v_lport);
} else {
/* for Physical Port */
ret = unf_send_flogi(v_lport, rport);
if (ret != RETURN_OK)
/* Do L_Port recovery */
unf_lport_error_recovery(v_lport);
}
return ret;
}
void unf_set_lport_state(struct unf_lport_s *v_lport,
enum unf_lport_login_state_e states)
{
UNF_CHECK_VALID(0x1814, UNF_TRUE, v_lport, return);
if (states != v_lport->en_states) {
/* Reset L_Port retry count */
v_lport->retries = 0;
}
v_lport->en_states = states;
}
static void unf_lport_timeout(struct work_struct *work)
{
struct unf_lport_s *lport = NULL;
enum unf_lport_login_state_e state = UNF_LPORT_ST_READY;
unsigned long flag = 0;
UNF_CHECK_VALID(0x1815, UNF_TRUE, work, return);
lport = container_of(work, struct unf_lport_s, retry_work.work);
spin_lock_irqsave(&lport->lport_state_lock, flag);
state = lport->en_states;
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x) is timeout with state(0x%x)",
lport->port_id, state);
spin_unlock_irqrestore(&lport->lport_state_lock, flag);
switch (state) {
/* FLOGI retry */
case UNF_LPORT_ST_FLOGI_WAIT:
(void)unf_lport_retry_flogi(lport);
break;
case UNF_LPORT_ST_PLOGI_WAIT:
case UNF_LPORT_ST_RFT_ID_WAIT:
case UNF_LPORT_ST_RFF_ID_WAIT:
case UNF_LPORT_ST_SCR_WAIT:
(void)unf_lport_name_server_register(lport, state);
break;
/* Send LOGO External */
case UNF_LPORT_ST_LOGO:
break;
/* Do nothing */
case UNF_LPORT_ST_OFFLINE:
case UNF_LPORT_ST_READY:
case UNF_LPORT_ST_RESET:
case UNF_LPORT_ST_ONLINE:
case UNF_LPORT_ST_INITIAL:
case UNF_LPORT_ST_LINK_UP:
lport->retries = 0;
break;
default:
break;
}
unf_lport_ref_dec_to_destroy(lport);
}
void unf_lport_error_recovery(struct unf_lport_s *v_lport)
{
unsigned long delay = 0;
unsigned long flag = 0;
int ret = 0;
UNF_CHECK_VALID(0x1817, UNF_TRUE, v_lport, return);
if (unlikely(unf_lport_refinc(v_lport) != RETURN_OK)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x) is removing and no need process",
v_lport->port_id);
return;
}
spin_lock_irqsave(&v_lport->lport_state_lock, flag);
/* Port State: removing */
if (v_lport->b_port_removing == UNF_TRUE) {
spin_unlock_irqrestore(&v_lport->lport_state_lock, flag);
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x) is removing and no need process",
v_lport->port_id);
unf_lport_ref_dec_to_destroy(v_lport);
return;
}
/* Port State: offline */
if (v_lport->en_states == UNF_LPORT_ST_OFFLINE) {
spin_unlock_irqrestore(&v_lport->lport_state_lock, flag);
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x) is offline and no need process",
v_lport->port_id);
unf_lport_ref_dec_to_destroy(v_lport);
return;
}
/* Queue work state check */
if (delayed_work_pending(&v_lport->retry_work)) {
spin_unlock_irqrestore(&v_lport->lport_state_lock, flag);
unf_lport_ref_dec_to_destroy(v_lport);
return;
}
/* Do retry operation */
if (v_lport->retries < v_lport->max_retry_count) {
v_lport->retries++;
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"[info]Port(0x%x_0x%x) enter recovery and retry %u times",
v_lport->port_id, v_lport->nport_id,
v_lport->retries);
delay = (unsigned long)v_lport->ed_tov;
ret = queue_delayed_work(unf_work_queue,
&v_lport->retry_work,
(unsigned long)msecs_to_jiffies(
(unsigned int)delay));
if (ret) {
atomic_inc(&v_lport->lport_ref_cnt);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO,
UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"[info]Port(0x%x) queue work success and reference count is %d",
v_lport->port_id,
atomic_read(&v_lport->lport_ref_cnt));
}
spin_unlock_irqrestore(&v_lport->lport_state_lock, flag);
} else {
unf_lport_stat_ma(v_lport, UNF_EVENT_LPORT_REMOTE_TIMEOUT);
spin_unlock_irqrestore(&v_lport->lport_state_lock, flag);
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x) register operation timeout and do LOGO",
v_lport->port_id);
/* Do L_Port LOGO */
(void)unf_lport_enter_sns_logo(v_lport, NULL);
}
unf_lport_ref_dec_to_destroy(v_lport);
}
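/*
 * Retry arithmetic (illustrative sketch): with the defaults from
 * unf_lport_config(), recovery retries at most UNF_MAX_RETRY_COUNT (3)
 * times and each attempt is delayed by ed_tov milliseconds, i.e.:
 *
 *   delay = msecs_to_jiffies(v_lport->ed_tov);
 *   queue_delayed_work(unf_work_queue, &v_lport->retry_work, delay);
 *
 * When the retries are exhausted, the state machine is driven with
 * UNF_EVENT_LPORT_REMOTE_TIMEOUT and an SNS LOGO is sent.
 */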
struct unf_lport_s *unf_cm_lookup_vport_by_vp_index(struct unf_lport_s *v_lport,
unsigned short v_vp_index)
{
UNF_CHECK_VALID(0x1819, UNF_TRUE, v_lport, return NULL);
if (v_vp_index == 0)
return v_lport;
if (!v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_vp_index) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Port(0x%x) function do look up vport by index is NULL",
v_lport->port_id);
return NULL;
}
return v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_vp_index(
v_lport, v_vp_index);
}
struct unf_lport_s *unf_cm_lookup_vport_by_did(struct unf_lport_s *v_lport,
unsigned int v_did)
{
UNF_CHECK_VALID(0x1821, UNF_TRUE, v_lport, return NULL);
if (!v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_did) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Port(0x%x) function do look up vport by D_ID is NULL",
v_lport->port_id);
return NULL;
}
return v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_did(v_lport,
v_did);
}
struct unf_lport_s *unf_cm_lookup_vport_by_wwpn(struct unf_lport_s *v_lport,
unsigned long long v_wwpn)
{
UNF_CHECK_VALID(0x1822, UNF_TRUE, v_lport, return NULL);
if (!v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_wwpn) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Port(0x%x) function do look up vport by WWPN is NULL",
v_lport->port_id);
return NULL;
}
return v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_wwpn(v_lport,
v_wwpn);
}
void unf_cm_vport_remove(struct unf_lport_s *v_vport)
{
struct unf_lport_s *lport = NULL;
UNF_CHECK_VALID(0x1823, UNF_TRUE, v_vport, return);
lport = v_vport->root_lport;
UNF_CHECK_VALID(0x1824, UNF_TRUE, lport, return);
if (!lport->lport_mgr_temp.pfn_unf_vport_remove) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Port(0x%x) function do vport remove is NULL",
lport->port_id);
return;
}
lport->lport_mgr_temp.pfn_unf_vport_remove(v_vport);
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __UNF_LPORT_H
#define __UNF_LPORT_H
#include "unf_disc.h"
#include "unf_event.h"
#include "unf_common.h"
#define UNF_PORT_TYPE_FC 0
#define UNF_PORT_TYPE_DISC 1
#define UNF_FW_UPDATE_PATH_LEN_MAX 255
#define UNF_EXCHG_MGR_NUM (4)
#define UNF_MAX_IO_RETURN_VALUE 0x12
#define UNF_MAX_SCSI_CMD 0xFF
enum unf_scsi_error_handle_type {
UNF_SCSI_ABORT_IO_TYPE = 0,
UNF_SCSI_DEVICE_RESET_TYPE,
UNF_SCSI_TARGET_RESET_TYPE,
UNF_SCSI_BUS_RESET_TYPE,
UNF_SCSI_HOST_RESET_TYPE,
UNF_SCSI_VIRTUAL_RESET_TYPE,
UNF_SCSI_ERROR_HANDLE_BUTT
};
enum unf_lport_destroy_step_e {
UNF_LPORT_DESTROY_STEP_0_SET_REMOVING = 0,
UNF_LPORT_DESTROY_STEP_1_REPORT_PORT_OUT,
UNF_LPORT_DESTROY_STEP_2_CLOSE_ROUTE,
UNF_LPORT_DESTROY_STEP_3_DESTROY_EVENT_CENTER,
UNF_LPORT_DESTROY_STEP_4_DESTROY_EXCH_MGR,
UNF_LPORT_DESTROY_STEP_5_DESTROY_ESGL_POOL,
UNF_LPORT_DESTROY_STEP_6_DESTROY_DISC_MGR,
UNF_LPORT_DESTROY_STEP_7_DESTROY_XCHG_MGR_TMP,
UNF_LPORT_DESTROY_STEP_8_DESTROY_RPORT_MG_TMP,
UNF_LPORT_DESTROY_STEP_9_DESTROY_LPORT_MG_TMP,
UNF_LPORT_DESTROY_STEP_10_DESTROY_SCSI_TABLE,
UNF_LPORT_DESTROY_STEP_11_UNREG_TGT_HOST,
UNF_LPORT_DESTROY_STEP_12_UNREG_SCSI_HOST,
UNF_LPORT_DESTROY_STEP_13_DESTROY_LW_INTERFACE,
UNF_LPORT_DESTROY_STEP_BUTT
};
enum unf_lport_enhanced_feature_e {
/* Enhanced GFF feature: still connect even if getting the GFF feature fails */
UNF_LPORT_ENHANCED_FEATURE_ENHANCED_GFF = 0x0001,
/* Enhance IO balance */
UNF_LPORT_ENHANCED_FEATURE_IO_TRANSFERLIST = 0x0002,
/* Enhance IO check */
UNF_LPORT_ENHANCED_FEATURE_IO_CHECKPOINT = 0x0004,
/* Close FW ROUTE */
UNF_LPORT_ENHANCED_FEATURE_CLOSE_FW_ROUTE = 0x0008,
/* read SFP information at the lowest frequency */
UNF_LPORT_ENHANCED_FEATURE_READ_SFP_ONCE = 0x0010,
UNF_LPORT_ENHANCED_FEATURE_BUTT
};
enum unf_lport_login_state_e {
UNF_LPORT_ST_ONLINE = 0x2000, /* uninitialized */
UNF_LPORT_ST_INITIAL, /* initialized and LinkDown */
UNF_LPORT_ST_LINK_UP, /* initialized and Link UP */
UNF_LPORT_ST_FLOGI_WAIT, /* waiting for FLOGI completion */
UNF_LPORT_ST_PLOGI_WAIT, /* waiting for PLOGI completion */
UNF_LPORT_ST_RNN_ID_WAIT, /* waiting for RNN_ID completion */
UNF_LPORT_ST_RSNN_NN_WAIT, /* waiting for RSNN_NN completion */
UNF_LPORT_ST_RSPN_ID_WAIT, /* waiting for RSPN_ID completion */
UNF_LPORT_ST_RPN_ID_WAIT, /* waiting for RPN_ID completion */
UNF_LPORT_ST_RFT_ID_WAIT, /* waiting for RFT_ID completion */
UNF_LPORT_ST_RFF_ID_WAIT, /* waiting for RFF_ID completion */
UNF_LPORT_ST_SCR_WAIT, /* waiting for SCR completion */
UNF_LPORT_ST_READY, /* ready for use */
UNF_LPORT_ST_LOGO, /* waiting for LOGO completion */
UNF_LPORT_ST_RESET, /* being reset and will restart */
UNF_LPORT_ST_OFFLINE, /* offline */
UNF_LPORT_ST_BUTT
};
enum unf_lport_event_e {
UNF_EVENT_LPORT_NORMAL_ENTER = 0x8000, /* next state enter */
UNF_EVENT_LPORT_ONLINE = 0x8001, /* L_Port online */
UNF_EVENT_LPORT_LINK_UP = 0x8002, /* L_Port link up */
UNF_EVENT_LPORT_LINK_DOWN = 0x8003, /* L_Port link down */
UNF_EVENT_LPORT_OFFLINE = 0x8004, /* L_Port being stopped */
UNF_EVENT_LPORT_RESET = 0x8005,
UNF_EVENT_LPORT_REMOTE_ACC = 0x8006, /* remote accept (ACC) */
UNF_EVENT_LPORT_REMOTE_RJT = 0x8007, /* remote reject (RJT) */
UNF_EVENT_LPORT_REMOTE_TIMEOUT = 0x8008, /* remote timeout */
UNF_EVENT_LPORT_READY = 0x8009,
UNF_EVENT_LPORT_REMOTE_BUTT
};
struct unf_cm_disc_mg_template_s {
/* start input:L_Port,return:ok/fail */
unsigned int (*pfn_unf_disc_start)(void *v_lport);
/* stop input: L_Port,return:ok/fail */
unsigned int (*pfn_unf_disc_stop)(void *v_lport);
/* Callback after disc completes (with event: ok/fail) */
void (*pfn_unf_disc_callback)(void *v_lport, unsigned int v_result);
};
struct unf_chip_manage_info_s {
struct list_head list_chip_thread_entry;
struct list_head list_head;
spinlock_t chip_event_list_lock;
struct task_struct *data_thread;
unsigned int list_num;
unsigned int slot_id;
unsigned char chip_id;
unsigned char rsv;
unsigned char sfp_9545_fault; /* 9545 fault */
unsigned char sfp_power_fault; /* SFP power fault */
atomic_t ref_cnt;
unsigned int b_thread_exit;
struct unf_chip_info_s chip_info;
atomic_t card_loop_test_flag;
spinlock_t card_loop_back_state_lock;
char update_path[UNF_FW_UPDATE_PATH_LEN_MAX];
};
enum unf_timer_type_e {
UNF_TIMER_TYPE_INI_IO,
UNF_TIMER_TYPE_REQ_IO,
UNF_TIMER_TYPE_INI_RRQ,
UNF_TIMER_TYPE_SFS,
UNF_TIMER_TYPE_INI_ABTS
};
struct unf_cm_xchg_mgr_template_s {
/* Get new Xchg */
/* input:L_Port,ini/tgt type,return:initialized Xchg */
void *(*pfn_unf_xchg_get_free_and_init)(void *, unsigned int,
unsigned short);
/* OXID,SID lookup Xchg */
/* input: L_Port,OXID,SID,return:Xchg */
void *(*pfn_unf_look_up_xchg_by_id)(void *, unsigned short,
unsigned int);
/* input:L_Port,tag,return:Xchg */
void *(*pfn_unf_look_up_xchg_by_tag)(void *, unsigned short);
/* free Xchg */
/* input:L_Port,Xchg,return:void */
void (*pfn_unf_xchg_release)(void *, void *);
/* Abort IO Xchg by SID/DID */
/* input:L_Port,SID,DID,return:void */
void (*pfn_unf_xchg_mgr_io_xchg_abort)(void *, void *, unsigned int,
unsigned int, unsigned int);
/* Abort SFS Xchg by SID/DID */
/* input:L_Port,SID,DID,return:void */
void (*pfn_unf_xchg_mgr_sfs_xchg_abort)(void *, void *,
unsigned int, unsigned int);
/* Clean Xchg by SID/DID */
/* input:L_Port,SID,DID,return:void */
void (*pfn_unf_xchg_mgr_xchg_clean)(void *, unsigned int,
unsigned int);
/* Add Xchg timer */
void (*pfn_unf_xchg_add_timer)(void *, unsigned long,
enum unf_timer_type_e);
/* Cancel Xchg timer */
void (*pfn_unf_xchg_cancel_timer)(void *);
/* L_Port, Abort flag */
void (*pfn_unf_xchg_abort_all_io)(void *, unsigned int, int);
/* find Xchg by scsi Cmnd sn */
void *(*pfn_unf_look_up_xchg_by_cmnd_sn)(void *, unsigned long long,
unsigned int);
/* input:L_Port,unsigned long long */
void (*pfn_unf_xchg_abort_by_lun)(void *, void *, unsigned long long,
void *, int);
void (*pfn_unf_xchg_abort_by_session)(void *, void *);
};
struct unf_rport_pool_s {
unsigned int rport_pool_count;
void *rport_pool_add;
struct list_head list_rports_pool;
spinlock_t rport_free_pool_lock;
/* completion for synchronous reuse of the R_Port pool */
struct completion *rport_pool_completion;
unsigned long *pul_rpi_bitmap;
};
struct unf_cm_lport_template_s {
/* Get VPort struct and init */
/* input:pstLport,ini/tgt type,return:pstVport */
void *(*pfn_unf_vport_get_free_and_init)(void *, unsigned int);
/* For fast IO path */
/* input: pstLport, VpIndex, return:pstVport */
void *(*pfn_unf_lookup_vport_by_vp_index)(void *, unsigned short);
/* input: pstLport, PortId,return:pstVport */
void *(*pfn_unf_lookup_vport_by_port_id)(void *, unsigned int);
/* input:pstLport, wwpn, return:pstVport */
void *(*pfn_unf_lookup_vport_by_wwpn)(void *, unsigned long long);
/* input:L_Port, DID, return:pstVport */
void *(*pfn_unf_lookup_vport_by_did)(void *, unsigned int);
/* input:L_Port,return:void */
void (*pfn_unf_vport_remove)(void *);
};
struct unf_vport_pool_s {
unsigned short vport_pool_count;
void *vport_pool_addr;
struct list_head list_vport_pool;
spinlock_t vport_pool_lock;
struct completion *vport_pool_completion;
unsigned short slab_next_index; /* Next free vport */
unsigned short slab_total_sum; /* Total Vport num */
struct unf_lport_s *vport_slab[0];
};
struct unf_esgl_pool_s {
unsigned int esgl_pool_count;
void *esgl_pool_addr;
struct list_head list_esgl_pool;
spinlock_t esgl_pool_lock;
struct buf_describe_s esgl_buf_list;
};
/* little endian */
struct unf_port_id_page_s {
struct list_head list_node_rscn;
unsigned char port_id_port;
unsigned char port_id_area;
unsigned char port_id_domain;
unsigned char uc_addr_format : 2;
unsigned char uc_event_qualifier : 4;
unsigned char uc_reserved : 2;
};
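/*
 * Layout sketch (assuming the little-endian layout noted above): a
 * 24-bit N_Port_ID such as 0x010203 splits into the three bytes
 *
 *   port_id_domain = 0x01;  // bits 23..16
 *   port_id_area   = 0x02;  // bits 15..8
 *   port_id_port   = 0x03;  // bits 7..0
 */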
struct unf_rscn_mg_s {
spinlock_t rscn_id_list_lock;
unsigned int free_rscn_count;
/* free RSCN page list */
struct list_head list_free_rscn_page;
/* using RSCN page list */
struct list_head list_using_rscn_page;
/* All RSCN PAGE Address */
void *rscn_pool_add;
struct unf_port_id_page_s *(*pfn_unf_get_free_rscn_node)(
void *v_rscn_mg);
void (*pfn_unf_release_rscn_node)(void *v_rscn_mg, void *v_rscn_node);
};
struct unf_disc_rport_mg_s {
void *disc_pool_add;
struct list_head list_disc_rports_pool; /* discovery DISC Rport pool */
struct list_head list_disc_rport_busy; /* Busy discovery DiscRport */
};
struct unf_disc_manage_info_s {
struct list_head list_head;
spinlock_t disc_event_list_lock;
atomic_t disc_contrl_size;
unsigned int b_thread_exit;
struct task_struct *data_thread;
};
struct unf_disc_s {
unsigned int retry_count; /* current retry counter */
unsigned int max_retry_count; /* max retry count */
unsigned int disc_flag; /* Disc flag: Loop Disc, Fabric Disc */
struct completion *disc_completion;
atomic_t disc_ref_cnt;
struct list_head list_busy_rports; /* Busy RPort list */
struct list_head list_delete_rports; /* Delete RPort list */
struct list_head list_destroy_rports;
spinlock_t rport_busy_pool_lock;
struct unf_lport_s *lport;
enum unf_disc_state_e en_states;
struct delayed_work disc_work;
/* Disc operation template */
struct unf_cm_disc_mg_template_s unf_disc_temp;
/* UNF_INIT_DISC/UNF_RSCN_DISC */
unsigned int disc_option;
/* RSCN list */
struct unf_rscn_mg_s rscn_mgr;
struct unf_disc_rport_mg_s disc_rport_mgr;
struct unf_disc_manage_info_s disc_thread_info;
unsigned long long last_disc_jiff;
};
enum unf_service_item_e {
UNF_SERVICE_ITEM_FLOGI = 0,
UNF_SERVICE_ITEM_PLOGI,
UNF_SERVICE_ITEM_PRLI,
UNF_SERVICE_ITEM_RSCN,
UNF_SERVICE_ITEM_ABTS,
UNF_SERVICE_ITEM_PDISC,
UNF_SERVICE_ITEM_ADISC,
UNF_SERVICE_ITEM_LOGO,
UNF_SERVICE_ITEM_SRR,
UNF_SERVICE_ITEM_RRQ,
UNF_SERVICE_ITEM_ECHO,
UNF_SERVICE_ITEM_RLS,
UNF_SERVICE_BUTT
};
/* Link service counter */
struct unf_link_service_collect_s {
unsigned long long service_cnt[UNF_SERVICE_BUTT];
};
struct unf_pcie_error_count_s {
unsigned int pcie_error_count[UNF_PCIE_BUTT];
};
#define INVALID_WWPN 0
enum unf_device_scsi_state_e {
UNF_SCSI_ST_INIT = 0,
UNF_SCSI_ST_OFFLINE,
UNF_SCSI_ST_ONLINE,
UNF_SCSI_ST_DEAD,
UNF_SCSI_ST_BUTT
};
struct unf_wwpn_dfx_counter_info_s {
atomic64_t io_done_cnt[UNF_MAX_IO_RETURN_VALUE];
atomic64_t scsi_cmd_cnt[UNF_MAX_SCSI_CMD];
atomic64_t target_busy;
atomic64_t host_busy;
atomic_t error_handle[UNF_SCSI_ERROR_HANDLE_BUTT];
atomic_t error_handle_result[UNF_SCSI_ERROR_HANDLE_BUTT];
atomic_t device_alloc;
atomic_t device_destroy;
};
#define UNF_MAX_LUN_PER_TARGET 256
struct unf_wwpn_rport_info_s {
unsigned long long wwpn;
struct unf_rport_s *rport; /* R_Port which is link up */
void *lport; /* L_Port */
unsigned int target_id; /* target_id distributed by SCSI */
unsigned int last_en_scis_state;
atomic_t en_scsi_state;
struct unf_wwpn_dfx_counter_info_s *dfx_counter;
struct delayed_work loss_tmo_work;
int b_need_scan;
struct list_head fc_lun_list;
};
struct unf_rport_scsi_id_image_s {
spinlock_t scsi_image_table_lock;
/* ScsiId Wwpn table */
struct unf_wwpn_rport_info_s *wwn_rport_info_table;
unsigned int max_scsi_id;
};
enum unf_lport_dirty_flag_e {
UNF_LPORT_DIRTY_FLAG_NONE = 0,
UNF_LPORT_DIRTY_FLAG_XCHGMGR_DIRTY = 0x100,
UNF_LPORT_DIRTY_FLAG_RPORT_POOL_DIRTY = 0x200,
UNF_LPORT_DIRTY_FLAG_DISC_DIRTY = 0x400,
UNF_LPORT_DIRTY_FLAG_BUTT
};
typedef struct unf_rport_s *(*pfn_unf_rport_set_qualifier)(
struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport_by_nport_id,
struct unf_rport_s *v_rport_by_wwpn,
unsigned long long v_wwpn,
unsigned int v_sid);
typedef unsigned int (*pfn_unf_tmf_status_recovery)(void *v_rport,
void *v_xchg);
enum unf_start_work_state_e {
UNF_START_WORK_STOP,
UNF_START_WORK_BEGIN,
UNF_START_WORK_COMPLETE
};
struct unf_ini_private_info_s {
unsigned int driver_type; /* Driver Type */
void *lower; /* driver private pointer */
};
struct unf_product_hosts_info_s {
void *p_tgt_host;
unf_scsi_host_s *p_scsi_host;
struct unf_ini_private_info_s drv_private_info;
unf_scsi_host_s scsi_host;
};
struct unf_lport_s {
unsigned int port_type; /* Port Type: fc */
atomic_t lport_ref_cnt; /* LPort reference counter */
void *fc_port; /* hard adapter hba pointer */
void *rport; /* Used for SCSI interface */
void *vport;
struct unf_product_hosts_info_s host_info; /* scsi host mg */
struct unf_rport_scsi_id_image_s rport_scsi_table;
int b_port_removing;
int b_port_dir_exchange;
spinlock_t xchg_mgr_lock;
struct list_head list_xchg_mgr_head;
struct list_head list_dirty_xchg_mgr_head;
void *p_xchg_mgr[UNF_EXCHG_MGR_NUM];
enum int_e b_priority;
struct list_head list_vports_head; /* Vport Mg */
struct list_head list_intergrad_vports; /* Vport intergrad list */
struct list_head list_destroy_vports; /* Vport destroy list */
/* VPort entry, hook in list_vports_head */
struct list_head entry_vport;
struct list_head entry_lport; /* LPort entry */
spinlock_t lport_state_lock; /* UL Port Lock */
struct unf_disc_s disc; /* Disc and rport Mg */
/* R_Port pool; Vports share the L_Port pool */
struct unf_rport_pool_s rport_pool;
struct unf_esgl_pool_s esgl_pool; /* external sgl pool */
unsigned int port_id; /* Port ID for management, e.g. 0x11000 */
enum unf_lport_login_state_e en_states;
unsigned int link_up;
unsigned int speed;
unsigned long long node_name;
unsigned long long port_name;
unsigned long long fabric_node_name;
unsigned int nport_id;
unsigned int max_frame_size;
unsigned int ed_tov;
unsigned int ra_tov;
unsigned int rr_tov;
unsigned int options; /* ini or tgt */
unsigned int retries;
unsigned int max_retry_count;
enum unf_act_topo_e en_act_topo;
enum int_e b_switch_state; /* 1 -> ON, 0 -> OFF */
enum int_e b_bbscn_support; /* 1 -> ON, 0 -> OFF */
enum unf_start_work_state_e en_start_work_state;
/* Xchg Mg operation template */
struct unf_cm_xchg_mgr_template_s xchg_mgr_temp;
struct unf_cm_lport_template_s lport_mgr_temp;
struct unf_low_level_function_op_s low_level_func;
struct unf_event_mgr event_mgr; /* Disc and rport Mg */
struct delayed_work retry_work; /* poll work or delay work */
struct workqueue_struct *link_event_wq;
struct workqueue_struct *xchg_wq;
struct unf_err_code_s err_code_sum; /* Error code counter */
struct unf_link_service_collect_s link_service_info;
struct unf_pcie_error_count_s pcie_error_cnt;
pfn_unf_rport_set_qualifier pfn_unf_qualify_rport; /* Qualify Rport */
/* tmf marker recovery */
pfn_unf_tmf_status_recovery pfn_unf_tmf_abnormal_recovery;
struct delayed_work route_timer_work; /* L_Port timer route */
unsigned short vp_index; /* Vport Index, Lport:0 */
struct unf_vport_pool_s *vport_pool; /* Only for Lport */
void *root_lport; /* Points to the physical L_Port */
struct completion *lport_free_completion; /* Free LPort Completion */
#define UNF_LPORT_NOP 1
#define UNF_LPORT_NORMAL 0
atomic_t port_no_operater_flag;
unsigned int enhanced_features; /* Enhanced Features */
unsigned int destroy_step;
unsigned int dirty_flag;
struct unf_lport_sfp_info sfp_info;
struct unf_chip_manage_info_s *chip_info;
#define UNF_LOOP_BACK_TESTING 1
#define UNF_LOOP_BACK_TEST_END 0
unsigned char sfp_power_fault_count;
unsigned char sfp_9545_fault_count;
unsigned long long last_tx_fault_jif; /* SFP last tx fault jiffies */
/* Server card: UNF_FC_SERVER_BOARD_32_G(6) for 32G mode,
* UNF_FC_SERVER_BOARD_16_G(7) for 16G mode
*/
unsigned int card_type;
atomic_t scsi_session_add_success;
atomic_t scsi_session_add_failed;
atomic_t scsi_session_del_success;
atomic_t scsi_session_del_failed;
atomic_t add_start_work_failed;
atomic_t add_closing_work_failed;
atomic_t device_alloc;
atomic_t device_destroy;
atomic_t session_loss_tmo;
atomic_t alloc_scsi_id;
atomic_t resume_scsi_id;
atomic_t reuse_scsi_id;
atomic64_t last_exchg_mgr_idx;
atomic64_t exchg_index;
unsigned int pcie_link_down_cnt;
int b_pcie_linkdown;
unsigned char fw_version[HIFC_VER_LEN];
atomic_t link_lose_tmo;
atomic_t err_code_obtain_freq;
};
void unf_lport_stat_ma(struct unf_lport_s *v_lport,
enum unf_lport_event_e v_event);
void unf_lport_error_recovery(struct unf_lport_s *v_lport);
void unf_set_lport_state(struct unf_lport_s *v_lport,
enum unf_lport_login_state_e v_states);
void unf_init_portparms(struct unf_lport_s *v_lport);
unsigned int unf_lport_enter_flogi(struct unf_lport_s *v_lport);
void unf_lport_enter_sns_plogi(struct unf_lport_s *v_lport);
unsigned int unf_init_disc_mgr(struct unf_lport_s *v_pst_lport);
unsigned int unf_init_lport_route(struct unf_lport_s *v_lport);
void unf_destroy_lport_route(struct unf_lport_s *v_lport);
void unf_reset_lport_params(struct unf_lport_s *v_lport);
void unf_cmmark_dirty_mem(struct unf_lport_s *v_lport,
enum unf_lport_dirty_flag_e v_etype);
struct unf_lport_s *unf_cm_lookup_vport_by_vp_index(struct unf_lport_s *v_lport,
unsigned short v_vp_index);
struct unf_lport_s *unf_cm_lookup_vport_by_did(struct unf_lport_s *v_lport,
unsigned int v_did);
struct unf_lport_s *unf_cm_lookup_vport_by_wwpn(struct unf_lport_s *v_lport,
unsigned long long v_wwpn);
void unf_cm_vport_remove(struct unf_lport_s *v_vport);
#endif
// SPDX-License-Identifier: GPL-2.0
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
*/
#include "unf_log.h"
#include "unf_common.h"
#include "unf_lport.h"
#include "unf_rport.h"
#include "unf_exchg.h"
#include "unf_service.h"
#include "unf_portman.h"
#include "unf_rport.h"
#include "unf_io.h"
#include "unf_npiv.h"
/* Note:
* The resource-allocation functions for a Vport are shared with the Lport:
* the root Lport is passed in as the parameter and supplies the resources
* used here, including:
* esgl_pool;
* event_mgr;
* rport_pool;
* xchg_mgr
*/
#define UNF_DELETE_VPORT_MAX_WAIT_TIME_MS 60000
unsigned int unf_init_vport_pool(struct unf_lport_s *v_lport)
{
unsigned int ret = RETURN_OK;
unsigned int i = 0;
unsigned short vport_cnt = 0;
struct unf_lport_s *vport = NULL;
struct unf_vport_pool_s *vport_pool;
unsigned int vport_pool_size = 0;
unsigned long flags = 0;
UNF_CHECK_VALID(0x1950, UNF_TRUE, v_lport, return RETURN_ERROR);
UNF_TOU16_CHECK(vport_cnt, v_lport->low_level_func.support_max_npiv_num,
return RETURN_ERROR);
if (vport_cnt == 0) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[warn]Port(0x%x) do not support NPIV",
v_lport->port_id);
return RETURN_OK;
}
vport_pool_size = sizeof(struct unf_vport_pool_s) +
sizeof(struct unf_lport_s *) * vport_cnt;
v_lport->vport_pool = vmalloc(vport_pool_size);
if (!v_lport->vport_pool) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Port(0x%x) cannot allocate vport pool",
v_lport->port_id);
return RETURN_ERROR;
}
memset(v_lport->vport_pool, 0, vport_pool_size);
vport_pool = v_lport->vport_pool;
vport_pool->vport_pool_count = vport_cnt;
vport_pool->vport_pool_completion = NULL;
spin_lock_init(&vport_pool->vport_pool_lock);
INIT_LIST_HEAD(&vport_pool->list_vport_pool);
vport_pool->vport_pool_addr = vmalloc(
(size_t)(vport_cnt * sizeof(struct unf_lport_s)));
if (!vport_pool->vport_pool_addr) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Port(0x%x) cannot allocate vport pool address",
v_lport->port_id);
vfree(v_lport->vport_pool);
v_lport->vport_pool = NULL;
return RETURN_ERROR;
}
memset(vport_pool->vport_pool_addr, 0, vport_cnt *
sizeof(struct unf_lport_s));
vport = (struct unf_lport_s *)vport_pool->vport_pool_addr;
spin_lock_irqsave(&vport_pool->vport_pool_lock, flags);
for (i = 0; i < vport_cnt; i++) {
list_add_tail(&vport->entry_vport,
&vport_pool->list_vport_pool);
vport++;
}
vport_pool->slab_next_index = 0;
vport_pool->slab_total_sum = vport_cnt;
spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags);
return ret;
}
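/*
 * Pool sizing sketch: struct unf_vport_pool_s ends with the flexible
 * array vport_slab[], so the allocation above reserves one slab pointer
 * per supported vport; e.g. with vport_cnt == 64:
 *
 *   vport_pool_size = sizeof(struct unf_vport_pool_s) +
 *                     64 * sizeof(struct unf_lport_s *);
 *
 * The vport structures themselves live in the separately allocated
 * vport_pool_addr buffer and are chained on list_vport_pool.
 */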
void unf_free_vport_pool(struct unf_lport_s *v_lport)
{
struct unf_vport_pool_s *vport_pool = NULL;
int wait = UNF_FALSE;
unsigned long flag = 0;
unsigned int remain = 0;
struct completion vport_pool_completion =
COMPLETION_INITIALIZER(vport_pool_completion);
UNF_CHECK_VALID(0x1951, UNF_TRUE, v_lport, return);
UNF_CHECK_VALID(0x1952, UNF_TRUE, v_lport->vport_pool, return);
vport_pool = v_lport->vport_pool;
spin_lock_irqsave(&vport_pool->vport_pool_lock, flag);
if (vport_pool->slab_total_sum != vport_pool->vport_pool_count) {
vport_pool->vport_pool_completion = &vport_pool_completion;
remain = vport_pool->slab_total_sum -
vport_pool->vport_pool_count;
wait = UNF_TRUE;
}
spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag);
if (wait == UNF_TRUE) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Port(0x%x) begin to wait for vport pool completion(%ld) remain(%d)",
v_lport->port_id, jiffies, remain);
wait_for_completion(vport_pool->vport_pool_completion);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Port(0x%x) wait for vport pool completion end(%ld)",
v_lport->port_id, jiffies);
spin_lock_irqsave(&vport_pool->vport_pool_lock, flag);
vport_pool->vport_pool_completion = NULL;
spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag);
}
if (v_lport->vport_pool->vport_pool_addr) {
vfree(v_lport->vport_pool->vport_pool_addr);
v_lport->vport_pool->vport_pool_addr = NULL;
}
vfree(v_lport->vport_pool);
v_lport->vport_pool = NULL;
UNF_REFERNCE_VAR(remain);
}
static inline struct unf_lport_s *unf_get_vport_by_slab_index(
struct unf_vport_pool_s *v_vport_pool,
unsigned short v_slab_index)
{
UNF_CHECK_VALID(0x1953, UNF_TRUE, v_vport_pool, return NULL);
return v_vport_pool->vport_slab[v_slab_index];
}
static inline void unf_vport_pool_slab_set(
struct unf_vport_pool_s *v_vport_pool,
unsigned short v_slab_index,
struct unf_lport_s *v_vport)
{
UNF_CHECK_VALID(0x1954, UNF_TRUE, v_vport_pool, return);
v_vport_pool->vport_slab[v_slab_index] = v_vport;
}
unsigned int unf_alloc_vp_index(struct unf_vport_pool_s *v_vport_pool,
struct unf_lport_s *v_vport,
unsigned short v_vpid)
{
unsigned short slab_index = 0;
unsigned long flags = 0;
UNF_CHECK_VALID(0x1955, UNF_TRUE, v_vport_pool, return RETURN_ERROR);
UNF_CHECK_VALID(0x1956, UNF_TRUE, v_vport, return RETURN_ERROR);
spin_lock_irqsave(&v_vport_pool->vport_pool_lock, flags);
if (v_vpid == 0) {
slab_index = v_vport_pool->slab_next_index;
while (unf_get_vport_by_slab_index(v_vport_pool, slab_index)) {
slab_index = (slab_index + 1) %
v_vport_pool->slab_total_sum;
if (slab_index == v_vport_pool->slab_next_index) {
spin_unlock_irqrestore(
&v_vport_pool->vport_pool_lock, flags);
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN,
UNF_LOG_REG_ATT, UNF_WARN,
"[warn]VPort pool has no slab ");
return RETURN_ERROR;
}
}
} else {
slab_index = v_vpid - 1;
if (unf_get_vport_by_slab_index(v_vport_pool, slab_index)) {
spin_unlock_irqrestore(&v_vport_pool->vport_pool_lock,
flags);
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT,
UNF_WARN,
"[warn]VPort Index(0x%x) is occupy", v_vpid);
return RETURN_ERROR;
}
}
unf_vport_pool_slab_set(v_vport_pool, slab_index, v_vport);
v_vport_pool->slab_next_index = (slab_index + 1) %
v_vport_pool->slab_total_sum;
spin_unlock_irqrestore(&v_vport_pool->vport_pool_lock, flags);
spin_lock_irqsave(&v_vport->lport_state_lock, flags);
v_vport->vp_index = slab_index + 1; /* VpIndex=SlabIndex+1 */
spin_unlock_irqrestore(&v_vport->lport_state_lock, flags);
return RETURN_OK;
}
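/*
 * Index mapping sketch: vp_index 0 denotes the physical L_Port itself
 * (see unf_cm_lookup_vport_by_vp_index()), so vport slab entries are
 * offset by one:
 *
 *   vp_index   = slab_index + 1;  // on allocation (above)
 *   slab_index = vp_index - 1;    // on lookup/free
 *
 * Passing v_vpid == 0 picks the next free slab round-robin from
 * slab_next_index; a non-zero v_vpid requests that fixed index.
 */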
void unf_free_vp_index(struct unf_vport_pool_s *v_vport_pool,
struct unf_lport_s *v_vport)
{
unsigned long flags = 0;
UNF_CHECK_VALID(0x1957, UNF_TRUE, v_vport_pool, return);
UNF_CHECK_VALID(0x1958, UNF_TRUE, v_vport, return);
if ((v_vport->vp_index == 0) ||
(v_vport->vp_index > v_vport_pool->slab_total_sum)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"Input vpoot index(0x%x) is beyond the normal range, min(0x1), max(0x%x).",
v_vport->vp_index, v_vport_pool->slab_total_sum);
return;
}
spin_lock_irqsave(&v_vport_pool->vport_pool_lock, flags);
/* SlabIndex=VpIndex-1 */
unf_vport_pool_slab_set(v_vport_pool, v_vport->vp_index - 1, NULL);
spin_unlock_irqrestore(&v_vport_pool->vport_pool_lock, flags);
spin_lock_irqsave(&v_vport->lport_state_lock, flags);
v_vport->vp_index = INVALID_VALUE16;
spin_unlock_irqrestore(&v_vport->lport_state_lock, flags);
}
struct unf_lport_s *unf_get_free_vport(struct unf_lport_s *v_lport)
{
struct unf_lport_s *vport = NULL;
struct list_head *list_head = NULL;
struct unf_vport_pool_s *vport_pool;
unsigned long flag = 0;
UNF_CHECK_VALID(0x1959, UNF_TRUE, v_lport, return NULL);
UNF_CHECK_VALID(0x1960, UNF_TRUE, v_lport->vport_pool, return NULL);
vport_pool = v_lport->vport_pool;
spin_lock_irqsave(&vport_pool->vport_pool_lock, flag);
if (!list_empty(&vport_pool->list_vport_pool)) {
list_head = (&vport_pool->list_vport_pool)->next;
list_del(list_head);
vport_pool->vport_pool_count--;
list_add_tail(list_head, &v_lport->list_vports_head);
vport = list_entry(list_head, struct unf_lport_s, entry_vport);
} else {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]LPort(0x%x)'s vport pool is empty",
v_lport->port_id);
spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag);
return NULL;
}
spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag);
return vport;
}
void unf_vport_back_to_pool(void *v_vport)
{
struct unf_lport_s *lport = NULL;
struct unf_lport_s *vport = NULL;
struct list_head *list = NULL;
unsigned long flag = 0;
UNF_CHECK_VALID(0x1961, UNF_TRUE, v_vport, return);
vport = v_vport;
lport = (struct unf_lport_s *)(vport->root_lport);
UNF_CHECK_VALID(0x1962, UNF_TRUE, lport, return);
UNF_CHECK_VALID(0x1963, UNF_TRUE, lport->vport_pool, return);
unf_free_vp_index(lport->vport_pool, vport);
spin_lock_irqsave(&lport->vport_pool->vport_pool_lock, flag);
list = &vport->entry_vport;
list_del(list);
list_add_tail(list, &lport->vport_pool->list_vport_pool);
lport->vport_pool->vport_pool_count++;
spin_unlock_irqrestore(&lport->vport_pool->vport_pool_lock, flag);
}
void unf_init_vport_from_lport(struct unf_lport_s *v_vport,
struct unf_lport_s *v_lport)
{
UNF_CHECK_VALID(0x1964, UNF_TRUE, v_vport, return);
UNF_CHECK_VALID(0x1965, UNF_TRUE, v_lport, return);
v_vport->port_type = v_lport->port_type;
v_vport->fc_port = v_lport->fc_port;
v_vport->en_act_topo = v_lport->en_act_topo;
v_vport->root_lport = v_lport;
v_vport->pfn_unf_qualify_rport = v_lport->pfn_unf_qualify_rport;
v_vport->link_event_wq = v_lport->link_event_wq;
v_vport->xchg_wq = v_lport->xchg_wq;
memcpy(&v_vport->xchg_mgr_temp, &v_lport->xchg_mgr_temp,
sizeof(struct unf_cm_xchg_mgr_template_s));
memcpy(&v_vport->event_mgr, &v_lport->event_mgr,
sizeof(struct unf_event_mgr));
memset(&v_vport->lport_mgr_temp, 0,
sizeof(struct unf_cm_lport_template_s));
memcpy(&v_vport->low_level_func, &v_lport->low_level_func,
sizeof(struct unf_low_level_function_op_s));
}
void unf_check_vport_pool_status(struct unf_lport_s *v_lport)
{
struct unf_vport_pool_s *vport_pool = NULL;
unsigned long flags = 0;
UNF_CHECK_VALID(0x1968, UNF_TRUE, v_lport, return);
vport_pool = v_lport->vport_pool;
UNF_CHECK_VALID(0x1969, UNF_TRUE, vport_pool, return);
spin_lock_irqsave(&vport_pool->vport_pool_lock, flags);
if ((vport_pool->vport_pool_completion) &&
(vport_pool->slab_total_sum == vport_pool->vport_pool_count))
complete(vport_pool->vport_pool_completion);
spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags);
}
void unf_vport_fabric_logo(struct unf_lport_s *v_vport)
{
struct unf_rport_s *rport = NULL;
rport = unf_get_rport_by_nport_id(v_vport, UNF_FC_FID_FLOGI);
UNF_CHECK_VALID(0x1970, UNF_TRUE, rport, return);
(void)unf_send_logo(v_vport, rport);
}
void unf_vport_deinit(void *v_vport)
{
struct unf_lport_s *vport = NULL;
UNF_CHECK_VALID(0x1971, UNF_TRUE, v_vport, return);
vport = (struct unf_lport_s *)v_vport;
unf_unregister_scsi_host(vport);
unf_disc_mgr_destroy(vport);
unf_release_xchg_mgr_temp(vport);
unf_release_lport_mgr_temp(vport);
unf_destroy_scsi_id_table(vport);
unf_lport_release_lw_fun_op(vport);
vport->fc_port = NULL;
vport->vport = NULL;
if (vport->lport_free_completion) {
complete(vport->lport_free_completion);
} else {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]VPort(0x%x) point(0x%p) completion free function is NULL",
vport->port_id, vport);
dump_stack();
}
}
void unf_vport_ref_dec(struct unf_lport_s *v_vport)
{
UNF_CHECK_VALID(0x1972, UNF_TRUE, v_vport, return);
if (atomic_dec_and_test(&v_vport->lport_ref_cnt)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"[info]VPort(0x%x) point(0x%p) reference count is 0 and freevport",
v_vport->port_id, v_vport);
unf_vport_deinit(v_vport);
}
}
unsigned int unf_vport_init(void *v_vport)
{
struct unf_lport_s *vport = NULL;
UNF_CHECK_VALID(0x1974, UNF_TRUE, v_vport, return RETURN_ERROR);
vport = (struct unf_lport_s *)v_vport;
vport->options = UNF_PORT_MODE_INI;
vport->nport_id = 0;
if (unf_init_scsi_id_table(vport) != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Vport(0x%x) can not initialize SCSI ID table",
vport->port_id);
return RETURN_ERROR;
}
if (unf_init_disc_mgr(vport) != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Vport(0x%x) can not initialize discover manager",
vport->port_id);
unf_destroy_scsi_id_table(vport);
return RETURN_ERROR;
}
if (unf_register_scsi_host(vport) != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Vport(0x%x) vport can not register SCSI host",
vport->port_id);
unf_disc_mgr_destroy(vport);
unf_destroy_scsi_id_table(vport);
return RETURN_ERROR;
}
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT,
"[event]Vport(0x%x) Create succeed with wwpn(0x%llx)",
vport->port_id, vport->port_name);
return RETURN_OK;
}
void unf_vport_remove(void *v_vport)
{
struct unf_lport_s *vport = NULL;
struct unf_lport_s *lport = NULL;
struct completion vport_free_completion =
COMPLETION_INITIALIZER(vport_free_completion);
UNF_CHECK_VALID(0x1975, UNF_TRUE, v_vport, return);
vport = (struct unf_lport_s *)v_vport;
lport = (struct unf_lport_s *)(vport->root_lport);
vport->lport_free_completion = &vport_free_completion;
unf_set_lport_removing(vport);
unf_vport_ref_dec(vport);
wait_for_completion(vport->lport_free_completion);
unf_vport_back_to_pool(vport);
unf_check_vport_pool_status(lport);
}
void *unf_lookup_vport_by_vp_index(void *v_lport, unsigned short v_vp_index)
{
struct unf_lport_s *lport = NULL;
struct unf_vport_pool_s *vport_pool = NULL;
struct unf_lport_s *vport = NULL;
unsigned long flags = 0;
UNF_CHECK_VALID(0x1976, UNF_TRUE, v_lport, return NULL);
lport = (struct unf_lport_s *)v_lport;
vport_pool = lport->vport_pool;
if (unlikely(!vport_pool)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) vport pool is NULL",
lport->port_id);
return NULL;
}
if ((v_vp_index == 0) || (v_vp_index > vport_pool->slab_total_sum)) {
UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]Port(0x%x) input vport index(0x%x) is beyond the normal range(0x1~0x%x)",
lport->port_id, v_vp_index,
vport_pool->slab_total_sum);
return NULL;
}
spin_lock_irqsave(&vport_pool->vport_pool_lock, flags);
/* SlabIndex=VpIndex-1 */
vport = unf_get_vport_by_slab_index(vport_pool, v_vp_index - 1);
spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags);
return (void *)vport;
}
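/*
 * Fast-path usage sketch (the caller and its io_vp_index variable are
 * assumptions for illustration only): the vp_index carried with an IO
 * gives O(1) vport resolution instead of walking the vport lists:
 *
 *   struct unf_lport_s *vport =
 *       unf_lookup_vport_by_vp_index(lport, io_vp_index);
 *   if (vport)
 *       // dispatch against the resolved vport
 */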
void *unf_lookup_vport_by_port_id(void *v_lport, unsigned int v_port_id)
{
struct unf_lport_s *lport = NULL;
struct unf_vport_pool_s *vport_pool = NULL;
struct unf_lport_s *vport = NULL;
struct list_head *node = NULL;
struct list_head *next_node = NULL;
unsigned long flag = 0;
UNF_CHECK_VALID(0x1977, UNF_TRUE, v_lport, return NULL);
lport = (struct unf_lport_s *)v_lport;
vport_pool = lport->vport_pool;
if (unlikely(!vport_pool)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) vport pool is NULL",
lport->port_id);
return NULL;
}
spin_lock_irqsave(&vport_pool->vport_pool_lock, flag);
list_for_each_safe(node, next_node, &lport->list_vports_head) {
vport = list_entry(node, struct unf_lport_s, entry_vport);
if (vport->port_id == v_port_id) {
spin_unlock_irqrestore(&vport_pool->vport_pool_lock,
flag);
return vport;
}
}
list_for_each_safe(node, next_node, &lport->list_intergrad_vports) {
vport = list_entry(node, struct unf_lport_s, entry_vport);
if (vport->port_id == v_port_id) {
spin_unlock_irqrestore(&vport_pool->vport_pool_lock,
flag);
return vport;
}
}
spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag);
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x) has no vport ID(0x%x).",
lport->port_id, v_port_id);
return NULL;
}
void *unf_lookup_vport_by_did(void *v_lport, unsigned int v_did)
{
struct unf_lport_s *lport = NULL;
struct unf_vport_pool_s *vport_pool = NULL;
struct unf_lport_s *vport = NULL;
struct list_head *node = NULL;
struct list_head *next_node = NULL;
unsigned long flag = 0;
UNF_CHECK_VALID(0x1978, UNF_TRUE, v_lport, return NULL);
lport = (struct unf_lport_s *)v_lport;
vport_pool = lport->vport_pool;
if (unlikely(!vport_pool)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) vport pool is NULL",
lport->port_id);
return NULL;
}
spin_lock_irqsave(&vport_pool->vport_pool_lock, flag);
list_for_each_safe(node, next_node, &lport->list_vports_head) {
vport = list_entry(node, struct unf_lport_s, entry_vport);
if (vport->nport_id == v_did) {
spin_unlock_irqrestore(&vport_pool->vport_pool_lock,
flag);
return vport;
}
}
list_for_each_safe(node, next_node, &lport->list_intergrad_vports) {
vport = list_entry(node, struct unf_lport_s, entry_vport);
if (vport->nport_id == v_did) {
spin_unlock_irqrestore(&vport_pool->vport_pool_lock,
flag);
return vport;
}
}
spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag);
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x) has no vport Nport ID(0x%x)",
lport->port_id, v_did);
return NULL;
}
void *unf_lookup_vport_by_wwpn(void *v_lport, unsigned long long v_wwpn)
{
struct unf_lport_s *lport = NULL;
struct unf_vport_pool_s *vport_pool = NULL;
struct unf_lport_s *vport = NULL;
struct list_head *node = NULL;
struct list_head *next_node = NULL;
unsigned long flag = 0;
UNF_CHECK_VALID(0x1979, UNF_TRUE, v_lport, return NULL);
lport = (struct unf_lport_s *)v_lport;
vport_pool = lport->vport_pool;
if (unlikely(!vport_pool)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) vport pool is NULL",
lport->port_id);
return NULL;
}
spin_lock_irqsave(&vport_pool->vport_pool_lock, flag);
list_for_each_safe(node, next_node, &lport->list_vports_head) {
vport = list_entry(node, struct unf_lport_s, entry_vport);
if (vport->port_name == v_wwpn) {
spin_unlock_irqrestore(&vport_pool->vport_pool_lock,
flag);
return vport;
}
}
list_for_each_safe(node, next_node, &lport->list_intergrad_vports) {
vport = list_entry(node, struct unf_lport_s, entry_vport);
if (vport->port_name == v_wwpn) {
spin_unlock_irqrestore(&vport_pool->vport_pool_lock,
flag);
return vport;
}
}
spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"[info]Port(0x%x) has no vport WWPN(0x%llx)",
lport->port_id, v_wwpn);
return NULL;
}
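/*
 * Function Name : unf_alloc_vport
 * Function Description: take a free vport from the pool, bind it to the
 *                       given WWPN, allocate a VP index and compose the
 *                       vport port_id from that index and the root
 *                       lport's port_id
 * Input Parameters : struct unf_lport_s *lport, unsigned long long v_wwpn
 * Output Parameters : N/A
 * Return Type : struct unf_lport_s *
 */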
struct unf_lport_s *unf_alloc_vport(struct unf_lport_s *lport,
unsigned long long v_wwpn)
{
struct unf_lport_s *vport = NULL;
vport = unf_cm_lookup_vport_by_wwpn(lport, v_wwpn);
if (vport) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_WARN,
"[err]Port(0x%x) has find vport with wwpn(0x%llx), can't create again",
lport->port_id, v_wwpn);
return NULL;
}
vport = unf_get_free_vport(lport);
if (!vport) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[warn]Can not get free vport from pool");
return NULL;
}
vport->root_lport = lport;
vport->port_name = v_wwpn;
unf_init_portparms(vport);
unf_init_vport_from_lport(vport, lport);
if (unf_alloc_vp_index(lport->vport_pool, vport, 0) != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[warn]Vport can not allocate vport index");
unf_vport_back_to_pool(vport);
return NULL;
}
vport->port_id = (((unsigned int)vport->vp_index) <<
PORTID_VPINDEX_SHIT) | lport->port_id;
return vport;
}
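/*
 * Function Name : unf_npiv_conf
 * Function Description: NPIV creation entry: reserve a vport for the
 *                       requested WWPN on the lport and register it
 *                       with the SCSI FC transport via fc_vport_create
 * Input Parameters : unsigned int v_port_id, unsigned long long v_wwpn
 * Output Parameters : N/A
 * Return Type : unsigned int
 */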
unsigned int unf_npiv_conf(unsigned int v_port_id, unsigned long long v_wwpn)
{
#define VPORT_WWN_MASK 0xff00ffffffffffff
#define VPORT_WWN_SHIFT 48
struct fc_vport_identifiers vid = { 0 };
struct fc_vport *fc_port = NULL;
struct Scsi_Host *shost = NULL;
struct unf_lport_s *lport = NULL;
struct unf_lport_s *vport = NULL;
unsigned short vport_id = 0;
lport = unf_find_lport_by_port_id(v_port_id);
if (!lport) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Cannot find LPort by (0x%x).", v_port_id);
return RETURN_ERROR;
}
vport = unf_cm_lookup_vport_by_wwpn(lport, v_wwpn);
if (vport) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[err]Port(0x%x) has find vport with wwpn(0x%llx), can't create again",
lport->port_id, v_wwpn);
return RETURN_ERROR;
}
vport = unf_get_free_vport(lport);
if (!vport) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[warn]Can not get free vport from pool");
return RETURN_ERROR;
}
unf_init_portparms(vport);
unf_init_vport_from_lport(vport, lport);
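/*
 * If the requested WWPN differs from the lport WWPN only in bits
 * 48..55 (the byte cleared by VPORT_WWN_MASK), that byte is used as
 * the preferred VP index; a zero byte falls back to the lport's own
 * bits 48..55. Example: lport WWPN 0x2000fedcba987654 and requested
 * WWPN 0x2012fedcba987654 yield vport_id 0x12.
 */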
if ((lport->port_name & VPORT_WWN_MASK) == (v_wwpn & VPORT_WWN_MASK)) {
vport_id = (v_wwpn & ~VPORT_WWN_MASK) >> VPORT_WWN_SHIFT;
if (vport_id == 0) {
vport_id = (lport->port_name & ~VPORT_WWN_MASK) >>
VPORT_WWN_SHIFT;
}
}
if (unf_alloc_vp_index(lport->vport_pool, vport, vport_id) !=
RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[warn]Vport can not allocate vport index");
unf_vport_back_to_pool(vport);
return RETURN_ERROR;
}
vport->port_id = (((unsigned int)vport->vp_index) <<
PORTID_VPINDEX_SHIT) | lport->port_id;
vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
vid.vport_type = FC_PORTTYPE_NPIV;
vid.disable = false;
vid.node_name = lport->node_name;
if (v_wwpn != 0) {
vid.port_name = v_wwpn;
} else {
if ((lport->port_name & ~VPORT_WWN_MASK) >> VPORT_WWN_SHIFT !=
vport->vp_index)
vid.port_name =
(lport->port_name & VPORT_WWN_MASK) |
(((unsigned long long)vport->vp_index) <<
VPORT_WWN_SHIFT);
else
vid.port_name = (lport->port_name & VPORT_WWN_MASK);
}
vport->port_name = vid.port_name;
shost = lport->host_info.p_scsi_host;
fc_port = fc_vport_create(shost, 0, &vid);
if (!fc_port) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Port(0x%x) Cannot Failed to create vport wwpn=%llx",
lport->port_id, vid.port_name);
unf_vport_back_to_pool(vport);
return RETURN_ERROR;
}
return RETURN_OK;
}
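/*
 * Function Name : unf_create_vport
 * Function Description: initialize a vport previously reserved by its
 *                       WWPN and, if the root lport is in fabric
 *                       topology and has at least received FLOGI ACC,
 *                       start the vport login
 * Input Parameters : struct unf_lport_s *v_lport,
 *                  : struct vport_config_s *v_vport_config
 * Output Parameters : N/A
 * Return Type : struct unf_lport_s *
 */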
struct unf_lport_s *unf_create_vport(struct unf_lport_s *v_lport,
struct vport_config_s *v_vport_config)
{
unsigned int ret = RETURN_OK;
struct unf_lport_s *lport = NULL;
struct unf_lport_s *vport = NULL;
enum unf_act_topo_e lport_topo = UNF_ACT_TOP_UNKNOWN;
enum unf_lport_login_state_e lport_state = UNF_LPORT_ST_ONLINE;
unsigned long flag = 0;
UNF_CHECK_VALID(0x1983, UNF_TRUE, v_lport, return NULL);
UNF_CHECK_VALID(0x1983, UNF_TRUE, v_vport_config, return NULL);
if (v_vport_config->port_mode != FC_PORT_ROLE_FCP_INITIATOR) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Only support INITIATOR port mode(0x%x)",
v_vport_config->port_mode);
return NULL;
}
lport = v_lport;
if (lport != lport->root_lport) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Port(0x%x) not root port return",
lport->port_id);
return NULL;
}
vport = unf_cm_lookup_vport_by_wwpn(lport, v_vport_config->port_name);
if (!vport) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[err]Port(0x%x) can not find vport with wwpn(0x%llx)",
lport->port_id, v_vport_config->port_name);
return NULL;
}
ret = unf_vport_init(vport);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[warn]VPort(0x%x) can not initialze vport",
vport->port_id);
return NULL;
}
spin_lock_irqsave(&lport->lport_state_lock, flag);
lport_topo = lport->en_act_topo;
lport_state = lport->en_states;
v_vport_config->node_name = lport->node_name;
spin_unlock_irqrestore(&lport->lport_state_lock, flag);
vport->port_name = v_vport_config->port_name;
vport->node_name = v_vport_config->node_name;
vport->nport_id = 0;
/* only fabric topo support NPIV */
if ((lport_topo == UNF_ACT_TOP_P2P_FABRIC) &&
/* after receive flogi acc */
(lport_state >= UNF_LPORT_ST_PLOGI_WAIT) &&
(lport_state <= UNF_LPORT_ST_READY)) {
vport->link_up = lport->link_up;
(void)unf_lport_login(vport, lport_topo);
}
return vport;
}
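/*
 * Function Name : unf_drop_vport
 * Function Description: terminate the vport's fc_vport in the SCSI FC
 *                       transport layer
 * Input Parameters : struct unf_lport_s *v_vport
 * Output Parameters : N/A
 * Return Type : unsigned int
 */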
unsigned int unf_drop_vport(struct unf_lport_s *v_vport)
{
unsigned int ret = RETURN_ERROR;
struct fc_vport *vport = NULL;
UNF_CHECK_VALID(0x1985, UNF_TRUE, v_vport, return RETURN_ERROR);
vport = v_vport->vport;
if (!vport) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]VPort(0x%x) find vport in scsi is NULL",
v_vport->port_id);
return ret;
}
ret = fc_vport_terminate(vport);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]VPort(0x%x) terminate vport(%p) in scsi failed",
v_vport->port_id, vport);
return ret;
}
return ret;
}
unsigned int unf_delete_vport(unsigned int v_port_id, unsigned int v_vp_index)
{
struct unf_lport_s *lport = NULL;
unsigned short vp_index = 0;
struct unf_lport_s *vport = NULL;
lport = unf_find_lport_by_port_id(v_port_id);
if (!lport) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Port(0x%x) can not be found by portid",
v_port_id);
return RETURN_ERROR;
}
if (atomic_read(&lport->port_no_operater_flag) == UNF_LPORT_NOP) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Port(0x%x) is in NOP, destroy all vports function will be called",
lport->port_id);
return RETURN_OK;
}
UNF_TOU16_CHECK(vp_index, v_vp_index, return RETURN_ERROR);
vport = unf_cm_lookup_vport_by_vp_index(lport, vp_index);
if (!vport) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[warn]Can not lookup VPort by VPort index(0x%x)",
vp_index);
return RETURN_ERROR;
}
return unf_drop_vport(vport);
}
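/*
 * Function Name : unf_vport_abort_all_sfs_exch
 * Function Description: walk every exchange manager's SFS busy list,
 *                       mark exchanges owned by this vport as aborted
 *                       and move them to the destroy list
 * Input Parameters : struct unf_lport_s *vport
 * Output Parameters : N/A
 * Return Type : void
 */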
void unf_vport_abort_all_sfs_exch(struct unf_lport_s *vport)
{
struct unf_xchg_hot_pool_s *hot_pool = NULL;
struct list_head *xchg_node = NULL;
struct list_head *next_xchg_node = NULL;
struct unf_xchg_s *exch = NULL;
unsigned long pool_lock_flags = 0;
unsigned long exch_lock_flags = 0;
unsigned int i;
UNF_CHECK_VALID(0x1985, UNF_TRUE, vport, return);
for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
hot_pool = unf_get_hot_pool_by_lport(
(struct unf_lport_s *)(vport->root_lport), i);
if (unlikely(!hot_pool)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) hot pool is NULL",
((struct unf_lport_s *)
(vport->root_lport))->port_id);
continue;
}
spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock,
pool_lock_flags);
list_for_each_safe(xchg_node, next_xchg_node,
&hot_pool->sfs_busylist) {
exch = list_entry(xchg_node, struct unf_xchg_s,
list_xchg_entry);
spin_lock_irqsave(&exch->xchg_state_lock,
exch_lock_flags);
if (vport == exch->lport &&
(atomic_read(&exch->ref_cnt) > 0)) {
exch->io_state |= TGT_IO_STATE_ABORT;
spin_unlock_irqrestore(&exch->xchg_state_lock,
exch_lock_flags);
unf_disc_ctrl_size_inc(vport, exch->cmnd_code);
/* Transfer exch to destroy chain */
list_del(xchg_node);
list_add_tail(xchg_node,
&hot_pool->list_destroy_xchg);
} else {
spin_unlock_irqrestore(&exch->xchg_state_lock,
exch_lock_flags);
}
}
spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock,
pool_lock_flags);
}
}
void unf_vport_abort_ini_io_exch(struct unf_lport_s *vport)
{
struct unf_xchg_hot_pool_s *hot_pool = NULL;
struct list_head *xchg_node = NULL;
struct list_head *next_xchg_node = NULL;
struct unf_xchg_s *exch = NULL;
unsigned long pool_lock_flags = 0;
unsigned long exch_lock_flags = 0;
unsigned int i;
UNF_CHECK_VALID(0x1986, UNF_TRUE, vport, return);
for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
hot_pool = unf_get_hot_pool_by_lport(
(struct unf_lport_s *)(vport->root_lport), i);
if (unlikely(!hot_pool)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) MgrIdex %d hot pool is NULL",
((struct unf_lport_s *)
(vport->root_lport))->port_id, i);
continue;
}
spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock,
pool_lock_flags);
list_for_each_safe(xchg_node, next_xchg_node,
&hot_pool->ini_busylist) {
exch = list_entry(xchg_node, struct unf_xchg_s,
list_xchg_entry);
if (vport == exch->lport &&
atomic_read(&exch->ref_cnt) > 0) {
/* Transfer exch to destroy chain */
list_del(xchg_node);
list_add_tail(xchg_node,
&hot_pool->list_destroy_xchg);
spin_lock_irqsave(&exch->xchg_state_lock,
exch_lock_flags);
exch->io_state |= INI_IO_STATE_DRABORT;
spin_unlock_irqrestore(&exch->xchg_state_lock,
exch_lock_flags);
}
}
spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock,
pool_lock_flags);
}
}
void unf_vport_abort_all_exch(struct unf_lport_s *vport)
{
UNF_CHECK_VALID(0x1988, UNF_TRUE, vport, return);
unf_vport_abort_all_sfs_exch(vport);
unf_vport_abort_ini_io_exch(vport);
}
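/*
 * Function Name : unf_vport_wait_all_exch_removed
 * Function Description: poll the destroy lists until no exchange still
 *                       belongs to this vport, or give up after
 *                       UNF_DELETE_VPORT_MAX_WAIT_TIME_MS
 * Input Parameters : struct unf_lport_s *vport
 * Output Parameters : N/A
 * Return Type : unsigned int
 */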
unsigned int unf_vport_wait_all_exch_removed(struct unf_lport_s *vport)
{
struct unf_xchg_hot_pool_s *hot_pool = NULL;
struct list_head *xchg_node = NULL;
struct list_head *next_xchg_node = NULL;
struct unf_xchg_s *exch = NULL;
unsigned int vport_uses = 0;
unsigned long flags = 0;
unsigned long long cur_jif = jiffies;
unsigned int i = 0;
UNF_CHECK_VALID(0x1989, UNF_TRUE, vport, return RETURN_ERROR);
while (1) {
vport_uses = 0;
for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
hot_pool = unf_get_hot_pool_by_lport(
(struct unf_lport_s *)
(vport->root_lport), i);
if (unlikely(!hot_pool)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN,
UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) hot Pool is NULL",
((struct unf_lport_s *)
(vport->root_lport))->port_id);
continue;
}
spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags);
list_for_each_safe(xchg_node, next_xchg_node,
&hot_pool->list_destroy_xchg) {
exch = list_entry(xchg_node, struct unf_xchg_s,
list_xchg_entry);
if (vport != exch->lport)
continue;
vport_uses++;
if (jiffies - cur_jif >=
msecs_to_jiffies(UNF_DELETE_VPORT_MAX_WAIT_TIME_MS)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR,
UNF_LOG_NORMAL, UNF_ERR,
"[error]VPort(0x%x) Abort Exch(0x%p) Type(0x%x) OxRxid(0x%x 0x%x), sid did(0x%x 0x%x) SeqId(0x%x) IOState(0x%x) Ref(0x%x)",
vport->port_id, exch,
(unsigned int)exch->xchg_type,
(unsigned int)exch->ox_id,
(unsigned int)exch->rx_id,
(unsigned int)exch->sid,
(unsigned int)exch->did,
(unsigned int)exch->seq_id,
(unsigned int)exch->io_state,
atomic_read(&exch->ref_cnt));
}
}
spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock,
flags);
}
if (vport_uses == 0) {
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"[info]VPort(0x%x) has removed all exchanges it used",
vport->port_id);
break;
}
if (jiffies - cur_jif >= msecs_to_jiffies(UNF_DELETE_VPORT_MAX_WAIT_TIME_MS))
return RETURN_ERROR;
msleep(1000);
}
return RETURN_OK;
}
unsigned int unf_vport_wait_rports_removed(struct unf_lport_s *vport)
{
struct unf_disc_s *disc = NULL;
struct list_head *node = NULL;
struct list_head *next_node = NULL;
unsigned int vport_uses = 0;
unsigned long flags = 0;
unsigned long long cur_jif = jiffies;
struct unf_rport_s *rport = NULL;
UNF_CHECK_VALID(0x1990, UNF_TRUE, vport, return RETURN_ERROR);
disc = &vport->disc;
while (1) {
vport_uses = 0;
spin_lock_irqsave(&disc->rport_busy_pool_lock, flags);
list_for_each_safe(node, next_node, &disc->list_delete_rports) {
rport = list_entry(node, struct unf_rport_s,
entry_rport);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL,
UNF_MAJOR,
"[info]Vport(0x%x) Rport(0x%x) point(%p) is in Delete",
vport->port_id, rport->nport_id, rport);
vport_uses++;
}
list_for_each_safe(node, next_node,
&disc->list_destroy_rports) {
rport = list_entry(node, struct unf_rport_s,
entry_rport);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL,
UNF_MAJOR,
"[info]Vport(0x%x) Rport(0x%x) point(%p) is in Destroy",
vport->port_id, rport->nport_id, rport);
vport_uses++;
}
spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flags);
if (vport_uses == 0) {
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"[info]VPort(0x%x) has removed all RPorts it used",
vport->port_id);
break;
}
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Vport(0x%x) has %d RPorts not removed wait timeout(30s)",
vport->port_id, vport_uses);
if (jiffies - cur_jif >=
msecs_to_jiffies(UNF_DELETE_VPORT_MAX_WAIT_TIME_MS))
return RETURN_ERROR;
msleep(5000);
}
UNF_REFERNCE_VAR(rport);
return RETURN_OK;
}
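/*
 * Function Name : unf_destroy_one_vport
 * Function Description: full vport teardown: fabric LOGO, set NOP,
 *                       report link down, abort all exchanges, wait for
 *                       exchanges and RPorts to drain, then remove the
 *                       vport
 * Input Parameters : struct unf_lport_s *vport
 * Output Parameters : N/A
 * Return Type : unsigned int
 */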
unsigned int unf_destroy_one_vport(struct unf_lport_s *vport)
{
unsigned int ret = RETURN_ERROR;
struct unf_lport_s *root_port = NULL;
UNF_CHECK_VALID(0x1992, UNF_TRUE, vport, return RETURN_ERROR);
root_port = (struct unf_lport_s *)vport->root_lport;
unf_vport_fabric_logo(vport);
/* 1 set NOP */
atomic_set(&vport->port_no_operater_flag, UNF_LPORT_NOP);
vport->b_port_removing = UNF_TRUE;
/* 2 report linkdown to scsi and delete rport */
unf_link_down_one_vport(vport);
/* 3 set abort for exchange */
unf_vport_abort_all_exch(vport);
/* 4 wait exch return freepool */
if (!root_port->b_port_dir_exchange) {
ret = unf_vport_wait_all_exch_removed(vport);
if (ret != RETURN_OK) {
if ((root_port->b_port_removing) != UNF_TRUE) {
vport->b_port_removing = UNF_FALSE;
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL,
UNF_ERR,
"[err]VPort(0x%x) can not wait Exchange return freepool",
vport->port_id);
return RETURN_ERROR;
}
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN,
UNF_LOG_NORMAL, UNF_WARN,
"[warn]Port(0x%x) is removing, there is dirty exchange, continue",
root_port->port_id);
root_port->b_port_dir_exchange = UNF_TRUE;
}
}
/* wait rport return rportpool */
ret = unf_vport_wait_rports_removed(vport);
if (ret != RETURN_OK) {
vport->b_port_removing = UNF_FALSE;
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR,
"[err]VPort(0x%x) can not wait Rport return freepool",
vport->port_id);
return RETURN_ERROR;
}
unf_cm_vport_remove(vport);
return RETURN_OK;
}
void unf_link_down_one_vport(struct unf_lport_s *v_vport)
{
unsigned long flag = 0;
struct unf_lport_s *root_lport = NULL;
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_KEVENT,
"[info]VPort(0x%x) linkdown", v_vport->port_id);
spin_lock_irqsave(&v_vport->lport_state_lock, flag);
v_vport->link_up = UNF_PORT_LINK_DOWN;
v_vport->nport_id = 0; /* set nport_id to 0 before sending FDISC again */
unf_lport_stat_ma(v_vport, UNF_EVENT_LPORT_LINK_DOWN);
spin_unlock_irqrestore(&v_vport->lport_state_lock, flag);
root_lport = (struct unf_lport_s *)v_vport->root_lport;
unf_flush_disc_event(&root_lport->disc, v_vport);
unf_clean_linkdown_rport(v_vport);
}
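/*
 * Function Name : unf_linkdown_all_vports
 * Function Description: move all vports to the transition list under
 *                       the pool lock, then link each one down outside
 *                       the lock
 * Input Parameters : void *v_lport
 * Output Parameters : N/A
 * Return Type : void
 */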
void unf_linkdown_all_vports(void *v_lport)
{
struct unf_lport_s *lport = NULL;
struct unf_vport_pool_s *vport_pool = NULL;
struct unf_lport_s *vport = NULL;
struct list_head *node = NULL;
struct list_head *next_node = NULL;
unsigned long flags = 0;
UNF_CHECK_VALID(0x1993, UNF_TRUE, v_lport, return);
lport = (struct unf_lport_s *)v_lport;
vport_pool = lport->vport_pool;
if (unlikely(!vport_pool)) {
UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]Port(0x%x) VPort pool is NULL",
lport->port_id);
return;
}
/* Transfer to the transition chain */
spin_lock_irqsave(&vport_pool->vport_pool_lock, flags);
list_for_each_safe(node, next_node, &lport->list_vports_head) {
vport = list_entry(node, struct unf_lport_s, entry_vport);
list_del_init(&vport->entry_vport);
list_add_tail(&vport->entry_vport,
&lport->list_intergrad_vports);
(void)unf_lport_refinc(vport);
}
spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags);
spin_lock_irqsave(&vport_pool->vport_pool_lock, flags);
while (!list_empty(&lport->list_intergrad_vports)) {
node = (&lport->list_intergrad_vports)->next;
vport = list_entry(node, struct unf_lport_s, entry_vport);
list_del_init(&vport->entry_vport);
list_add_tail(&vport->entry_vport, &lport->list_vports_head);
spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags);
unf_link_down_one_vport(vport);
unf_vport_ref_dec(vport);
spin_lock_irqsave(&vport_pool->vport_pool_lock, flags);
}
spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags);
}
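/*
 * Function Name : unf_process_vports_linkup
 * Function Description: asynchronous event task: after the root lport
 *                       reaches link up in fabric topology, start login
 *                       for each vport; otherwise link the vport down
 * Input Parameters : void *v_arg_in, void *v_arg_out
 * Output Parameters : N/A
 * Return Type : int
 */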
int unf_process_vports_linkup(void *v_arg_in, void *v_arg_out)
{
struct unf_vport_pool_s *vport_pool = NULL;
struct unf_lport_s *lport = NULL;
struct unf_lport_s *vport = NULL;
struct list_head *node = NULL;
struct list_head *next_node = NULL;
unsigned long flags = 0;
int ret = RETURN_OK;
UNF_REFERNCE_VAR(v_arg_out);
UNF_CHECK_VALID(0x1994, UNF_TRUE, v_arg_in, return RETURN_ERROR);
lport = (struct unf_lport_s *)v_arg_in;
if (atomic_read(&lport->port_no_operater_flag) == UNF_LPORT_NOP) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x) is NOP don't continue",
lport->port_id);
return RETURN_OK;
}
if (lport->link_up != UNF_PORT_LINK_UP) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x) is not linkup don't continue.",
lport->port_id);
return RETURN_OK;
}
vport_pool = lport->vport_pool;
if (unlikely(!vport_pool)) {
UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]Port(0x%x) VPort pool is NULL.",
lport->port_id);
return RETURN_OK;
}
/* Transfer to the transition chain */
spin_lock_irqsave(&vport_pool->vport_pool_lock, flags);
list_for_each_safe(node, next_node, &lport->list_vports_head) {
vport = list_entry(node, struct unf_lport_s, entry_vport);
list_del_init(&vport->entry_vport);
list_add_tail(&vport->entry_vport,
&lport->list_intergrad_vports);
(void)unf_lport_refinc(vport);
}
spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags);
spin_lock_irqsave(&vport_pool->vport_pool_lock, flags);
while (!list_empty(&lport->list_intergrad_vports)) {
node = (&lport->list_intergrad_vports)->next;
vport = list_entry(node, struct unf_lport_s, entry_vport);
list_del_init(&vport->entry_vport);
list_add_tail(&vport->entry_vport, &lport->list_vports_head);
spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags);
if (atomic_read(&vport->port_no_operater_flag) ==
UNF_LPORT_NOP) {
unf_vport_ref_dec(vport);
spin_lock_irqsave(&vport_pool->vport_pool_lock, flags);
continue;
}
if ((lport->link_up == UNF_PORT_LINK_UP) &&
(lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC)) {
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"[info]Vport(0x%x) begin login",
vport->port_id);
vport->link_up = UNF_PORT_LINK_UP;
(void)unf_lport_login(vport, lport->en_act_topo);
msleep(100);
} else {
unf_link_down_one_vport(vport);
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Vport(0x%x) login failed because root port linkdown",
vport->port_id);
}
unf_vport_ref_dec(vport);
spin_lock_irqsave(&vport_pool->vport_pool_lock, flags);
}
spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags);
return ret;
}
void unf_linkup_all_vports(struct unf_lport_s *v_lport)
{
struct unf_cm_event_report *event = NULL;
UNF_CHECK_VALID(0x1996, UNF_TRUE, v_lport, return);
if (unlikely((!v_lport->event_mgr.pfn_unf_get_free_event) ||
(!v_lport->event_mgr.pfn_unf_post_event) ||
(!v_lport->event_mgr.pfn_unf_release_event))) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]Port(0x%x) Event fun is NULL",
v_lport->port_id);
return;
}
event = v_lport->event_mgr.pfn_unf_get_free_event((void *)v_lport);
UNF_CHECK_VALID(0x1997, UNF_TRUE, event, return);
event->lport = v_lport;
event->event_asy_flag = UNF_EVENT_ASYN;
event->pfn_unf_event_task = unf_process_vports_linkup;
event->para_in = (void *)v_lport;
v_lport->event_mgr.pfn_unf_post_event(v_lport, event);
}
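/*
 * Function Name : unf_destroy_all_vports
 * Function Description: collect all vports onto the destroy list and
 *                       drop each of them through the SCSI FC transport
 * Input Parameters : struct unf_lport_s *v_lport
 * Output Parameters : N/A
 * Return Type : void
 */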
void unf_destroy_all_vports(struct unf_lport_s *v_lport)
{
struct unf_vport_pool_s *vport_pool = NULL;
struct unf_lport_s *lport = NULL;
struct unf_lport_s *vport = NULL;
struct list_head *node = NULL;
struct list_head *next_node = NULL;
unsigned long flags = 0;
lport = v_lport;
UNF_CHECK_VALID(0x1998, UNF_TRUE, lport, return);
vport_pool = lport->vport_pool;
if (unlikely(!vport_pool)) {
UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]Lport(0x%x) VPort pool is NULL",
lport->port_id);
return;
}
/* Transfer to the transition chain */
spin_lock_irqsave(&vport_pool->vport_pool_lock, flags);
list_for_each_safe(node, next_node, &lport->list_vports_head) {
vport = list_entry(node, struct unf_lport_s, entry_vport);
list_del_init(&vport->entry_vport);
list_add_tail(&vport->entry_vport, &lport->list_destroy_vports);
}
list_for_each_safe(node, next_node, &lport->list_intergrad_vports) {
vport = list_entry(node, struct unf_lport_s, entry_vport);
list_del_init(&vport->entry_vport);
list_add_tail(&vport->entry_vport,
&lport->list_destroy_vports);
atomic_dec(&vport->lport_ref_cnt);
}
spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags);
spin_lock_irqsave(&vport_pool->vport_pool_lock, flags);
while (!list_empty(&lport->list_destroy_vports)) {
node = (&lport->list_destroy_vports)->next;
vport = list_entry(node, struct unf_lport_s, entry_vport);
list_del_init(&vport->entry_vport);
list_add_tail(&vport->entry_vport, &lport->list_vports_head);
spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]VPort(0x%x) Destroy begin",
vport->port_id);
unf_drop_vport(vport);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT,
"[info]VPort(0x%x) Destroy end",
vport->port_id);
spin_lock_irqsave(&vport_pool->vport_pool_lock, flags);
}
spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags);
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __NPIV_H__
#define __NPIV_H__
/* product VPORT configuration */
struct vport_config_s {
unsigned long long node_name;
unsigned long long port_name;
unsigned int port_mode; /* INI, TGT or both */
};
/* product Vport function */
#define PORTID_VPINDEX_MASK 0xff000000
#define PORTID_VPINDEX_SHIT 24
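/*
 * A vport port_id places the VP index in the top byte of the root
 * lport's port_id. Example: vp_index 0x1 on lport port_id 0x200000
 * gives (0x1 << 24) | 0x200000 = 0x01200000.
 */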
unsigned int unf_npiv_conf(unsigned int v_port_id, unsigned long long v_wwpn);
struct unf_lport_s *unf_create_vport(struct unf_lport_s *v_lport,
struct vport_config_s *v_vport_config);
unsigned int unf_delete_vport(unsigned int v_port_id, unsigned int v_vp_index);
/* Vport pool create and release functions */
unsigned int unf_init_vport_pool(struct unf_lport_s *v_lport);
void unf_free_vport_pool(struct unf_lport_s *v_lport);
/* Lport register stLPortMgTemp function */
void unf_vport_remove(void *v_vport);
void unf_vport_ref_dec(struct unf_lport_s *v_vport);
/* linkdown all Vports after receiving a linkdown event */
void unf_linkdown_all_vports(void *v_lport);
/* linkup all Vports after Lport receives FLOGI ACC */
void unf_linkup_all_vports(struct unf_lport_s *v_lport);
/* destroy all Vports when Lport is removed */
void unf_destroy_all_vports(struct unf_lport_s *v_lport);
void unf_vport_fabric_logo(struct unf_lport_s *v_vport);
unsigned int unf_destroy_one_vport(struct unf_lport_s *v_vport);
struct unf_lport_s *unf_alloc_vport(struct unf_lport_s *v_lport,
unsigned long long v_wwpn);
unsigned int unf_drop_vport(struct unf_lport_s *v_vport);
void unf_link_down_one_vport(struct unf_lport_s *v_vport);
void *unf_lookup_vport_by_vp_index(void *v_lport, unsigned short v_vp_index);
void *unf_lookup_vport_by_port_id(void *v_lport, unsigned int v_port_id);
void *unf_lookup_vport_by_did(void *v_lport, unsigned int v_did);
void *unf_lookup_vport_by_wwpn(void *v_lport, unsigned long long v_wwpn);
#endif
// SPDX-License-Identifier: GPL-2.0
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#include "unf_log.h"
#include "unf_common.h"
#include "unf_lport.h"
#include "unf_rport.h"
#include "unf_exchg.h"
#include "unf_service.h"
#include <scsi/scsi_transport_fc.h>
#include "unf_portman.h"
/* rport state: */
/* ready --->>> link_down --->>> closing --->>> timeout --->>> delete */
struct unf_rport_feature_pool_s *port_fea_pool;
/*
 * Function Name : unf_sesion_loss_timeout
 * Function Description: session loss timeout handler: mark the
 *                       target's scsi state as dead
 * Input Parameters : struct work_struct *v_work
 * Output Parameters : N/A
 * Return Type : void
 */
void unf_sesion_loss_timeout(struct work_struct *v_work)
{
struct unf_wwpn_rport_info_s *wwpn_rport_info = NULL;
UNF_CHECK_VALID(0x3040, UNF_TRUE, v_work, return);
wwpn_rport_info = container_of(v_work, struct unf_wwpn_rport_info_s,
loss_tmo_work.work);
if (unlikely(!wwpn_rport_info)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]wwpn_rport_info is NULL");
return;
}
atomic_set(&wwpn_rport_info->en_scsi_state, UNF_SCSI_ST_DEAD);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_KEVENT,
"[info]Port(0x%x) wwpn(0x%llx) set target(0x%x) scsi state to dead",
((struct unf_lport_s *)(wwpn_rport_info->lport))->port_id,
wwpn_rport_info->wwpn,
wwpn_rport_info->target_id);
}
/*
* Function Name : unf_alloc_scsi_id
* Function Description: alloc r_port scsi id
* Input Parameters : struct unf_lport_s *v_lport
* : struct unf_rport_s *v_rport
* Output Parameters : N/A
* Return Type : unsigned int
*/
static unsigned int unf_alloc_scsi_id(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport)
{
struct unf_rport_scsi_id_image_s *rport_scsi_table = NULL;
struct unf_wwpn_rport_info_s *wwn_rport_info = NULL;
unsigned long flags = 0;
unsigned int index = 0;
unsigned int ret = UNF_RETURN_ERROR;
rport_scsi_table = &v_lport->rport_scsi_table;
UNF_REFERNCE_VAR(ret);
spin_lock_irqsave(&rport_scsi_table->scsi_image_table_lock, flags);
/* 1. At first, existence check */
for (index = 0; index < rport_scsi_table->max_scsi_id; index++) {
wwn_rport_info =
&rport_scsi_table->wwn_rport_info_table[index];
if (v_rport->port_name == wwn_rport_info->wwpn) {
spin_unlock_irqrestore(
&rport_scsi_table->scsi_image_table_lock,
flags);
UNF_DELAYED_WORK_SYNC(ret, (v_lport->port_id),
(&wwn_rport_info->loss_tmo_work),
"loss tmo Timer work");
/* Plug case: reuse again */
spin_lock_irqsave(
&rport_scsi_table->scsi_image_table_lock,
flags);
wwn_rport_info->rport = v_rport;
wwn_rport_info->last_en_scis_state =
atomic_read(&wwn_rport_info->en_scsi_state);
atomic_set(&wwn_rport_info->en_scsi_state,
UNF_SCSI_ST_ONLINE);
spin_unlock_irqrestore(
&rport_scsi_table->scsi_image_table_lock,
flags);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO,
UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"[info]port(0x%x) find the same scsi_id(0x%x) by wwpn(0x%llx) rport(%p) n_port_id(0x%x)",
v_lport->port_id, index,
wwn_rport_info->wwpn,
v_rport, v_rport->nport_id);
atomic_inc(&v_lport->resume_scsi_id);
goto find;
}
}
/* 2. Alloc new SCSI ID */
for (index = 0; index < rport_scsi_table->max_scsi_id; index++) {
wwn_rport_info =
&rport_scsi_table->wwn_rport_info_table[index];
if (wwn_rport_info->wwpn == INVALID_WWPN) {
spin_unlock_irqrestore(
&rport_scsi_table->scsi_image_table_lock,
flags);
UNF_DELAYED_WORK_SYNC(ret, (v_lport->port_id),
(&wwn_rport_info->loss_tmo_work),
"loss tmo Timer work");
/* Use the free space */
spin_lock_irqsave(
&rport_scsi_table->scsi_image_table_lock,
flags);
wwn_rport_info->rport = v_rport;
wwn_rport_info->wwpn = v_rport->port_name;
wwn_rport_info->last_en_scis_state =
atomic_read(&wwn_rport_info->en_scsi_state);
atomic_set(&wwn_rport_info->en_scsi_state,
UNF_SCSI_ST_ONLINE);
spin_unlock_irqrestore(
&rport_scsi_table->scsi_image_table_lock,
flags);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO,
UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"[info]port(0x%x) allco new scsi_id(0x%x) by wwpn(0x%llx) rport(%p) n_port_id(0x%x)",
v_lport->port_id, index,
wwn_rport_info->wwpn,
v_rport, v_rport->nport_id);
atomic_inc(&v_lport->alloc_scsi_id);
goto find;
}
}
/* 3. Reuse a slot whose previous device is dead */
for (index = 0; index < rport_scsi_table->max_scsi_id; index++) {
wwn_rport_info =
&rport_scsi_table->wwn_rport_info_table[index];
if (atomic_read(&wwn_rport_info->en_scsi_state) ==
UNF_SCSI_ST_DEAD) {
spin_unlock_irqrestore(
&rport_scsi_table->scsi_image_table_lock,
flags);
UNF_DELAYED_WORK_SYNC(ret, (v_lport->port_id),
(&wwn_rport_info->loss_tmo_work),
"loss tmo Timer work");
spin_lock_irqsave(
&rport_scsi_table->scsi_image_table_lock,
flags);
if (wwn_rport_info->dfx_counter) {
memset(wwn_rport_info->dfx_counter, 0,
sizeof(struct unf_wwpn_dfx_counter_info_s));
}
wwn_rport_info->rport = v_rport;
wwn_rport_info->wwpn = v_rport->port_name;
wwn_rport_info->last_en_scis_state =
atomic_read(&wwn_rport_info->en_scsi_state);
atomic_set(&wwn_rport_info->en_scsi_state,
UNF_SCSI_ST_ONLINE);
spin_unlock_irqrestore(
&rport_scsi_table->scsi_image_table_lock,
flags);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT,
UNF_WARN,
"[info]port(0x%x) reuse a dead scsi_id(0x%x) by wwpn(0x%llx) rport(%p) n_port_id(0x%x)",
v_lport->port_id, index,
wwn_rport_info->wwpn,
v_rport, v_rport->nport_id);
atomic_inc(&v_lport->reuse_scsi_id);
goto find;
}
}
spin_unlock_irqrestore(&rport_scsi_table->scsi_image_table_lock,
flags);
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]port(0x%x) there is not enough scsi_id with max_value(0x%x)",
v_lport->port_id, index);
return INVALID_VALUE32;
find:
if (!wwn_rport_info->dfx_counter) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO,
"[info]Port(0x%x) allocate Rport(0x%x) DFX buffer",
v_lport->port_id, wwn_rport_info->rport->nport_id);
wwn_rport_info->dfx_counter =
vmalloc(sizeof(struct unf_wwpn_dfx_counter_info_s));
if (!wwn_rport_info->dfx_counter) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT,
UNF_ERR,
"[err]Port(0x%x) allocate DFX buffer fail",
v_lport->port_id);
return INVALID_VALUE32;
}
memset(wwn_rport_info->dfx_counter, 0,
sizeof(struct unf_wwpn_dfx_counter_info_s));
}
UNF_REFERNCE_VAR(ret);
return index;
}
static unsigned int unf_get_scsi_id_by_wwpn(struct unf_lport_s *v_lport,
unsigned long long v_wwpn)
{
struct unf_rport_scsi_id_image_s *rport_scsi_table = NULL;
struct unf_wwpn_rport_info_s *wwn_rport_info = NULL;
unsigned long flags = 0;
unsigned int index = 0;
UNF_CHECK_VALID(0x3015, UNF_TRUE,
v_lport, return INVALID_VALUE32);
rport_scsi_table = &v_lport->rport_scsi_table;
if (!v_wwpn)
return INVALID_VALUE32;
spin_lock_irqsave(&rport_scsi_table->scsi_image_table_lock, flags);
for (index = 0; index < rport_scsi_table->max_scsi_id; index++) {
wwn_rport_info =
&rport_scsi_table->wwn_rport_info_table[index];
if (v_wwpn == wwn_rport_info->wwpn) {
spin_unlock_irqrestore(
&rport_scsi_table->scsi_image_table_lock,
flags);
return index;
}
}
spin_unlock_irqrestore(&rport_scsi_table->scsi_image_table_lock,
flags);
return INVALID_VALUE32;
}
static void unf_set_device_state(struct unf_lport_s *v_lport,
unsigned int v_scsi_id,
int en_scsi_state)
{
struct unf_rport_scsi_id_image_s *scsi_image_table = NULL;
struct unf_wwpn_rport_info_s *wwpn_rport_info = NULL;
if (unlikely(v_scsi_id >= UNF_MAX_SCSI_ID)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]Port(0x%x) RPort scsi_id(0x%x) is max than 0x%x",
v_lport->port_id, v_scsi_id, UNF_MAX_SCSI_ID);
return;
}
scsi_image_table = &v_lport->rport_scsi_table;
wwpn_rport_info = &scsi_image_table->wwn_rport_info_table[v_scsi_id];
atomic_set(&wwpn_rport_info->en_scsi_state, en_scsi_state);
}
static void unf_set_rport_state(struct unf_rport_s *v_rport,
enum unf_rport_login_state_e v_states)
{
UNF_CHECK_VALID(0x3055, UNF_TRUE, v_rport, return);
if (v_states != v_rport->rp_state) {
/* Reset R_Port retry count */
v_rport->retries = 0;
}
v_rport->rp_state = v_states;
}
void unf_rport_linkdown(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport)
{
/*
* 1. port_logout
* 2. rcvd_rscn_port_not_in_disc
* 3. each_rport_after_rscn
* 4. rcvd_gpnid_rjt
* 5. rport_after_logout(rport is fabric port)
*/
unsigned long flag = 0;
UNF_CHECK_VALID(0x3000, UNF_TRUE, v_lport, return);
UNF_CHECK_VALID(0x3001, UNF_TRUE, v_rport, return);
UNF_REFERNCE_VAR(v_lport);
/* 1. Update R_Port state: Link Down Event --->>> closing state */
spin_lock_irqsave(&v_rport->rport_state_lock, flag);
unf_rport_state_ma(v_rport, UNF_EVENT_RPORT_LINK_DOWN);
spin_unlock_irqrestore(&v_rport->rport_state_lock, flag);
/* 3. Port enter closing (then enter to Delete) process */
unf_rport_enter_closing(v_rport);
}
static struct unf_rport_s *unf_rport_is_changed(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport,
unsigned int v_sid)
{
if (v_rport) {
/* S_ID or D_ID has been changed */
if ((v_rport->nport_id != v_sid) ||
(v_rport->local_nport_id != v_lport->nport_id)) {
/*
* 1. Swap case: (SID or DID changed):
* Report link down & delete immediately
*/
unf_rport_immediate_linkdown(v_lport, v_rport);
return NULL;
}
}
return v_rport;
}
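/*
 * Function Name : unf_rport_set_qualifier_key_reuse
 * Function Description: arbitrate between the R_Port found by
 *                       N_Port_ID and the one found by WWPN: reuse the
 *                       surviving match, update its WWPN if needed, and
 *                       force both down when two different R_Ports
 *                       claim the same login
 * Input Parameters : struct unf_lport_s *v_lport,
 *                  : struct unf_rport_s *v_rport_by_nport_id,
 *                  : struct unf_rport_s *v_rport_by_wwpn,
 *                  : unsigned long long v_wwpn, unsigned int v_sid
 * Output Parameters : N/A
 * Return Type : struct unf_rport_s *
 */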
struct unf_rport_s *unf_rport_set_qualifier_key_reuse(
struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport_by_nport_id,
struct unf_rport_s *v_rport_by_wwpn,
unsigned long long v_wwpn,
unsigned int v_sid)
{
/* Used for HIFC Chip */
struct unf_rport_s *rport = NULL;
struct unf_rport_s *rporta = NULL;
struct unf_rport_s *rportb = NULL;
int bwwpn_flag = 0;
UNF_CHECK_VALID(0x3002, UNF_TRUE, v_lport, return NULL);
/* About R_Port by N_Port_ID */
rporta = unf_rport_is_changed(v_lport, v_rport_by_nport_id, v_sid);
/* About R_Port by WWpn */
rportb = unf_rport_is_changed(v_lport, v_rport_by_wwpn, v_sid);
if (!rporta && !rportb) {
return NULL;
} else if (!rporta && rportb) {
/* 3. Plug case: reuse again */
rport = rportb;
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"[info]Port(0x%x) RPort(0x%p) WWPN(0x%llx) S_ID(0x%x) D_ID(0x%x) reused by wwpn",
v_lport->port_id, rport, rport->port_name,
rport->nport_id, rport->local_nport_id);
return rport; /* Get by WWPN */
} else if (rporta && !rportb) {
bwwpn_flag = ((rporta->port_name != v_wwpn) &&
(rporta->port_name != 0) &&
(rporta->port_name != INVALID_VALUE64));
if (bwwpn_flag) {
/* 4. WWPN changed: Report link down
* & delete immediately
*/
unf_rport_immediate_linkdown(v_lport, rporta);
return NULL;
}
/* Update WWPN */
rporta->port_name = v_wwpn;
rport = rporta;
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"[info]Port(0x%x) RPort(0x%p) WWPN(0x%llx) S_ID(0x%x) D_ID(0x%x) reused by N_Port_ID",
v_lport->port_id,
rport, rport->port_name,
rport->nport_id, rport->local_nport_id);
return rport; /* Get by N_Port_ID */
}
/* 5. Case for A == B && A && B */
if (rporta == rportb) {
rport = rporta;
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"[info]Port(0x%x) find the same RPort(0x%p) WWPN(0x%llx) S_ID(0x%x) D_ID(0x%x)",
v_lport->port_id,
rport, rport->port_name,
rport->nport_id, rport->local_nport_id);
return rport;
}
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]port(0x%x) find two duplicate login. rport(A:0x%p, WWPN:0x%llx, S_ID:0x%x, D_ID:0x%x) rport(B:0x%p, WWPN:0x%llx, S_ID:0x%x, D_ID:0x%x)",
v_lport->port_id,
rporta, rporta->port_name,
rporta->nport_id, rporta->local_nport_id,
rportb, rportb->port_name,
rportb->nport_id, rportb->local_nport_id);
/* 6. Case for A != B && A && B */
unf_rport_immediate_linkdown(v_lport, rporta);
unf_rport_immediate_linkdown(v_lport, rportb);
return NULL;
}
struct unf_rport_s *unf_get_rport_by_wwn(struct unf_lport_s *v_lport,
unsigned long long v_wwpn)
{
struct unf_lport_s *lport = NULL;
struct unf_disc_s *disc = NULL;
struct unf_rport_s *rport = NULL;
struct list_head *node = NULL;
struct list_head *next_node = NULL;
unsigned long flag = 0;
struct unf_rport_s *find_rport = NULL;
UNF_CHECK_VALID(0x3049, UNF_TRUE, v_lport, return NULL);
lport = (struct unf_lport_s *)v_lport;
disc = &lport->disc;
/* for each r_port from busy_list: compare wwpn(port name) */
spin_lock_irqsave(&disc->rport_busy_pool_lock, flag);
list_for_each_safe(node, next_node, &disc->list_busy_rports) {
rport = list_entry(node, struct unf_rport_s, entry_rport);
if (rport && rport->port_name == v_wwpn) {
find_rport = rport;
break;
}
}
spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag);
return find_rport;
}
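/*
 * Function Name : unf_find_valid_rport
 * Function Description: look up the R_Port by both S_ID and WWPN,
 *                       discard a WWPN match that still holds the FLOGI
 *                       well-known address, and let the qualify hook
 *                       decide which R_Port to use
 * Input Parameters : struct unf_lport_s *v_lport,
 *                  : unsigned long long v_wwpn, unsigned int v_sid
 * Output Parameters : N/A
 * Return Type : struct unf_rport_s *
 */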
struct unf_rport_s *unf_find_valid_rport(struct unf_lport_s *v_lport,
unsigned long long v_wwpn,
unsigned int v_sid)
{
struct unf_rport_s *rport = NULL;
struct unf_rport_s *rport_by_nport_id = NULL;
struct unf_rport_s *rport_by_wwpn = NULL;
unsigned long flags = 0;
UNF_CHECK_VALID(0x3005, UNF_TRUE, v_lport, return NULL);
UNF_CHECK_VALID(0x3006, UNF_TRUE,
v_lport->pfn_unf_qualify_rport, return NULL);
/* Get R_Port by WWN & N_Port_ID */
rport_by_nport_id = unf_get_rport_by_nport_id(v_lport, v_sid);
rport_by_wwpn = unf_get_rport_by_wwn(v_lport, v_wwpn);
/* R_Port check: by WWPN */
if (rport_by_wwpn) {
spin_lock_irqsave(&rport_by_wwpn->rport_state_lock, flags);
if (rport_by_wwpn->nport_id == UNF_FC_FID_FLOGI) {
spin_unlock_irqrestore(
&rport_by_wwpn->rport_state_lock,
flags);
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR,
UNF_LOG_LOGIN_ATT, UNF_INFO,
"[err]Port(0x%x) RPort(0x%p) find by WWPN(0x%llx) is invalid",
v_lport->port_id, rport_by_wwpn, v_wwpn);
rport_by_wwpn = NULL;
} else {
spin_unlock_irqrestore(
&rport_by_wwpn->rport_state_lock,
flags);
}
}
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"[info]Port(0x%x_0x%x) RPort(0x%p) find by N_Port_ID(0x%x) and RPort(0x%p) by WWPN(0x%llx)",
v_lport->port_id, v_lport->nport_id,
rport_by_nport_id, v_sid, rport_by_wwpn, v_wwpn);
/* R_Port validity check: get by WWPN & N_Port_ID */
rport = v_lport->pfn_unf_qualify_rport(v_lport, rport_by_nport_id,
rport_by_wwpn,
v_wwpn, v_sid);
return rport;
}
void unf_rport_delay_login(struct unf_rport_s *v_rport)
{
UNF_CHECK_VALID(0x3009, UNF_TRUE, v_rport, return);
/* Do R_Port recovery: PLOGI or PRLI or LOGO */
unf_rport_error_recovery(v_rport);
}
unsigned int unf_rport_ref_inc(struct unf_rport_s *v_rport)
{
UNF_CHECK_VALID(0x3010, UNF_TRUE,
v_rport, return UNF_RETURN_ERROR);
if (atomic_read(&v_rport->rport_ref_cnt) <= 0) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]Rport(0x%x) reference count is wrong %d",
v_rport->nport_id,
atomic_read(&v_rport->rport_ref_cnt));
return UNF_RETURN_ERROR;
}
atomic_inc(&v_rport->rport_ref_cnt);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO,
"[info]Rport(0x%x) reference count is %d",
v_rport->nport_id, atomic_read(&v_rport->rport_ref_cnt));
return RETURN_OK;
}
void unf_rport_enter_logo(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport)
{
/*
* 1. TMF/ABTS timeout recovery :Y
* 2. L_Port error recovery --->>> larger than retry_count :Y
* 3. R_Port error recovery --->>> larger than retry_count :Y
* 4. Check PLOGI parameters --->>> parameter is error :Y
* 5. PRLI handler --->>> R_Port state is error :Y
* 6. PDISC handler --->>> R_Port state is not PRLI_WAIT :Y
* 7. ADISC handler --->>> R_Port state is not PRLI_WAIT :Y
* 8. PLOGI wait timeout with R_PORT is INI mode :Y
* 9. RCVD GFFID_RJT --->>> R_Port state is INIT :Y
* 10. RCVD GPNID_ACC --->>> R_Port state is error :Y
* 11. Private Loop mode with LOGO case :Y
* 12. P2P mode with LOGO case :Y
* 13. Fabric mode with LOGO case :Y
* 14. RCVD PRLI_ACC with R_Port is INI :Y
* 15. TGT RCVD BLS_REQ with session is error :Y
*/
unsigned long flags = 0;
UNF_CHECK_VALID(0x3013, UNF_TRUE, v_lport, return);
UNF_CHECK_VALID(0x3014, UNF_TRUE, v_rport, return);
spin_lock_irqsave(&v_rport->rport_state_lock, flags);
if ((v_rport->rp_state == UNF_RPORT_ST_CLOSING) ||
(v_rport->rp_state == UNF_RPORT_ST_DELETE)) {
/* 1. Already within Closing or Delete: Do nothing */
spin_unlock_irqrestore(&v_rport->rport_state_lock, flags);
return;
} else if (v_rport->rp_state == UNF_RPORT_ST_LOGO) {
/* 2. Update R_Port state:
* Normal Enter Event --->>> closing state
*/
unf_rport_state_ma(v_rport, UNF_EVENT_RPORT_NORMAL_ENTER);
spin_unlock_irqrestore(&v_rport->rport_state_lock, flags);
/* Send Logo if necessary */
if (unf_send_logo(v_lport, v_rport) != RETURN_OK)
unf_rport_enter_closing(v_rport);
} else {
/*
* 3. Update R_Port state: Link Down Event --->>> closing state
* enter closing state
*/
unf_rport_state_ma(v_rport, UNF_EVENT_RPORT_LINK_DOWN);
spin_unlock_irqrestore(&v_rport->rport_state_lock, flags);
unf_rport_enter_closing(v_rport);
}
}
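/*
 * Function Name : unf_free_scsi_id
 * Function Description: unbind the R_Port from its scsi_id slot and
 *                       mark the slot dead; the WWPN is kept so the
 *                       slot can be reused on a later login
 * Input Parameters : struct unf_lport_s *v_lport, unsigned int v_scsi_id
 * Output Parameters : N/A
 * Return Type : unsigned int
 */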
unsigned int unf_free_scsi_id(struct unf_lport_s *v_lport,
unsigned int v_scsi_id)
{
unsigned long flags = 0;
struct unf_rport_scsi_id_image_s *rport_scsi_table = NULL;
struct unf_wwpn_rport_info_s *wwn_rport_info = NULL;
UNF_CHECK_VALID(0x3016, UNF_TRUE,
v_lport, return UNF_RETURN_ERROR);
if (unlikely(v_lport->b_port_removing == UNF_TRUE)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x_0x%x) is removing and do nothing",
v_lport->port_id, v_lport->nport_id);
return UNF_RETURN_ERROR;
}
if (unlikely(v_scsi_id >= UNF_MAX_SCSI_ID)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]Port(0x%x_0x%x) scsi_id(0x%x) is bigger than %d",
v_lport->port_id, v_lport->nport_id,
v_scsi_id, UNF_MAX_SCSI_ID);
return UNF_RETURN_ERROR;
}
rport_scsi_table = &v_lport->rport_scsi_table;
if (rport_scsi_table->wwn_rport_info_table) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_INFO,
"[warn]Port(0x%x_0x%x) RPort(0x%p) free scsi_id(0x%x) wwpn(0x%llx) target_id(0x%x) succeed",
v_lport->port_id, v_lport->nport_id,
rport_scsi_table->wwn_rport_info_table[v_scsi_id].rport,
v_scsi_id,
rport_scsi_table->wwn_rport_info_table[v_scsi_id].wwpn,
rport_scsi_table->wwn_rport_info_table[v_scsi_id].target_id);
spin_lock_irqsave(&rport_scsi_table->scsi_image_table_lock,
flags);
wwn_rport_info =
&rport_scsi_table->wwn_rport_info_table[v_scsi_id];
if (wwn_rport_info->rport) {
wwn_rport_info->rport->rport = NULL;
wwn_rport_info->rport = NULL;
}
wwn_rport_info->target_id = INVALID_VALUE32;
atomic_set(&wwn_rport_info->en_scsi_state, UNF_SCSI_ST_DEAD);
/* NOTE: keep WWPN/Port_Name unchanged (not cleared) */
spin_unlock_irqrestore(
&rport_scsi_table->scsi_image_table_lock,
flags);
return RETURN_OK;
}
return UNF_RETURN_ERROR;
}
static void unf_report_ini_linkup_event(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport)
{
UNF_CHECK_VALID(0x3031, UNF_TRUE, v_lport, return);
UNF_CHECK_VALID(0x3032, UNF_TRUE, v_rport, return);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR,
"[event]Port(0x%x) RPort(0x%x_0x%p) put INI link up work(%p) to work_queue",
v_lport->port_id, v_rport->nport_id, v_rport,
&v_rport->start_work);
if (unlikely(!queue_work(v_lport->link_event_wq,
&v_rport->start_work))) {
atomic_inc(&v_lport->add_start_work_failed);
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR,
"[err]Port(0x%x) RPort(0x%x_0x%p) put INI link up to work_queue failed",
v_lport->port_id, v_rport->nport_id, v_rport);
}
}
static void unf_report_ini_linkdown_event(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport)
{
unsigned int scsi_id = 0;
struct fc_rport *rport = NULL;
unsigned long flag = 0;
UNF_CHECK_VALID(0x3033, UNF_TRUE, v_lport, return);
UNF_CHECK_VALID(0x3034, UNF_TRUE, v_rport, return);
/*
 * 1. set local device (rport/rport_info_table) state to OFF_LINE
 *
 * rport->scsi_id is valid from rport link up until link down
 */
spin_lock_irqsave(&v_rport->rport_state_lock, flag);
scsi_id = v_rport->scsi_id;
unf_set_device_state(v_lport, scsi_id, UNF_SCSI_ST_OFFLINE);
/* 2. delete scsi's rport */
rport = (struct fc_rport *)v_rport->rport;
spin_unlock_irqrestore(&v_rport->rport_state_lock, flag);
if (rport) {
fc_remote_port_delete(rport);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO,
UNF_LOG_LOGIN_ATT, UNF_KEVENT,
"[event]port(0x%x_0x%x) delete rport(0x%x) wwpn(0x%llx) scsi_id(0x%x) succeed",
v_lport->port_id, v_lport->nport_id,
v_rport->nport_id,
v_rport->port_name, scsi_id);
atomic_inc(&v_lport->scsi_session_del_success);
} else {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_KEVENT,
"[info]Port(0x%x_0x%x) delete RPort(0x%x_0x%p) failed",
v_lport->port_id, v_lport->nport_id,
v_rport->nport_id, v_rport);
atomic_inc(&v_lport->scsi_session_del_failed);
}
}
void unf_update_lport_state_by_linkup_event(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport,
unsigned int rport_att)
{
/* Report R_Port Link Up/Down Event */
unsigned long flag = 0;
enum unf_port_state_e en_lport_state = 0;
UNF_CHECK_VALID(0x3019, UNF_TRUE, v_lport, return);
UNF_CHECK_VALID(0x3020, UNF_TRUE, v_rport, return);
spin_lock_irqsave(&v_rport->rport_state_lock, flag);
/* 1. R_Port does not have TGT mode any more */
if (!(rport_att & UNF_FC4_FRAME_PARM_3_TGT) &&
(v_rport->lport_ini_state == UNF_PORT_STATE_LINKUP)) {
v_rport->last_lport_ini_state = v_rport->lport_ini_state;
// L_Port INI mode: Down
v_rport->lport_ini_state = UNF_PORT_STATE_LINKDOWN;
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x) RPort(0x%x) does not have TGT attribute(0x%x) any more",
v_lport->port_id, v_rport->nport_id, rport_att);
}
/* 2. R_Port with TGT mode, L_Port with INI mode */
if ((rport_att & UNF_FC4_FRAME_PARM_3_TGT) &&
(v_lport->options & UNF_FC4_FRAME_PARM_3_INI)) {
v_rport->last_lport_ini_state = v_rport->lport_ini_state;
// L_Port INI mode: Up
v_rport->lport_ini_state = UNF_PORT_STATE_LINKUP;
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_INFO,
"[warn]Port(0x%x) update INI state with last(0x%x) and now(0x%x)",
v_lport->port_id, v_rport->last_lport_ini_state,
v_rport->lport_ini_state);
}
/* 3. Report L_Port INI/TGT Down/Up event to SCSI */
if (v_rport->last_lport_ini_state == v_rport->lport_ini_state) {
if (v_rport->nport_id < UNF_FC_FID_DOM_MGR) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN,
UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x) RPort(0x%x %p) INI state(0x%x) has not been changed",
v_lport->port_id, v_rport->nport_id, v_rport,
v_rport->lport_ini_state);
}
spin_unlock_irqrestore(&v_rport->rport_state_lock, flag);
return;
}
en_lport_state = v_rport->lport_ini_state;
spin_unlock_irqrestore(&v_rport->rport_state_lock, flag);
switch (en_lport_state) {
/* Link Down */
case UNF_PORT_STATE_LINKDOWN:
unf_report_ini_linkdown_event(v_lport, v_rport);
break;
/* Link Up */
case UNF_PORT_STATE_LINKUP:
unf_report_ini_linkup_event(v_lport, v_rport);
break;
default:
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x) with unknown link status(0x%x)",
v_lport->port_id, v_rport->lport_ini_state);
break;
}
}
static void unf_rport_call_back(void *v_rport,
void *v_lport,
unsigned int v_result)
{
/* Report R_Port link down event */
struct unf_rport_s *rport = NULL;
struct unf_lport_s *lport = NULL;
unsigned long flag = 0;
UNF_REFERNCE_VAR(lport);
UNF_REFERNCE_VAR(v_result);
UNF_CHECK_VALID(0x3037, UNF_TRUE, v_rport, return);
UNF_CHECK_VALID(0x3038, UNF_TRUE, v_lport, return);
rport = (struct unf_rport_s *)v_rport;
lport = (struct unf_lport_s *)v_lport;
spin_lock_irqsave(&rport->rport_state_lock, flag);
rport->last_lport_ini_state = rport->lport_ini_state;
rport->lport_ini_state = UNF_PORT_STATE_LINKDOWN;
rport->last_lport_tgt_state = rport->lport_tgt_state;
rport->lport_tgt_state = UNF_PORT_STATE_LINKDOWN;
/* Report R_Port Link Down Event to scsi */
if (rport->last_lport_ini_state == rport->lport_ini_state) {
if (rport->nport_id < UNF_FC_FID_DOM_MGR) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN,
UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x) RPort(0x%x %p) INI state(0x%x) has not been changed",
lport->port_id, rport->nport_id, rport,
rport->lport_ini_state);
}
spin_unlock_irqrestore(&rport->rport_state_lock, flag);
return;
}
spin_unlock_irqrestore(&rport->rport_state_lock, flag);
unf_report_ini_linkdown_event(lport, rport);
}
static void unf_rport_recovery_timeout(struct work_struct *v_work)
{
struct unf_lport_s *lport = NULL;
struct unf_rport_s *rport = NULL;
unsigned int ret = RETURN_OK;
unsigned long flag = 0;
enum unf_rport_login_state_e en_rp_state = UNF_RPORT_ST_INIT;
UNF_CHECK_VALID(0x3039, UNF_TRUE, v_work, return);
rport = container_of(v_work, struct unf_rport_s, recovery_work.work);
if (unlikely(!rport)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT,
UNF_ERR, "[err]RPort is NULL");
return;
}
lport = rport->lport;
if (unlikely(!lport)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]RPort(0x%x) Port is NULL",
rport->nport_id);
/* for timer */
unf_rport_ref_dec(rport);
return;
}
spin_lock_irqsave(&rport->rport_state_lock, flag);
en_rp_state = rport->rp_state;
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"[info]Port(0x%x_0x%x) RPort(0x%x) state(0x%x) recovery timer timeout",
lport->port_id, lport->nport_id,
rport->nport_id, en_rp_state);
spin_unlock_irqrestore(&rport->rport_state_lock, flag);
switch (en_rp_state) {
case UNF_RPORT_ST_PLOGI_WAIT:
if (((lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT) &&
(lport->port_name > rport->port_name)) ||
lport->en_act_topo != UNF_ACT_TOP_P2P_DIRECT) {
/* P2P direct: only the master (larger WWPN) resends PLOGI; other topologies always resend */
ret = unf_send_plogi(rport->lport, rport);
}
break;
case UNF_RPORT_ST_PRLI_WAIT:
ret = unf_send_prli(rport->lport, rport);
break;
default:
break;
}
if (ret != RETURN_OK)
unf_rport_error_recovery(rport);
/* paired with the reference taken for the timer */
unf_rport_ref_dec(rport);
}
static unsigned int unf_get_dev_loss_tmo_by_rport(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport)
{
struct fc_rport *rport = (struct fc_rport *)v_rport->rport;
if (rport)
return rport->dev_loss_tmo;
else
return (unsigned int)unf_get_link_lose_tmo(v_lport);
}
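/*
 * Function Name : unf_schedule_closing_work
 * Function Description: cancel pending recovery/open work, queue the
 *                       closing work to thread context and arm the
 *                       dev-loss timer for the R_Port's scsi_id slot
 * Input Parameters : struct unf_lport_s *v_lport,
 *                  : struct unf_rport_s *v_rport
 * Output Parameters : N/A
 * Return Type : void
 */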
void unf_schedule_closing_work(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport)
{
unsigned long flags = 0;
struct unf_rport_scsi_id_image_s *rport_scsi_table = NULL;
struct unf_wwpn_rport_info_s *wwn_rport_info = NULL;
unsigned int scsi_id = 0;
unsigned int ret = 0;
unsigned int delay = 0;
UNF_CHECK_VALID(0x3561, UNF_TRUE, v_lport, return);
UNF_CHECK_VALID(0x3562, UNF_TRUE, v_rport, return);
delay = unf_get_dev_loss_tmo_by_rport(v_lport, v_rport);
rport_scsi_table = &v_lport->rport_scsi_table;
scsi_id = v_rport->scsi_id;
spin_lock_irqsave(&v_rport->rport_state_lock, flags);
/* 1. Cancel recovery_work */
if (cancel_delayed_work(&v_rport->recovery_work)) {
atomic_dec(&v_rport->rport_ref_cnt);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"[info]Port(0x%x_0x%x) RPort(0x%x_0x%p) cancel recovery work succeed",
v_lport->port_id, v_lport->nport_id,
v_rport->nport_id, v_rport);
}
/* 2. Cancel Open_work */
if (cancel_delayed_work(&v_rport->open_work)) {
atomic_dec(&v_rport->rport_ref_cnt);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"[info]Port(0x%x_0x%x) RPort(0x%x_0x%p) cancel open work succeed",
v_lport->port_id, v_lport->nport_id,
v_rport->nport_id, v_rport);
}
spin_unlock_irqrestore(&v_rport->rport_state_lock, flags);
/* 3. Work in-queue (switch to thread context) */
if (!queue_work(v_lport->link_event_wq, &v_rport->closing_work)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_ERR,
"[warn]Port(0x%x) RPort(0x%x_0x%p) add link down to work queue failed",
v_lport->port_id, v_rport->nport_id, v_rport);
atomic_inc(&v_lport->add_closing_work_failed);
} else {
spin_lock_irqsave(&v_rport->rport_state_lock, flags);
(void)unf_rport_ref_inc(v_rport);
spin_unlock_irqrestore(&v_rport->rport_state_lock, flags);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR,
"[info]Port(0x%x) RPort(0x%x_0x%p) add link down to work(%p) queue succeed",
v_lport->port_id, v_rport->nport_id, v_rport,
&v_rport->closing_work);
}
if (v_rport->nport_id > UNF_FC_FID_DOM_MGR)
return;
if (scsi_id >= UNF_MAX_SCSI_ID) {
scsi_id = unf_get_scsi_id_by_wwpn(v_lport, v_rport->port_name);
if (scsi_id >= UNF_MAX_SCSI_ID) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN,
UNF_LOG_NORMAL, UNF_WARN,
"[warn]Port(0x%x) RPort(0x%p) NPortId(0x%x) wwpn(0x%llx) option(0x%x) scsi_id(0x%x) is max than(0x%x)",
v_lport->port_id, v_rport, v_rport->nport_id,
v_rport->port_name,
v_rport->options, scsi_id,
UNF_MAX_SCSI_ID);
return;
}
}
wwn_rport_info = &rport_scsi_table->wwn_rport_info_table[scsi_id];
ret = queue_delayed_work(
unf_work_queue,
&wwn_rport_info->loss_tmo_work,
(unsigned long)delay * msecs_to_jiffies(1000));
if (!ret) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR,
"[info] Port(0x%x) add RPort(0x%p) NPortId(0x%x) scsi_id(0x%x) wwpn(0x%llx) loss timeout work failed",
v_lport->port_id, v_rport,
v_rport->nport_id, scsi_id,
v_rport->port_name);
}
}
static void unf_rport_closing_timeout(struct work_struct *v_work)
{
/* closing --->>>(timeout)--->>> delete */
struct unf_rport_s *rport = NULL;
struct unf_lport_s *lport = NULL;
struct unf_disc_s *disc = NULL;
unsigned long rport_flag = 0;
unsigned long disc_flag = 0;
void (*pfn_unf_rport_call_back)(void *, void *, unsigned int) = NULL;
UNF_CHECK_VALID(0x3040, UNF_TRUE, v_work, return);
/* Get R_Port & L_Port & Disc */
rport = container_of(v_work, struct unf_rport_s, closing_work);
if (unlikely(!rport)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT,
UNF_ERR, "[err]RPort is NULL");
return;
}
lport = rport->lport;
if (unlikely(!lport)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]RPort(0x%x_0x%p) Port is NULL",
rport->nport_id, rport);
/* Release directly (for timer) */
unf_rport_ref_dec(rport);
return;
}
disc = &lport->disc;
spin_lock_irqsave(&rport->rport_state_lock, rport_flag);
/* 1. Update R_Port state: event_timeout --->>> state_delete */
unf_rport_state_ma(rport, UNF_EVENT_RPORT_CLS_TIMEOUT);
/* Check R_Port state */
if (rport->rp_state != UNF_RPORT_ST_DELETE) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]Port(0x%x_0x%x) RPort(0x%x) closing timeout with error state(0x%x)",
lport->port_id, lport->nport_id,
rport->nport_id, rport->rp_state);
spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag);
/* Dec ref_cnt for timer */
unf_rport_ref_dec(rport);
return;
}
pfn_unf_rport_call_back = rport->pfn_unf_rport_call_back;
spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag);
/* 2. Put R_Port to delete list */
spin_lock_irqsave(&disc->rport_busy_pool_lock, disc_flag);
list_del_init(&rport->entry_rport);
list_add_tail(&rport->entry_rport, &disc->list_delete_rports);
spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag);
/* 3. Report rport link down event to scsi */
if (pfn_unf_rport_call_back) { /* unf_rport_call_back */
pfn_unf_rport_call_back((void *)rport, (void *)rport->lport,
RETURN_OK);
} else {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]RPort(0x%x) callback is NULL",
rport->nport_id);
}
/* 4. Remove/delete R_Port */
unf_rport_ref_dec(rport);
unf_rport_ref_dec(rport);
}
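/*
 * Function Name : unf_rport_linkup_to_scsi
 * Function Description: link-up work: allocate a scsi_id for the
 *                       R_Port, register it with fc_remote_port_add,
 *                       change its role to FCP target and record the
 *                       binding in the scsi image table
 * Input Parameters : struct work_struct *v_work
 * Output Parameters : N/A
 * Return Type : void
 */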
static void unf_rport_linkup_to_scsi(struct work_struct *v_work)
{
struct fc_rport_identifiers rport_ids;
struct fc_rport *rport = NULL;
unsigned long flags = 0;
struct unf_wwpn_rport_info_s *wwn_rport_info = NULL;
struct unf_rport_scsi_id_image_s *rport_scsi_table = NULL;
unsigned int scsi_id = 0;
struct unf_lport_s *lport = NULL;
struct unf_rport_s *unf_rport = NULL;
UNF_CHECK_VALID(0x3040, UNF_TRUE, v_work, return);
unf_rport = container_of(v_work, struct unf_rport_s, start_work);
if (unlikely(!unf_rport)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]RPort is NULL for work(%p)", v_work);
return;
}
lport = unf_rport->lport;
if (unlikely(!lport)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]RPort(0x%x_0x%p) Port is NULL",
unf_rport->nport_id, unf_rport);
return;
}
/* 1. Alloc R_Port SCSI_ID (image table) */
unf_rport->scsi_id = unf_alloc_scsi_id(lport, unf_rport);
if (unlikely(unf_rport->scsi_id == INVALID_VALUE32)) {
atomic_inc(&lport->scsi_session_add_failed);
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]Port(0x%x_0x%x) RPort(0x%x_0x%p) wwpn(0x%llx) scsi_id(0x%x) is invalid",
lport->port_id, lport->nport_id,
unf_rport->nport_id, unf_rport,
unf_rport->port_name, unf_rport->scsi_id);
/* NOTE: return */
return;
}
/* 2. Add rport to scsi */
scsi_id = unf_rport->scsi_id;
rport_ids.node_name = unf_rport->node_name;
rport_ids.port_name = unf_rport->port_name;
rport_ids.port_id = unf_rport->nport_id;
rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
rport = fc_remote_port_add(lport->host_info.p_scsi_host,
0, &rport_ids);
if (unlikely(!rport)) {
atomic_inc(&lport->scsi_session_add_failed);
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]Port(0x%x_0x%x) RPort(0x%x_0x%p) wwpn(0x%llx) report link up to scsi failed",
lport->port_id, lport->nport_id,
unf_rport->nport_id, unf_rport,
unf_rport->port_name);
unf_free_scsi_id(lport, scsi_id);
return;
}
/* 3. Change rport role save local SCSI_ID to scsi rport */
*((unsigned int *)rport->dd_data) = scsi_id;
rport->supported_classes = FC_COS_CLASS3;
rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET;
fc_remote_port_rolechg(rport, rport_ids.roles);
/* 4. Save scsi rport info to local R_Port */
spin_lock_irqsave(&unf_rport->rport_state_lock, flags);
unf_rport->rport = rport;
spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags);
rport_scsi_table = &lport->rport_scsi_table;
spin_lock_irqsave(&rport_scsi_table->scsi_image_table_lock, flags);
wwn_rport_info = &rport_scsi_table->wwn_rport_info_table[scsi_id];
wwn_rport_info->target_id = rport->scsi_target_id;
wwn_rport_info->rport = unf_rport;
atomic_set(&wwn_rport_info->en_scsi_state, UNF_SCSI_ST_ONLINE);
spin_unlock_irqrestore(&rport_scsi_table->scsi_image_table_lock,
flags);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_KEVENT,
"[event]port(0x%x_0x%x) rport(0x%x) wwpn(0x%llx) scsi_id(0x%x) link up to scsi succeed",
lport->port_id, lport->nport_id,
unf_rport->nport_id, unf_rport->port_name,
scsi_id);
atomic_inc(&lport->scsi_session_add_success);
}
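/* Open (PRLI wait) timer: if the R_Port is still in PRLI_WAIT when
 * this work fires, the login never completed, so force a link-down
 * transition and enter the closing flow.
 */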
static void unf_rport_open_timeout(struct work_struct *v_work)
{
struct unf_rport_s *rport = NULL;
struct unf_lport_s *lport = NULL;
unsigned long flags = 0;
UNF_CHECK_VALID(0x3041, UNF_TRUE, v_work, return);
rport = container_of(v_work, struct unf_rport_s, open_work.work);
if (!rport) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]RPort is NULL");
return;
}
spin_lock_irqsave(&rport->rport_state_lock, flags);
lport = rport->lport;
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x_0x%x) RPort(0x%x) open work timeout with state(0x%x)",
lport->port_id, lport->nport_id,
rport->nport_id, rport->rp_state);
/* NOTE: R_Port state check */
if (rport->rp_state != UNF_RPORT_ST_PRLI_WAIT) {
spin_unlock_irqrestore(&rport->rport_state_lock, flags);
/* Dec ref_cnt for timer case */
unf_rport_ref_dec(rport);
return;
}
/* Report R_Port Link Down event */
unf_rport_state_ma(rport, UNF_EVENT_RPORT_LINK_DOWN);
spin_unlock_irqrestore(&rport->rport_state_lock, flags);
unf_rport_enter_closing(rport);
/* Dec ref_cnt for timer case */
unf_rport_ref_dec(rport);
UNF_REFERNCE_VAR(lport);
}
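/* Allocate an RPI index for the R_Port by scanning pul_rpi_bitmap
 * for the first clear bit below max_login; the bit is set under
 * rport_free_pool_lock so concurrent allocators cannot pick the
 * same index.
 */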
static unsigned int unf_alloc_index_for_rport(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport)
{
unsigned long rport_flag = 0;
unsigned long pool_flag = 0;
unsigned int alloc_indx = 0;
unsigned int max_rport = 0;
struct unf_rport_pool_s *rport_pool = NULL;
rport_pool = &v_lport->rport_pool;
max_rport = v_lport->low_level_func.lport_cfg_items.max_login;
spin_lock_irqsave(&rport_pool->rport_free_pool_lock, pool_flag);
while (alloc_indx < max_rport) {
if (!test_bit((int)alloc_indx, rport_pool->pul_rpi_bitmap)) {
/* Case for HIFC */
if (unlikely(atomic_read(
&v_lport->port_no_operater_flag) ==
UNF_LPORT_NOP)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN,
UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x) is within NOP",
v_lport->port_id);
spin_unlock_irqrestore(
&rport_pool->rport_free_pool_lock,
pool_flag);
return UNF_RETURN_ERROR;
}
spin_lock_irqsave(&v_rport->rport_state_lock,
rport_flag);
/* set R_Port index */
v_rport->rport_index = alloc_indx;
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO,
UNF_LOG_LOGIN_ATT, UNF_INFO,
"[info]Port(0x%x) RPort(0x%x) alloc index(0x%x) succeed",
v_lport->port_id, v_rport->nport_id,
alloc_indx);
spin_unlock_irqrestore(&v_rport->rport_state_lock,
rport_flag);
/* Set (index) bit */
set_bit((int)alloc_indx, rport_pool->pul_rpi_bitmap);
/* Break here */
break;
}
alloc_indx++;
}
spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, pool_flag);
if (alloc_indx == max_rport)
return UNF_RETURN_ERROR;
else
return RETURN_OK;
}
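/* Wake up a waiter (if any) once every R_Port has been returned to
 * the free pool, i.e. rport_pool_count has climbed back to max_login;
 * presumably used to synchronize port teardown.
 */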
static void unf_check_rport_pool_status(struct unf_lport_s *v_lport)
{
struct unf_lport_s *lport = v_lport;
struct unf_rport_pool_s *rport_pool = NULL;
unsigned long flags = 0;
unsigned int max_rport = 0;
UNF_CHECK_VALID(0x3045, UNF_TRUE, v_lport, return);
rport_pool = &lport->rport_pool;
spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flags);
max_rport = lport->low_level_func.lport_cfg_items.max_login;
if ((rport_pool->rport_pool_completion) &&
(max_rport == rport_pool->rport_pool_count)) {
complete(rport_pool->rport_pool_completion);
}
spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flags);
}
void unf_init_rport_params(struct unf_rport_s *v_rport,
struct unf_lport_s *v_lport)
{
struct unf_rport_s *rport = v_rport;
unsigned long flag = 0;
UNF_CHECK_VALID(0x3046, UNF_TRUE, rport, return);
UNF_CHECK_VALID(0x3046, UNF_TRUE, v_lport, return);
spin_lock_irqsave(&rport->rport_state_lock, flag);
unf_set_rport_state(rport, UNF_RPORT_ST_INIT);
/* set callback function */
rport->pfn_unf_rport_call_back = unf_rport_call_back;
rport->lport = v_lport;
rport->fcp_conf_needed = UNF_FALSE;
rport->tape_support_needed = UNF_FALSE;
rport->mas_retries = UNF_MAX_RETRY_COUNT;
rport->logo_retries = 0;
rport->retries = 0;
rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS;
rport->last_lport_ini_state = UNF_PORT_STATE_LINKDOWN;
rport->lport_ini_state = UNF_PORT_STATE_LINKDOWN;
rport->last_lport_tgt_state = UNF_PORT_STATE_LINKDOWN;
rport->lport_tgt_state = UNF_PORT_STATE_LINKDOWN;
rport->node_name = 0;
rport->port_name = INVALID_WWPN;
rport->disc_done = 0;
rport->scsi_id = INVALID_VALUE32;
rport->data_thread = NULL;
sema_init(&rport->task_sema, 0);
atomic_set(&rport->rport_ref_cnt, 0);
atomic_set(&rport->pending_io_cnt, 0);
rport->rport_alloc_jifs = jiffies;
rport->ed_tov = UNF_DEFAULT_EDTOV + 500;
rport->ra_tov = UNF_DEFAULT_RATOV;
INIT_WORK(&rport->closing_work, unf_rport_closing_timeout);
INIT_WORK(&rport->start_work, unf_rport_linkup_to_scsi);
INIT_DELAYED_WORK(&rport->recovery_work, unf_rport_recovery_timeout);
INIT_DELAYED_WORK(&rport->open_work, unf_rport_open_timeout);
atomic_inc(&rport->rport_ref_cnt);
spin_unlock_irqrestore(&rport->rport_state_lock, flag);
}
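/* Ask the low-level driver (e.g. hifc) to allocate its per-R_Port
 * resources via the pfn_unf_alloc_rport_res hook; succeeds trivially
 * when the hook is not provided.
 */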
static unsigned int unf_alloc_llrport_resource(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport,
unsigned int v_nport_id)
{
unsigned int ret = RETURN_OK;
struct unf_rport_info_s rport_info = { 0 };
struct unf_lport_s *lport = NULL;
lport = v_lport->root_lport;
if (lport->low_level_func.service_op.pfn_unf_alloc_rport_res) {
rport_info.nport_id = v_nport_id;
rport_info.rport_index = v_rport->rport_index;
rport_info.local_nport_id = v_lport->nport_id; /* sid */
rport_info.port_name = 0;
ret = lport->low_level_func.service_op.pfn_unf_alloc_rport_res(
lport->fc_port,
&rport_info);
} else {
ret = RETURN_OK;
}
return ret;
}
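/* Insert the freshly allocated R_Port into the busy list, unless an
 * R_Port with the same N_Port_ID is already there: in that case the
 * new one is returned to the free pool (index bit cleared) and the
 * existing R_Port is reused.
 */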
static void *unf_add_rport_to_busy_list(struct unf_lport_s *v_lport,
struct unf_rport_s *v_new_rport,
unsigned int v_nport_id)
{
struct unf_rport_pool_s *rport_pool = NULL;
struct unf_lport_s *lport = NULL;
struct unf_disc_s *disc = NULL;
struct unf_rport_s *new_rport = v_new_rport;
struct unf_rport_s *old_rport = NULL;
struct list_head *node = NULL;
struct list_head *next_node = NULL;
unsigned long flag = 0;
UNF_CHECK_VALID(0x3046, UNF_TRUE, v_lport, return NULL);
UNF_CHECK_VALID(0x3046, UNF_TRUE, v_new_rport, return NULL);
lport = v_lport->root_lport;
disc = &v_lport->disc;
UNF_CHECK_VALID(0x3046, UNF_TRUE, lport, return NULL);
rport_pool = &lport->rport_pool;
spin_lock_irqsave(&disc->rport_busy_pool_lock, flag);
list_for_each_safe(node, next_node, &disc->list_busy_rports) {
/* According to N_Port_ID */
old_rport = list_entry(node, struct unf_rport_s, entry_rport);
if (old_rport->nport_id == v_nport_id)
break; /* find by N_Port_ID */
old_rport = NULL;
}
if (old_rport) {
spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag);
/* Use old R_Port & Add new R_Port back to R_Port Pool */
spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag);
clear_bit((int)new_rport->rport_index,
rport_pool->pul_rpi_bitmap);
list_add_tail(&new_rport->entry_rport,
&rport_pool->list_rports_pool);
rport_pool->rport_pool_count++;
spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock,
flag);
unf_check_rport_pool_status(lport);
return (void *)old_rport;
}
if (unf_alloc_llrport_resource(v_lport, new_rport,
v_nport_id) != RETURN_OK) {
spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag);
/* Add new R_Port back to R_Port Pool */
spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag);
clear_bit((int)new_rport->rport_index,
rport_pool->pul_rpi_bitmap);
list_add_tail(&new_rport->entry_rport,
&rport_pool->list_rports_pool);
rport_pool->rport_pool_count++;
spin_unlock_irqrestore(
&rport_pool->rport_free_pool_lock, flag);
unf_check_rport_pool_status(lport);
return NULL;
}
/* Add new R_Port to busy list */
list_add_tail(&new_rport->entry_rport,
&disc->list_busy_rports);
new_rport->nport_id = v_nport_id; /* set R_Port N_Port_ID */
/* set L_Port N_Port_ID */
new_rport->local_nport_id = v_lport->nport_id;
spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag);
unf_init_rport_params(new_rport, v_lport);
return (void *)new_rport;
}
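/* Two allocation paths: UNF_PORT_TYPE_DISC draws a lightweight
 * struct unf_disc_rport_s from the disc R_Port pool, while the FC
 * path takes a full struct unf_rport_s from list_rports_pool, then
 * allocates its RPI index and links it into the busy list.
 */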
void *unf_rport_get_free_and_init(void *v_lport,
unsigned int v_rport_type,
unsigned int v_nport_id)
{
struct unf_lport_s *lport = NULL;
struct unf_rport_pool_s *rport_pool = NULL;
struct unf_disc_s *disc = NULL;
struct unf_disc_s *v_port_disc = NULL;
struct unf_rport_s *rport = NULL;
struct list_head *list_head = NULL;
unsigned long flag = 0;
struct unf_disc_rport_s *disc_rport = NULL;
UNF_REFERNCE_VAR(v_rport_type);
UNF_REFERNCE_VAR(rport);
UNF_CHECK_VALID(0x3046, UNF_TRUE, v_lport, return NULL);
lport = ((struct unf_lport_s *)v_lport)->root_lport; /* ROOT L_Port */
UNF_CHECK_VALID(0x3047, UNF_TRUE, lport, return NULL);
/* Check L_Port state: NOP */
if (unlikely(atomic_read(&lport->port_no_operater_flag) ==
UNF_LPORT_NOP)) {
return NULL;
}
rport_pool = &lport->rport_pool;
disc = &lport->disc;
/* 1. UNF_PORT_TYPE_DISC: Get from disc_rport_pool */
if (v_rport_type == UNF_PORT_TYPE_DISC) {
v_port_disc = &(((struct unf_lport_s *)v_lport)->disc);
/* NOTE: list_disc_rports_pool used
* with list_disc_rport_busy
*/
spin_lock_irqsave(&disc->rport_busy_pool_lock, flag);
if (!list_empty(&disc->disc_rport_mgr.list_disc_rports_pool)) {
/* Get & delete from Disc R_Port Pool &
* Add it to Busy list
*/
list_head =
(&disc->disc_rport_mgr.list_disc_rports_pool)->next;
list_del_init(list_head);
disc_rport = list_entry(list_head,
struct unf_disc_rport_s,
entry_rport);
/* Set R_Port N_Port_ID */
disc_rport->nport_id = v_nport_id;
spin_unlock_irqrestore(&disc->rport_busy_pool_lock,
flag);
/* Add to list_disc_rport_busy */
spin_lock_irqsave(&v_port_disc->rport_busy_pool_lock,
flag);
list_add_tail(
list_head,
&v_port_disc->disc_rport_mgr.list_disc_rport_busy);
spin_unlock_irqrestore(
&v_port_disc->rport_busy_pool_lock, flag);
} else {
disc_rport = NULL;
spin_unlock_irqrestore(&disc->rport_busy_pool_lock,
flag);
}
/* NOTE: return */
return disc_rport;
}
/* 2. UNF_PORT_TYPE_FC (rport_pool): Get from list_rports_pool */
spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag);
if (!list_empty(&rport_pool->list_rports_pool)) {
/* Get & delete from R_Port free Pool */
list_head = (&rport_pool->list_rports_pool)->next;
list_del_init(list_head);
rport_pool->rport_pool_count--;
rport = list_entry(list_head, struct unf_rport_s, entry_rport);
} else {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x_0x%x) RPort pool is empty",
lport->port_id, lport->nport_id);
spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock,
flag);
/* NOTE: return */
return NULL;
}
spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flag);
/* 3. Alloc (& set bit) R_Port index */
if (unf_alloc_index_for_rport(lport, rport) != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x) allocate index for new RPort failed",
lport->nport_id);
/* Alloc failed: Add R_Port back to R_Port Pool */
spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag);
list_add_tail(&rport->entry_rport,
&rport_pool->list_rports_pool);
rport_pool->rport_pool_count++;
spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock,
flag);
unf_check_rport_pool_status(lport);
return NULL;
}
/* 4. Add R_Port to busy list */
rport = unf_add_rport_to_busy_list(v_lport, rport, v_nport_id);
UNF_REFERNCE_VAR(rport);
return (void *)rport;
}
static void unf_reset_rport_attribute(struct unf_rport_s *v_rport)
{
unsigned long flag = 0;
UNF_CHECK_VALID(0x3070, 1, v_rport, return);
spin_lock_irqsave(&v_rport->rport_state_lock, flag);
v_rport->pfn_unf_rport_call_back = NULL;
v_rport->lport = NULL;
v_rport->node_name = INVALID_VALUE64;
v_rport->port_name = INVALID_WWPN;
v_rport->nport_id = INVALID_VALUE32;
v_rport->local_nport_id = INVALID_VALUE32;
v_rport->max_frame_size = UNF_MAX_FRAME_SIZE;
v_rport->ed_tov = UNF_DEFAULT_EDTOV;
v_rport->ra_tov = UNF_DEFAULT_RATOV;
v_rport->rport_index = INVALID_VALUE32;
v_rport->scsi_id = INVALID_VALUE32;
v_rport->rport_alloc_jifs = INVALID_VALUE64;
/* ini or tgt */
v_rport->options = 0;
/* fcp conf */
v_rport->fcp_conf_needed = UNF_FALSE;
/* special req retry times */
v_rport->retries = 0;
v_rport->logo_retries = 0;
/* special req retry times */
v_rport->mas_retries = UNF_MAX_RETRY_COUNT;
/* for target mode */
v_rport->session = NULL;
v_rport->last_lport_ini_state = UNF_PORT_STATE_LINKDOWN;
v_rport->lport_ini_state = UNF_PORT_STATE_LINKDOWN;
v_rport->rp_state = UNF_RPORT_ST_INIT;
v_rport->last_lport_tgt_state = UNF_PORT_STATE_LINKDOWN;
v_rport->lport_tgt_state = UNF_PORT_STATE_LINKDOWN;
v_rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS;
v_rport->disc_done = 0;
/* for scsi */
v_rport->data_thread = NULL;
spin_unlock_irqrestore(&v_rport->rport_state_lock, flag);
}
static unsigned int unf_rport_remove(void *v_rport)
{
/* remove_old_rport/... --->>> rport_ref_dec --->>> rport_remove */
struct unf_lport_s *lport = NULL;
struct unf_rport_s *rport = NULL;
struct unf_rport_pool_s *rport_pool = NULL;
unsigned long flag = 0;
unsigned int rport_index = 0;
UNF_CHECK_VALID(0x3050, UNF_TRUE,
v_rport, return UNF_RETURN_ERROR);
rport = (struct unf_rport_s *)v_rport;
lport = rport->lport;
UNF_CHECK_VALID(0x3051, UNF_TRUE,
lport, return UNF_RETURN_ERROR);
rport_pool = &((struct unf_lport_s *)lport->root_lport)->rport_pool;
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO,
"[info]Remove RPort(0x%p) with remote_nport_id(0x%x) local_nport_id(0x%x)",
rport, rport->nport_id, rport->local_nport_id);
/* 1. Terminate open exchange before rport remove: set ABORT tag */
unf_cm_xchg_mgr_abort_io_by_id(lport, rport,
rport->nport_id, lport->nport_id, 0);
/* 2. Abort sfp exchange before rport remove */
unf_cm_xchg_mgr_abort_sfs_by_id(lport, rport,
rport->nport_id, lport->nport_id);
/* 3. Release R_Port resource: session reset/delete */
(void)unf_release_rport_res(lport, rport);
/* 4.1 Delete R_Port from disc destroy/delete list */
spin_lock_irqsave(&lport->disc.rport_busy_pool_lock, flag);
list_del_init(&rport->entry_rport);
spin_unlock_irqrestore(&lport->disc.rport_busy_pool_lock, flag);
rport_index = rport->rport_index; /* according to bitmap */
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_KEVENT,
"[event]Port(0x%x) release RPort(0x%x_%p) with index(0x%x)",
lport->port_id, rport->nport_id, rport, rport->rport_index);
unf_reset_rport_attribute(rport);
/* 4.2 Add rport to --->>> rport_pool (free pool) & clear bitmap */
spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag);
if (lport->low_level_func.rport_release_type ==
UNF_LOW_LEVEL_RELEASE_RPORT_SYNC) {
clear_bit((int)rport_index, rport_pool->pul_rpi_bitmap);
}
list_add_tail(&rport->entry_rport, &rport_pool->list_rports_pool);
rport_pool->rport_pool_count++;
spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flag);
unf_check_rport_pool_status((struct unf_lport_s *)lport->root_lport);
up(&rport->task_sema);
return RETURN_OK;
}
void unf_rport_ref_dec(struct unf_rport_s *v_rport)
{
unsigned long flag = 0;
UNF_CHECK_VALID(0x3011, UNF_TRUE, v_rport, return);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO,
"[info]Rport(0x%x) reference count is %d",
v_rport->nport_id, atomic_read(&v_rport->rport_ref_cnt));
spin_lock_irqsave(&v_rport->rport_state_lock, flag);
if (atomic_dec_and_test(&v_rport->rport_ref_cnt)) {
spin_unlock_irqrestore(&v_rport->rport_state_lock, flag);
(void)unf_rport_remove(v_rport);
} else {
spin_unlock_irqrestore(&v_rport->rport_state_lock, flag);
}
}
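/* R_Port login state machine: each unf_rport_stat_* helper maps
 * (current state, event) to the next state, e.g.
 * INIT -PLOGI-> PLOGI_WAIT -PRLI-> PRLI_WAIT -READY-> READY, with
 * LINK_DOWN funnelling the login states into CLOSING and
 * CLS_TIMEOUT moving CLOSING to DELETE.
 */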
static enum unf_rport_login_state_e unf_rport_stat_init(
enum unf_rport_login_state_e v_old_state,
enum unf_rport_event_e v_event)
{
enum unf_rport_login_state_e en_next_state = UNF_RPORT_ST_INIT;
switch (v_event) {
case UNF_EVENT_RPORT_LOGO: /* LOGO --->>> LOGO */
en_next_state = UNF_RPORT_ST_LOGO;
break;
case UNF_EVENT_RPORT_ENTER_PLOGI: /* PLOGI --->>> PLOGI_WAIT */
en_next_state = UNF_RPORT_ST_PLOGI_WAIT;
break;
case UNF_EVENT_RPORT_LINK_DOWN: /* Link Down --->>> Closing */
en_next_state = UNF_RPORT_ST_CLOSING;
break;
default:
en_next_state = v_old_state;
break;
}
return en_next_state;
}
static enum unf_rport_login_state_e unf_rport_stat_plogi_wait(
enum unf_rport_login_state_e v_old_state,
enum unf_rport_event_e v_event)
{
enum unf_rport_login_state_e en_next_state = UNF_RPORT_ST_INIT;
switch (v_event) {
case UNF_EVENT_RPORT_ENTER_PRLI: /* PRLI --->>> PRLI_WAIT */
en_next_state = UNF_RPORT_ST_PRLI_WAIT;
break;
case UNF_EVENT_RPORT_LINK_DOWN: /* Link Down --->>> closing */
en_next_state = UNF_RPORT_ST_CLOSING;
break;
case UNF_EVENT_RPORT_LOGO: /* LOGO --->>> LOGO */
en_next_state = UNF_RPORT_ST_LOGO;
break;
case UNF_EVENT_RPORT_RECOVERY: /* Recovery --->>> Ready */
en_next_state = UNF_RPORT_ST_READY;
break;
default:
en_next_state = v_old_state;
break;
}
return en_next_state;
}
static enum unf_rport_login_state_e unf_rport_stat_prli_wait(
enum unf_rport_login_state_e v_old_state,
enum unf_rport_event_e v_event)
{
enum unf_rport_login_state_e en_next_state = UNF_RPORT_ST_INIT;
switch (v_event) {
case UNF_EVENT_RPORT_READY: /* Ready --->>> Ready */
en_next_state = UNF_RPORT_ST_READY;
break;
case UNF_EVENT_RPORT_LOGO: /* LOGO --->>> LOGO */
en_next_state = UNF_RPORT_ST_LOGO;
break;
case UNF_EVENT_RPORT_LINK_DOWN: /* Link Down --->>> Closing */
en_next_state = UNF_RPORT_ST_CLOSING;
break;
case UNF_EVENT_RPORT_RECOVERY: /* Recovery --->>> Ready */
en_next_state = UNF_RPORT_ST_READY;
break;
default:
en_next_state = v_old_state;
break;
}
return en_next_state;
}
static enum unf_rport_login_state_e unf_rport_stat_ready(
enum unf_rport_login_state_e v_old_state,
enum unf_rport_event_e v_event)
{
enum unf_rport_login_state_e en_next_state = UNF_RPORT_ST_INIT;
switch (v_event) {
case UNF_EVENT_RPORT_LOGO: /* LOGO --->>> LOGO */
en_next_state = UNF_RPORT_ST_LOGO;
break;
case UNF_EVENT_RPORT_LINK_DOWN: /* Link Down --->>> closing */
en_next_state = UNF_RPORT_ST_CLOSING;
break;
case UNF_EVENT_RPORT_ENTER_PLOGI: /* ready --->>> plogi_wait */
en_next_state = UNF_RPORT_ST_PLOGI_WAIT;
break;
default:
en_next_state = v_old_state;
break;
}
return en_next_state;
}
static enum unf_rport_login_state_e unf_rport_stat_closing(
enum unf_rport_login_state_e v_old_state,
enum unf_rport_event_e v_event)
{
enum unf_rport_login_state_e en_next_state = UNF_RPORT_ST_INIT;
switch (v_event) {
case UNF_EVENT_RPORT_CLS_TIMEOUT: /* timeout --->>> delete */
en_next_state = UNF_RPORT_ST_DELETE;
break;
case UNF_EVENT_RPORT_RELOGIN: /* relogin --->>> INIT */
en_next_state = UNF_RPORT_ST_INIT;
break;
case UNF_EVENT_RPORT_RECOVERY: /* recovery --->>> ready */
en_next_state = UNF_RPORT_ST_READY;
break;
default:
en_next_state = v_old_state;
break;
}
return en_next_state;
}
static enum unf_rport_login_state_e unf_rport_stat_logo(
enum unf_rport_login_state_e v_old_state,
enum unf_rport_event_e v_event)
{
enum unf_rport_login_state_e en_next_state = UNF_RPORT_ST_INIT;
switch (v_event) {
case UNF_EVENT_RPORT_NORMAL_ENTER: /* normal enter --->>> closing */
en_next_state = UNF_RPORT_ST_CLOSING;
break;
case UNF_EVENT_RPORT_RECOVERY: /* recovery --->>> ready */
en_next_state = UNF_RPORT_ST_READY;
break;
default:
en_next_state = v_old_state;
break;
}
return en_next_state;
}
void unf_rport_state_ma(struct unf_rport_s *v_rport,
enum unf_rport_event_e v_event)
{
enum unf_rport_login_state_e en_old_state = UNF_RPORT_ST_INIT;
enum unf_rport_login_state_e en_next_state = UNF_RPORT_ST_INIT;
UNF_CHECK_VALID(0x3056, UNF_TRUE, v_rport, return);
en_old_state = v_rport->rp_state;
switch (v_rport->rp_state) {
/* State INIT */
case UNF_RPORT_ST_INIT:
en_next_state = unf_rport_stat_init(en_old_state, v_event);
break;
/* State PLOGI Wait */
case UNF_RPORT_ST_PLOGI_WAIT:
en_next_state = unf_rport_stat_plogi_wait(en_old_state,
v_event);
break;
/* State PRLI Wait */
case UNF_RPORT_ST_PRLI_WAIT:
en_next_state = unf_rport_stat_prli_wait(en_old_state,
v_event);
break;
/* State LOGO */
case UNF_RPORT_ST_LOGO:
en_next_state = unf_rport_stat_logo(en_old_state, v_event);
break;
/* State CLOSING */
case UNF_RPORT_ST_CLOSING:
en_next_state = unf_rport_stat_closing(en_old_state, v_event);
break;
/* State READY */
case UNF_RPORT_ST_READY:
en_next_state = unf_rport_stat_ready(en_old_state, v_event);
break;
/* State DELETE */
case UNF_RPORT_ST_DELETE:
default:
en_next_state = UNF_RPORT_ST_INIT;
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"[info]RPort(0x%x) hold state(0x%x)",
v_rport->nport_id, v_rport->rp_state);
break;
}
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MINOR,
"[info]RPort(0x%x) with oldstate(0x%x) event(0x%x) nextstate(0x%x)",
v_rport->nport_id, en_old_state, v_event, en_next_state);
unf_set_rport_state(v_rport, en_next_state);
}
void unf_clean_linkdown_rport(struct unf_lport_s *v_lport)
{
/* for L_Port's R_Port(s) */
struct unf_disc_s *disc = NULL;
struct list_head *node = NULL;
struct list_head *next_node = NULL;
struct unf_rport_s *rport = NULL;
struct unf_lport_s *lport = NULL;
unsigned long disc_lock_flag = 0;
unsigned long rport_lock_flag = 0;
UNF_CHECK_VALID(0x3058, UNF_TRUE, v_lport, return);
disc = &v_lport->disc;
/* for each busy R_Port */
spin_lock_irqsave(&disc->rport_busy_pool_lock, disc_lock_flag);
/* --->>> busy_rports */
list_for_each_safe(node, next_node, &disc->list_busy_rports) {
rport = list_entry(node, struct unf_rport_s, entry_rport);
/* 1. Prevent repeated processing: already Closing */
spin_lock_irqsave(&rport->rport_state_lock, rport_lock_flag);
if (rport->rp_state == UNF_RPORT_ST_CLOSING) {
spin_unlock_irqrestore(&rport->rport_state_lock,
rport_lock_flag);
continue;
}
/* 2. Increase ref_cnt to protect R_Port */
if (unf_rport_ref_inc(rport) != RETURN_OK) {
spin_unlock_irqrestore(&rport->rport_state_lock,
rport_lock_flag);
continue;
}
/* 3. Update R_Port state:
* Link Down Event --->>> closing state
*/
unf_rport_state_ma(rport, UNF_EVENT_RPORT_LINK_DOWN);
/* 4. Put R_Port from busy to destroy list */
list_del_init(&rport->entry_rport);
list_add_tail(&rport->entry_rport, &disc->list_destroy_rports);
lport = rport->lport;
spin_unlock_irqrestore(&rport->rport_state_lock,
rport_lock_flag);
/* 5. Schedule Closing work (Enqueuing workqueue) */
unf_schedule_closing_work(lport, rport);
/* 6. decrease R_Port ref_cnt (company with 2) */
unf_rport_ref_dec(rport);
}
spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_lock_flag);
}
void unf_rport_enter_closing(struct unf_rport_s *v_rport)
{
/*
 * Called by:
 * 1. the RSCN processor
 * 2. the LOGOUT processor
 *
 * Triggered from:
 * 1. R_Port Link Down
 * 2. R_Port enter LOGO
 */
unsigned long rport_lock_flag = 0;
unsigned int ret = UNF_RETURN_ERROR;
struct unf_lport_s *lport = NULL;
struct unf_disc_s *disc = NULL;
UNF_CHECK_VALID(0x3059, UNF_TRUE, v_rport, return);
/* 1. Increase ref_cnt to protect R_Port */
spin_lock_irqsave(&v_rport->rport_state_lock, rport_lock_flag);
ret = unf_rport_ref_inc(v_rport);
if (ret != RETURN_OK) {
spin_unlock_irqrestore(&v_rport->rport_state_lock,
rport_lock_flag);
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]RPort(0x%x_0x%p) is removing and no need process",
v_rport->nport_id, v_rport);
return;
}
/* NOTE: R_Port state has been set(with closing) */
lport = v_rport->lport;
spin_unlock_irqrestore(&v_rport->rport_state_lock, rport_lock_flag);
/* 2. Put R_Port from busy to destroy list */
disc = &lport->disc;
spin_lock_irqsave(&disc->rport_busy_pool_lock, rport_lock_flag);
list_del_init(&v_rport->entry_rport);
list_add_tail(&v_rport->entry_rport, &disc->list_destroy_rports);
spin_unlock_irqrestore(&disc->rport_busy_pool_lock, rport_lock_flag);
/* 3. Schedule Closing work (Enqueuing workqueue) */
unf_schedule_closing_work(lport, v_rport);
/* 4. dec R_Port ref_cnt */
unf_rport_ref_dec(v_rport);
}
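/* Error recovery: re-queue recovery_work after ed_tov milliseconds
 * while retries remain; once mas_retries is exhausted, give up and
 * push the R_Port into the LOGO flow instead.
 */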
void unf_rport_error_recovery(struct unf_rport_s *v_rport)
{
unsigned long delay = 0;
unsigned long flag = 0;
unsigned int ret = UNF_RETURN_ERROR;
UNF_CHECK_VALID(0x3060, UNF_TRUE, v_rport, return);
spin_lock_irqsave(&v_rport->rport_state_lock, flag);
ret = unf_rport_ref_inc(v_rport);
if (ret != RETURN_OK) {
spin_unlock_irqrestore(&v_rport->rport_state_lock, flag);
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]RPort(0x%x_0x%p) is removing and no need process",
v_rport->nport_id, v_rport);
return;
}
/* Check R_Port state */
if ((v_rport->rp_state == UNF_RPORT_ST_CLOSING) ||
(v_rport->rp_state == UNF_RPORT_ST_DELETE)) {
spin_unlock_irqrestore(&v_rport->rport_state_lock, flag);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"[info]RPort(0x%x_0x%p) offline and no need process",
v_rport->nport_id, v_rport);
unf_rport_ref_dec(v_rport);
return;
}
/* Check repeatability with recovery work */
if (delayed_work_pending(&v_rport->recovery_work)) {
spin_unlock_irqrestore(&v_rport->rport_state_lock, flag);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"[info]RPort(0x%x_0x%p) recovery work is running and no need process",
v_rport->nport_id, v_rport);
unf_rport_ref_dec(v_rport);
return;
}
/* NOTE: Re-login or Logout directly (recovery work) */
if (v_rport->retries < v_rport->mas_retries) {
v_rport->retries++;
delay = (unsigned long)v_rport->ed_tov;
if (queue_delayed_work(unf_work_queue,
&v_rport->recovery_work,
(unsigned long)msecs_to_jiffies(
(unsigned int)delay))) {
/* Inc ref_cnt: corresponding to this work timer */
(void)unf_rport_ref_inc(v_rport);
}
spin_unlock_irqrestore(&v_rport->rport_state_lock, flag);
} else {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]RPort(0x%x_0x%p) state(0x%x) retry login failed",
v_rport->nport_id, v_rport, v_rport->rp_state);
/* Update R_Port state: LOGO event --->>> ST_LOGO */
unf_rport_state_ma(v_rport, UNF_EVENT_RPORT_LOGO);
spin_unlock_irqrestore(&v_rport->rport_state_lock, flag);
/* Enter LOGO processer */
unf_rport_enter_logo(v_rport->lport, v_rport);
}
unf_rport_ref_dec(v_rport);
}
static unsigned int unf_rport_reuse_only(struct unf_rport_s *v_rport)
{
unsigned long flag = 0;
unsigned int ret = UNF_RETURN_ERROR;
UNF_CHECK_VALID(0x3061, UNF_TRUE,
v_rport, return UNF_RETURN_ERROR);
spin_lock_irqsave(&v_rport->rport_state_lock, flag);
ret = unf_rport_ref_inc(v_rport);
if (ret != RETURN_OK) {
spin_unlock_irqrestore(&v_rport->rport_state_lock, flag);
/* R_Port with delete state */
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]RPort(0x%x_0x%p) is removing and no need process",
v_rport->nport_id, v_rport);
return UNF_RETURN_ERROR;
}
/* R_Port State check: delete */
if ((v_rport->rp_state == UNF_RPORT_ST_DELETE) ||
(v_rport->rp_state == UNF_RPORT_ST_CLOSING)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]RPort(0x%x_0x%p) state(0x%x) is delete or closing no need process",
v_rport->nport_id, v_rport, v_rport->rp_state);
ret = UNF_RETURN_ERROR;
}
spin_unlock_irqrestore(&v_rport->rport_state_lock, flag);
unf_rport_ref_dec(v_rport);
return ret;
}
static unsigned int unf_rport_reuse_recover(struct unf_rport_s *v_rport)
{
unsigned long flags = 0;
unsigned int ret = UNF_RETURN_ERROR;
UNF_CHECK_VALID(0x3062, UNF_TRUE,
v_rport, return UNF_RETURN_ERROR);
spin_lock_irqsave(&v_rport->rport_state_lock, flags);
ret = unf_rport_ref_inc(v_rport);
if (ret != RETURN_OK) {
spin_unlock_irqrestore(&v_rport->rport_state_lock, flags);
/* R_Port with delete state */
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]RPort(0x%x_0x%p) is removing and no need process",
v_rport->nport_id, v_rport);
return UNF_RETURN_ERROR;
}
/* R_Port state check: delete */
if ((v_rport->rp_state == UNF_RPORT_ST_DELETE) ||
(v_rport->rp_state == UNF_RPORT_ST_CLOSING)) {
ret = UNF_RETURN_ERROR;
}
/* Update R_Port state: recovery --->>> ready */
unf_rport_state_ma(v_rport, UNF_EVENT_RPORT_RECOVERY);
spin_unlock_irqrestore(&v_rport->rport_state_lock, flags);
unf_rport_ref_dec(v_rport);
return ret;
}
static unsigned int unf_rport_reuse_init(struct unf_rport_s *v_rport)
{
unsigned long flag = 0;
unsigned int ret = UNF_RETURN_ERROR;
UNF_CHECK_VALID(0x3063, UNF_TRUE,
v_rport, return UNF_RETURN_ERROR);
spin_lock_irqsave(&v_rport->rport_state_lock, flag);
ret = unf_rport_ref_inc(v_rport);
if (ret != RETURN_OK) {
spin_unlock_irqrestore(&v_rport->rport_state_lock, flag);
/* R_Port with delete state */
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]RPort(0x%x_0x%p) is removing and no need process",
v_rport->nport_id, v_rport);
return UNF_RETURN_ERROR;
}
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO,
"[info]RPort(0x%x)'s state is 0x%x with use_init flag",
v_rport->nport_id, v_rport->rp_state);
/* R_Port State check: delete */
if ((v_rport->rp_state == UNF_RPORT_ST_DELETE) ||
(v_rport->rp_state == UNF_RPORT_ST_CLOSING)) {
ret = UNF_RETURN_ERROR;
} else {
/* Update R_Port state: re-enter Init state */
unf_set_rport_state(v_rport, UNF_RPORT_ST_INIT);
}
spin_unlock_irqrestore(&v_rport->rport_state_lock, flag);
unf_rport_ref_dec(v_rport);
return ret;
}
struct unf_rport_s *unf_get_rport_by_nport_id(struct unf_lport_s *v_lport,
unsigned int nport_id)
{
struct unf_lport_s *lport = NULL;
struct unf_disc_s *disc = NULL;
struct unf_rport_s *rport = NULL;
struct list_head *node = NULL;
struct list_head *next_node = NULL;
unsigned long flag = 0;
struct unf_rport_s *find_rport = NULL;
UNF_CHECK_VALID(0x3048, UNF_TRUE, v_lport, return NULL);
lport = (struct unf_lport_s *)v_lport;
disc = &lport->disc;
/* for each r_port from rport_busy_list: compare N_Port_ID */
spin_lock_irqsave(&disc->rport_busy_pool_lock, flag);
list_for_each_safe(node, next_node, &disc->list_busy_rports) {
rport = list_entry(node, struct unf_rport_s, entry_rport);
if (rport && rport->nport_id == nport_id) {
find_rport = rport;
break;
}
}
spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag);
return find_rport;
}
struct unf_rport_s *unf_get_safe_rport(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport,
enum unf_rport_reuse_flag_e v_reuse_flag,
unsigned int v_nport_id)
{
/*
* New add or plug
*
* retry_flogi --->>> reuse_only
* name_server_register --->>> reuse_only
* SNS_plogi --->>> reuse_only
* enter_flogi --->>> reuse_only
* logout --->>> reuse_only
* flogi_handler --->>> reuse_only
* plogi_handler --->>> reuse_only
* adisc_handler --->>> reuse_recovery
* logout_handler --->>> reuse_init
* prlo_handler --->>> reuse_init
* login_with_loop --->>> reuse_only
* gffid_callback --->>> reuse_only
* delay_plogi --->>> reuse_only
* gffid_rjt --->>> reuse_only
* gffid_rsp_unknown --->>> reuse_only
* gpnid_acc --->>> reuse_init
* fdisc_callback --->>> reuse_only
* flogi_acc --->>> reuse_only
* plogi_acc --->>> reuse_only
* logo_callback --->>> reuse_init
* rffid_callback --->>> reuse_only
*/
#define UNF_AVOID_LINK_FLASH_TIME 3000
struct unf_rport_s *rport = v_rport;
unsigned int ret = UNF_RETURN_ERROR;
UNF_CHECK_VALID(0x3075, UNF_TRUE, v_lport, return NULL);
/* 1. Alloc New R_Port or Update R_Port Property */
if (!rport) {
/* If NULL, get/Alloc new node
* (R_Port from R_Port pool) directly
*/
rport = unf_rport_get_free_and_init(v_lport, UNF_PORT_TYPE_FC,
v_nport_id);
} else {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO,
"[info]Port(0x%x) get exist RPort(0x%x) with state(0x%x) and reuse_flag(0x%x)",
v_lport->port_id, rport->nport_id,
rport->rp_state, v_reuse_flag);
switch (v_reuse_flag) {
case UNF_RPORT_REUSE_ONLY:
ret = unf_rport_reuse_only(rport);
if (ret != RETURN_OK) {
/* R_Port within delete list: need get new */
rport = unf_rport_get_free_and_init(
v_lport,
UNF_PORT_TYPE_FC,
v_nport_id);
}
break;
case UNF_RPORT_REUSE_INIT:
ret = unf_rport_reuse_init(rport);
if (ret != RETURN_OK) {
/* R_Port within delete list: need get new */
rport = unf_rport_get_free_and_init(
v_lport,
UNF_PORT_TYPE_FC,
v_nport_id);
}
break;
case UNF_RPORT_REUSE_RECOVER:
ret = unf_rport_reuse_recover(rport);
if (ret != RETURN_OK) {
/* R_Port within delete list,
* NOTE: do nothing
*/
rport = NULL;
}
break;
default:
break;
}
}
return rport;
}
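/* WWPN -> port_feature cache backed by two lists: hits are moved to
 * the head of the busy list (most recently used first), and a miss
 * recycles the tail of the free list as the new record, returning
 * UNF_PORT_MODE_UNKNOWN until unf_update_port_feature() fills it in.
 */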
unsigned int unf_get_port_feature(unsigned long long v_wwpn)
{
struct unf_rport_feature_recard_s *port_fea = NULL;
struct list_head *node = NULL;
struct list_head *next_node = NULL;
unsigned long flags = 0;
spin_lock_irqsave(&port_fea_pool->port_fea_pool_lock, flags);
list_for_each_safe(node, next_node, &port_fea_pool->list_busy_head) {
port_fea = list_entry(node, struct unf_rport_feature_recard_s,
entry_feature);
if (v_wwpn == port_fea->wwpn) {
list_del(&port_fea->entry_feature);
list_add(&port_fea->entry_feature,
&port_fea_pool->list_busy_head);
spin_unlock_irqrestore(
&port_fea_pool->port_fea_pool_lock, flags);
return port_fea->port_feature;
}
}
list_for_each_safe(node, next_node, &port_fea_pool->list_free_head) {
port_fea = list_entry(node, struct unf_rport_feature_recard_s,
entry_feature);
if (v_wwpn == port_fea->wwpn) {
list_del(&port_fea->entry_feature);
list_add(&port_fea->entry_feature,
&port_fea_pool->list_busy_head);
spin_unlock_irqrestore(
&port_fea_pool->port_fea_pool_lock, flags);
return port_fea->port_feature;
}
}
/* WWPN not found in either list: recycle a record */
if (list_empty(&port_fea_pool->list_free_head)) {
/* free list is empty: move all busy records to the free list.
 * Note: the original struct-copy swap of the two list heads left
 * neighbour pointers referencing the old heads; list_splice_init()
 * moves the entries correctly.
 */
list_splice_init(&port_fea_pool->list_busy_head,
&port_fea_pool->list_free_head);
}
port_fea = list_entry((&port_fea_pool->list_free_head)->prev,
struct unf_rport_feature_recard_s,
entry_feature);
list_del(&port_fea->entry_feature);
list_add(&port_fea->entry_feature, &port_fea_pool->list_busy_head);
port_fea->wwpn = v_wwpn;
port_fea->port_feature = UNF_PORT_MODE_UNKNOWN;
spin_unlock_irqrestore(&port_fea_pool->port_fea_pool_lock, flags);
return UNF_PORT_MODE_UNKNOWN;
}
void unf_update_port_feature(unsigned long long v_wwpn,
unsigned int v_port_feature)
{
struct unf_rport_feature_recard_s *port_fea = NULL;
struct list_head *node = NULL;
struct list_head *next_node = NULL;
unsigned long flags = 0;
spin_lock_irqsave(&port_fea_pool->port_fea_pool_lock, flags);
list_for_each_safe(node, next_node, &port_fea_pool->list_busy_head) {
port_fea = list_entry(node,
struct unf_rport_feature_recard_s,
entry_feature);
if (v_wwpn == port_fea->wwpn) {
port_fea->port_feature = v_port_feature;
list_del(&port_fea->entry_feature);
list_add(&port_fea->entry_feature,
&port_fea_pool->list_busy_head);
spin_unlock_irqrestore(
&port_fea_pool->port_fea_pool_lock, flags);
return;
}
}
list_for_each_safe(node, next_node, &port_fea_pool->list_free_head) {
port_fea = list_entry(node, struct unf_rport_feature_recard_s,
entry_feature);
if (v_wwpn == port_fea->wwpn) {
port_fea->port_feature = v_port_feature;
list_del(&port_fea->entry_feature);
list_add(&port_fea->entry_feature,
&port_fea_pool->list_busy_head);
spin_unlock_irqrestore(
&port_fea_pool->port_fea_pool_lock, flags);
return;
}
}
spin_unlock_irqrestore(&port_fea_pool->port_fea_pool_lock, flags);
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __UNF_RPORT_H
#define __UNF_RPORT_H
#define UNF_MAX_SCSI_ID 2048
#define UNF_LOSE_TMO 30
#define UNF_RPORT_INVALID_INDEX 0xffff
/* RSCN compare DISC list with local RPort macro */
#define UNF_RPORT_NEED_PROCESS 0x1
#define UNF_RPORT_ONLY_IN_DISC_PROCESS 0x2
#define UNF_RPORT_ONLY_IN_LOCAL_PROCESS 0x3
#define UNF_RPORT_IN_DISC_AND_LOCAL_PROCESS 0x4
#define UNF_RPORT_NOT_NEED_PROCESS 0x5
#define UNF_ECHO_SEND_MAX_TIMES 1
extern struct unf_rport_feature_pool_s *port_fea_pool;
enum unf_rport_login_state_e {
UNF_RPORT_ST_INIT = 0x1000, /* initialized */
UNF_RPORT_ST_PLOGI_WAIT, /* waiting for PLOGI completion */
UNF_RPORT_ST_PRLI_WAIT, /* waiting for PRLI completion */
UNF_RPORT_ST_READY, /* ready for use */
UNF_RPORT_ST_LOGO, /* port logout sent */
UNF_RPORT_ST_CLOSING, /* being closed */
UNF_RPORT_ST_DELETE, /* port being deleted */
UNF_RPORT_ST_BUTT
};
enum unf_rport_event_e {
UNF_EVENT_RPORT_NORMAL_ENTER = 0x9000,
UNF_EVENT_RPORT_ENTER_PLOGI = 0x9001,
UNF_EVENT_RPORT_ENTER_PRLI = 0x9002,
UNF_EVENT_RPORT_READY = 0x9003,
UNF_EVENT_RPORT_LOGO = 0x9004,
UNF_EVENT_RPORT_CLS_TIMEOUT = 0x9005,
UNF_EVENT_RPORT_RECOVERY = 0x9006,
UNF_EVENT_RPORT_RELOGIN = 0x9007,
UNF_EVENT_RPORT_LINK_DOWN = 0x9008,
UNF_EVENT_RPORT_BUTT
};
/* RPort local link state */
enum unf_port_state_e {
UNF_PORT_STATE_LINKUP = 0x1001,
UNF_PORT_STATE_LINKDOWN = 0x1002
};
enum unf_rport_reuse_flag_e {
UNF_RPORT_REUSE_ONLY = 0x1001,
UNF_RPORT_REUSE_INIT = 0x1002,
UNF_RPORT_REUSE_RECOVER = 0x1003
};
struct unf_disc_rport_s {
/* RPort entry */
struct list_head entry_rport;
unsigned int nport_id; /* Remote port NPortID */
unsigned int disc_done; /* 1:Disc done */
};
struct unf_rport_feature_pool_s {
struct list_head list_busy_head;
struct list_head list_free_head;
void *p_port_feature_pool_addr;
spinlock_t port_fea_pool_lock;
};
struct unf_rport_feature_recard_s {
struct list_head entry_feature;
unsigned long long wwpn;
unsigned int port_feature;
unsigned int reserved;
};
struct unf_os_thread_private_data_s {
struct list_head list;
spinlock_t spin_lock;
struct task_struct *thread;
unsigned int in_process;
unsigned int cpu_id;
atomic_t user_count;
};
/* Remote Port struct */
struct unf_rport_s {
unsigned int max_frame_size;
unsigned int supported_classes;
/* Dynamic Attributes */
/* Remote Port loss timeout in seconds. */
unsigned int dev_loss_tmo;
unsigned long long node_name;
unsigned long long port_name;
unsigned int nport_id; /* Remote port NPortID */
unsigned int local_nport_id;
unsigned int roles;
/* Remote port local INI state */
enum unf_port_state_e lport_ini_state;
enum unf_port_state_e last_lport_ini_state;
/* Remote port local TGT state */
enum unf_port_state_e lport_tgt_state;
enum unf_port_state_e last_lport_tgt_state;
/* Port Type:fc */
unsigned int port_type;
/* RPort reference counter */
atomic_t rport_ref_cnt;
/* Pending IO count */
atomic_t pending_io_cnt;
/* RPort entry */
struct list_head entry_rport;
/* Port state; delay reclaim until rp_state is complete */
enum unf_rport_login_state_e rp_state;
unsigned int disc_done; /* 1:Disc done */
struct unf_lport_s *lport;
void *rport;
spinlock_t rport_state_lock;
/* Port attribution */
unsigned int ed_tov;
unsigned int ra_tov;
unsigned int options; /* ini or tgt */
unsigned int last_report_linkup_options;
unsigned int fcp_conf_needed; /* INI Rport send FCP CONF flag */
unsigned int tape_support_needed; /* INI tape support flag */
unsigned int retries; /* special req retry times */
unsigned int logo_retries; /* logo error recovery retry times */
unsigned int mas_retries; /* special req retry times */
/* Rport alloc jiffies */
unsigned long long rport_alloc_jifs;
void *session;
/* binding with SCSI */
unsigned int scsi_id;
/* disc list compare flag */
unsigned int rscn_position;
unsigned int rport_index;
/* RPort timer,closing status */
struct work_struct closing_work;
/* RPort timer,rport linkup */
struct work_struct start_work;
/* RPort timer,recovery */
struct delayed_work recovery_work;
/* RPort timer,TGT mode,PRLI waiting */
struct delayed_work open_work;
struct semaphore task_sema;
/* Callback after rport Ready/delete [with state: ok/fail].
 * Create/free the TGT session here.
 * input: L_Port, R_Port, state
 * ready --create session; delete --free session
 */
void (*pfn_unf_rport_call_back)(void *, void *, unsigned int);
struct unf_os_thread_private_data_s *data_thread;
};
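/* DFX counter helpers below: bump per-scsi_id statistics in the
 * wwn_rport_info image table, guarding against out-of-range ids and
 * missing counter blocks.
 */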
#define UNF_IO_RESULT_CNT(v_scsi_table, v_scsi_id, v_io_result) \
do { \
if (likely(((v_io_result) < UNF_MAX_IO_RETURN_VALUE) && \
((v_scsi_id) < UNF_MAX_SCSI_ID) && \
((v_scsi_table)->wwn_rport_info_table) && \
(v_scsi_table->wwn_rport_info_table[v_scsi_id].dfx_counter))) { \
atomic64_inc(&v_scsi_table->wwn_rport_info_table[v_scsi_id].dfx_counter->io_done_cnt[v_io_result]); \
} else { \
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, \
UNF_LOG_EQUIP_ATT, UNF_ERR, \
"[err] io return value(0x%x) or scsi_id(0x%x) is invalid", \
v_io_result, v_scsi_id); \
} \
} while (0)
#define UNF_SCSI_CMD_CNT(v_scsi_table, v_scsi_id, v_io_type) \
do { \
if (likely(((v_io_type) < UNF_MAX_SCSI_CMD) && \
((v_scsi_id) < UNF_MAX_SCSI_ID) && \
((v_scsi_table)->wwn_rport_info_table) && \
(v_scsi_table->wwn_rport_info_table[v_scsi_id].dfx_counter))) { \
atomic64_inc(&((v_scsi_table->wwn_rport_info_table[v_scsi_id]).dfx_counter->scsi_cmd_cnt[v_io_type])); \
} else { \
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, \
UNF_LOG_EQUIP_ATT, UNF_ERR, \
"[err] scsi_cmd(0x%x) or scsi_id(0x%x) is invalid", \
v_io_type, v_scsi_id); \
} \
} while (0)
#define UNF_SCSI_ERROR_HANDLE_CNT(v_scsi_table, v_scsi_id, v_io_type) \
do { \
if (likely(((v_io_type) < UNF_SCSI_ERROR_HANDLE_BUTT) && \
((v_scsi_id) < UNF_MAX_SCSI_ID) && \
((v_scsi_table)->wwn_rport_info_table) && \
(v_scsi_table->wwn_rport_info_table[v_scsi_id].dfx_counter))) { \
atomic_inc(&v_scsi_table->wwn_rport_info_table[v_scsi_id].dfx_counter->error_handle[v_io_type]); \
} else { \
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, \
UNF_LOG_EQUIP_ATT, UNF_ERR, \
"[err] scsi_cmd(0x%x) or scsi_id(0x%x) is invalid", \
v_io_type, v_scsi_id); \
} \
} while (0)
#define UNF_SCSI_ERROR_HANDLE_RESULT_CNT(v_scsi_table, v_scsi_id, v_io_type) \
do { \
if (likely(((v_io_type) < UNF_SCSI_ERROR_HANDLE_BUTT) && \
((v_scsi_id) < UNF_MAX_SCSI_ID) && \
((v_scsi_table)->wwn_rport_info_table) && \
(v_scsi_table->wwn_rport_info_table[v_scsi_id].dfx_counter))) { \
atomic_inc(&v_scsi_table->wwn_rport_info_table[v_scsi_id].dfx_counter->error_handle_result[v_io_type]); \
} else { \
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, \
UNF_LOG_EQUIP_ATT, UNF_ERR, \
"[err] scsi_cmd(0x%x) or scsi_id(0x%x) is invalid", \
v_io_type, v_scsi_id); \
} \
} while (0)
void unf_rport_state_ma(struct unf_rport_s *v_rport,
enum unf_rport_event_e v_event);
void unf_update_lport_state_by_linkup_event(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport,
unsigned int rport_att);
void unf_rport_enter_closing(struct unf_rport_s *v_rport);
void unf_clean_linkdown_rport(struct unf_lport_s *v_lport);
void unf_rport_error_recovery(struct unf_rport_s *v_rport);
struct unf_rport_s *unf_get_rport_by_nport_id(struct unf_lport_s *v_lport,
unsigned int nport_id);
void unf_rport_enter_logo(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport);
unsigned int unf_rport_ref_inc(struct unf_rport_s *v_rport);
void unf_rport_ref_dec(struct unf_rport_s *v_rport);
struct unf_rport_s *unf_rport_set_qualifier_key_reuse(
struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport_by_nport_id,
struct unf_rport_s *v_rport_by_wwpn,
unsigned long long v_wwpn,
unsigned int v_sid);
void unf_rport_delay_login(struct unf_rport_s *v_rport);
struct unf_rport_s *unf_find_valid_rport(struct unf_lport_s *v_lport,
unsigned long long v_wwpn,
unsigned int v_sid);
void unf_rport_linkdown(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport);
struct unf_rport_s *unf_get_safe_rport(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport,
enum unf_rport_reuse_flag_e v_reuse_flag,
unsigned int v_nport_id);
void *unf_rport_get_free_and_init(void *v_lport,
unsigned int v_port_type,
unsigned int v_nport_id);
unsigned int unf_free_scsi_id(struct unf_lport_s *v_lport,
unsigned int v_scsi_id);
void unf_schedule_closing_work(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport);
void unf_sesion_loss_timeout(struct work_struct *v_work);
unsigned int unf_get_port_feature(unsigned long long v_wwpn);
void unf_update_port_feature(unsigned long long v_wwpn,
unsigned int v_port_feature);
#endif