Commit 7a80da77 authored by Chenguangli, committed by Yang Yingliang

scsi/hifc: add hifc driver FC service module

driver inclusion
category: feature
bugzilla: NA

-----------------------------------------------------------------------

This module is used to process services related to the FC protocol.
Signed-off-by: Chenguangli <chenguangli2@huawei.com>
Reviewed-by: Zengweiliang <zengweiliang.zengweiliang@huawei.com>
Acked-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 418ca332
@@ -1113,6 +1113,8 @@ source "drivers/scsi/qla4xxx/Kconfig"
source "drivers/scsi/qedi/Kconfig"
source "drivers/scsi/qedf/Kconfig"
source "drivers/scsi/huawei/Kconfig"
config SCSI_LPFC
tristate "Emulex LightPulse Fibre Channel Support"
depends on PCI && SCSI
......
@@ -83,6 +83,7 @@ obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/
obj-$(CONFIG_SCSI_QLA_ISCSI) += libiscsi.o qla4xxx/
obj-$(CONFIG_SCSI_LPFC) += lpfc/
obj-$(CONFIG_SCSI_HUAWEI_FC) += huawei/
obj-$(CONFIG_SCSI_BFA_FC) += bfa/
obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor/
obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o
......
#
# Huawei driver configuration
#
config SCSI_HUAWEI_FC
tristate "Huawei devices"
depends on PCI && SCSI
depends on SCSI_FC_ATTRS
default m
---help---
If you have a Fibre Channel PCI card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
the questions about Huawei cards. If you say Y, you will be asked
for your specific card in the following questions.
if SCSI_HUAWEI_FC
source "drivers/scsi/huawei/hifc/Kconfig"
endif # SCSI_HUAWEI_FC
#
# Makefile for the Huawei device drivers.
#
obj-$(CONFIG_SCSI_FC_HIFC) += hifc/
#
# Huawei driver configuration
#
config SCSI_FC_HIFC
tristate "Huawei hifc Fibre Channel Support"
default m
depends on PCI && SCSI
depends on SCSI_FC_ATTRS
---help---
This driver supports Huawei Fibre Channel PCI and
PCIe host adapters.
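For reference, a minimal .config fragment that builds both options as modules might look like the following (this assumes the rest of the SCSI FC stack, in particular CONFIG_SCSI_FC_ATTRS, is already enabled so the options are selectable):
CONFIG_SCSI_HUAWEI_FC=m
CONFIG_SCSI_FC_HIFC=m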
obj-$(CONFIG_SCSI_FC_HIFC) += hifc.o
hifc-objs += hifc_utils.o
hifc-objs += hifc_hba.o
hifc-objs += hifc_portmng.o
hifc-objs += hifc_module.o
hifc-objs += hifc_chipitf.o
hifc-objs += hifc_io.o
hifc-objs += hifc_queue.o
hifc-objs += hifc_service.o
hifc-objs += hifc_wqe.o
hifc-objs += hifc_cfg.o
hifc-objs += hifc_lld.o
hifc-objs += unf_io.o
hifc-objs += unf_io_abnormal.o
hifc-objs += unf_scsi.o
hifc-objs += unf_init.o
hifc-objs += unf_event.o
hifc-objs += unf_exchg.o
hifc-objs += unf_lport.o
hifc-objs += unf_disc.o
hifc-objs += unf_rport.o
hifc-objs += unf_service.o
hifc-objs += unf_portman.o
hifc-objs += unf_npiv.o
hifc-objs += hifc_sml.o
hifc-objs += hifc_tool.o
hifc-objs += hifc_tool_hw.o
hifc-objs += hifc_dbgtool_knl.o
hifc-objs += hifc_hwif.o
hifc-objs += hifc_eqs.o
hifc-objs += hifc_api_cmd.o
hifc-objs += hifc_mgmt.o
hifc-objs += hifc_wq.o
hifc-objs += hifc_cmdq.o
hifc-objs += hifc_hwdev.o
hifc-objs += hifc_cqm_main.o
hifc-objs += hifc_cqm_object.o
// SPDX-License-Identifier: GPL-2.0
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#include "unf_log.h"
#include "unf_common.h"
#include "hifc_module.h"
#include "hifc_service.h"
#include "hifc_io.h"
#include "hifc_chipitf.h"
#define HIFC_RQ_ERROR_FRAME 0x100
#define HIFC_ELS_SRQ_BUF_NUM 0x9
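/*
 * Overview of this file (summarising the code below): outbound ELS frames
 * are assembled into WQEs and posted either to the Root SQ before a
 * session is offloaded (hifc_send_els_via_root) or to the Parent SQ after
 * offload (hifc_send_els_via_parent); hifc_send_els_cmnd picks the path
 * from the parent queue offload state. Inbound events arrive either on
 * the Root RQ (hifc_rcv_service_frame_from_rq) or on the Parent SCQ,
 * where scqe_handler_table dispatches each SCQE type to its handler.
 */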
/* Parent SCQ: ELS receive processing functions */
static unsigned int hifc_scq_rcv_els_cmd(struct hifc_hba_s *v_hba,
union hifcoe_scqe_u *v_scqe);
static unsigned int hifc_scq_rcv_els_rsp(struct hifc_hba_s *v_hba,
union hifcoe_scqe_u *v_scqe);
static unsigned int hifc_scq_rcv_els_rsp_sts(struct hifc_hba_s *v_hba,
union hifcoe_scqe_u *v_scqe);
/* Parent SCQ: GS RSP receive processing function */
static unsigned int hifc_scq_rcv_gs_rsp(struct hifc_hba_s *v_hba,
union hifcoe_scqe_u *v_scqe);
/* Parent SCQ: BLS RSP receive processing function */
static unsigned int hifc_scq_rcv_abts_rsp(struct hifc_hba_s *v_hba,
union hifcoe_scqe_u *v_scqe);
/* Parent SCQ: offload completion processing function */
static unsigned int hifc_scq_rcv_offload_sts(struct hifc_hba_s *v_hba,
union hifcoe_scqe_u *v_scqe);
/* Parent SCQ: flush SQ completion processing function */
static unsigned int hifc_scq_rcv_flush_sq_sts(struct hifc_hba_s *v_hba,
union hifcoe_scqe_u *v_scqe);
/* Parent SCQ: buffer clear completion processing function */
static unsigned int hifc_scq_rcv_buf_clear_sts(struct hifc_hba_s *v_hba,
union hifcoe_scqe_u *v_scqe);
static unsigned int hifc_scq_rcv_sess_rst_sts(struct hifc_hba_s *v_hba,
union hifcoe_scqe_u *v_scqe);
static unsigned int hifc_scq_rcv_clear_srq_sts(struct hifc_hba_s *v_hba,
union hifcoe_scqe_u *v_scqe);
static unsigned int hifc_scq_rcv_marker_sts(struct hifc_hba_s *v_hba,
union hifcoe_scqe_u *v_scqe);
static unsigned int hifc_scq_rcv_abts_marker_sts(struct hifc_hba_s *v_hba,
union hifcoe_scqe_u *v_scqe);
typedef unsigned int (*pfn_scqe_handler)(struct hifc_hba_s *,
union hifcoe_scqe_u *);
struct unf_scqe_handler_table_s {
unsigned int scqe_type; /* SCQE type */
int reclaim_sq_wpg;
pfn_scqe_handler pfn_scqe_handle_fun;
};
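/*
 * SCQE dispatch table: hifc_rcv_scqe_entry_from_scq() walks this table,
 * matches the SCQE type reported by the hardware and invokes the
 * registered handler. reclaim_sq_wpg marks the SCQE types whose
 * completion allows the parent SQ WQE page to be reclaimed via
 * hifc_reclaim_sq_wqe_page().
 */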
struct unf_scqe_handler_table_s scqe_handler_table[] = {
{ /* INI rcvd ELS_CMND */
HIFC_SCQE_ELS_CMND,
UNF_FALSE,
hifc_scq_rcv_els_cmd
},
{ /* INI rcvd ELS_RSP */
HIFC_SCQE_ELS_RSP,
UNF_TRUE,
hifc_scq_rcv_els_rsp
},
{ /* INI rcvd GS_RSP */
HIFC_SCQE_GS_RSP,
UNF_TRUE,
hifc_scq_rcv_gs_rsp
},
{ /* INI rcvd BLS_RSP */
HIFC_SCQE_ABTS_RSP,
UNF_TRUE,
hifc_scq_rcv_abts_rsp
},
{ /* INI rcvd FCP RSP */
HIFC_SCQE_FCP_IRSP,
UNF_TRUE,
hifc_scq_recv_iresp
},
{ /* INI rcvd ELS_RSP STS(Done) */
HIFC_SCQE_ELS_RSP_STS,
UNF_TRUE,
hifc_scq_rcv_els_rsp_sts
},
{ /* INI rcvd Session enable STS */
HIFC_SCQE_SESS_EN_STS,
UNF_FALSE,
hifc_scq_rcv_offload_sts
},
{ /* INI rcvd flush (pending) SQ STS */
HIFC_SCQE_FLUSH_SQ_STS,
UNF_FALSE,
hifc_scq_rcv_flush_sq_sts
},
{ /* INI rcvd Buffer clear STS */
HIFC_SCQE_BUF_CLEAR_STS,
UNF_FALSE,
hifc_scq_rcv_buf_clear_sts
},
{ /* INI rcvd session reset STS */
HIFC_SCQE_SESS_RST_STS,
UNF_FALSE,
hifc_scq_rcv_sess_rst_sts
},
{ /* ELS SRQ */
HIFC_SCQE_CLEAR_SRQ_STS,
UNF_FALSE,
hifc_scq_rcv_clear_srq_sts
},
{ /* INI rcvd TMF RSP */
HIFC_SCQE_FCP_ITMF_RSP,
UNF_TRUE,
hifc_scq_recv_iresp
},
{ /* INI rcvd TMF Marker STS */
HIFC_SCQE_ITMF_MARKER_STS,
UNF_FALSE,
hifc_scq_rcv_marker_sts
},
{ /* INI rcvd ABTS Marker STS */
HIFC_SCQE_ABTS_MARKER_STS,
UNF_FALSE,
hifc_scq_rcv_abts_marker_sts
}
};
static unsigned int hifc_get_els_rps_pld_len(unsigned short type,
unsigned short cmnd,
unsigned int *v_els_acc_pld_len)
{
unsigned int ret = RETURN_OK;
UNF_CHECK_VALID(0x4917, UNF_TRUE, v_els_acc_pld_len,
return UNF_RETURN_ERROR);
/* RJT */
if (type == ELS_RJT) {
*v_els_acc_pld_len = UNF_ELS_ACC_RJT_LEN;
return RETURN_OK;
}
/* ACC */
switch (cmnd) {
/* FLOGI and PDISC use the same payload length as PLOGI */
case ELS_FLOGI:
case ELS_PDISC:
case ELS_PLOGI:
*v_els_acc_pld_len = UNF_PLOGI_ACC_PAYLOAD_LEN;
break;
case ELS_PRLI:
/* The PRLI ACC payload extends 12 bytes */
*v_els_acc_pld_len = UNF_PRLI_ACC_PAYLOAD_LEN -
UNF_PRLI_SIRT_EXTRA_SIZE;
break;
case ELS_LOGO:
*v_els_acc_pld_len = UNF_LOGO_ACC_PAYLOAD_LEN;
break;
case ELS_PRLO:
*v_els_acc_pld_len = UNF_PRLO_ACC_PAYLOAD_LEN;
break;
case ELS_RSCN:
*v_els_acc_pld_len = UNF_RSCN_ACC_PAYLOAD_LEN;
break;
case ELS_ADISC:
*v_els_acc_pld_len = UNF_ADISC_ACC_PAYLOAD_LEN;
break;
case ELS_RRQ:
*v_els_acc_pld_len = UNF_RRQ_ACC_PAYLOAD_LEN;
break;
case ELS_SCR:
*v_els_acc_pld_len = UNF_SCR_RSP_PAYLOAD_LEN;
break;
case ELS_ECHO:
*v_els_acc_pld_len = UNF_ECHO_ACC_PAYLOAD_LEN;
break;
case ELS_RLS:
*v_els_acc_pld_len = UNF_RLS_ACC_PAYLOAD_LEN;
break;
case ELS_REC:
*v_els_acc_pld_len = UNF_REC_ACC_PAYLOAD_LEN;
break;
default:
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Unknown ELS command(0x%x)", cmnd);
ret = UNF_RETURN_ERROR;
break;
}
return ret;
}
struct hifc_els_cmd_payload_table_s {
unsigned short cmnd; /* ELS type */
unsigned int req_pld_len;
unsigned int rsp_pld_len;
};
struct hifc_els_cmd_payload_table_s els_pld_table_map[] = {
{ ELS_FDISC,
UNF_FDISC_PAYLOAD_LEN,
UNF_FDISC_ACC_PAYLOAD_LEN
},
{ ELS_FLOGI,
UNF_FLOGI_PAYLOAD_LEN,
UNF_FLOGI_ACC_PAYLOAD_LEN
},
{ ELS_PLOGI,
UNF_PLOGI_PAYLOAD_LEN,
UNF_PLOGI_ACC_PAYLOAD_LEN
},
{ ELS_SCR,
UNF_SCR_PAYLOAD_LEN,
UNF_SCR_RSP_PAYLOAD_LEN
},
{ ELS_PDISC,
UNF_PDISC_PAYLOAD_LEN,
UNF_PDISC_ACC_PAYLOAD_LEN
},
{ ELS_LOGO,
UNF_LOGO_PAYLOAD_LEN,
UNF_LOGO_ACC_PAYLOAD_LEN
},
{ ELS_PRLO,
UNF_PRLO_PAYLOAD_LEN,
UNF_PRLO_ACC_PAYLOAD_LEN
},
{ ELS_ADISC,
UNF_ADISC_PAYLOAD_LEN,
UNF_ADISC_ACC_PAYLOAD_LEN
},
{ ELS_RRQ,
UNF_RRQ_PAYLOAD_LEN,
UNF_RRQ_ACC_PAYLOAD_LEN
},
{ ELS_RSCN,
0,
UNF_RSCN_ACC_PAYLOAD_LEN
},
{ ELS_ECHO,
UNF_ECHO_PAYLOAD_LEN,
UNF_ECHO_ACC_PAYLOAD_LEN
},
{ ELS_RLS,
UNF_RLS_PAYLOAD_LEN,
UNF_RLS_ACC_PAYLOAD_LEN
},
{ ELS_REC,
UNF_REC_PAYLOAD_LEN,
UNF_REC_ACC_PAYLOAD_LEN
}
};
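/*
 * Request/ACC payload length per ELS command, consumed by
 * hifc_get_els_req_and_acc_pld_len() below. PRLI is special-cased there
 * because its payload length accounts for the SIRT extension.
 */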
static unsigned int hifc_get_els_req_and_acc_pld_len(unsigned short cmnd,
unsigned int *req_pld_len,
unsigned int *rsp_pld_len)
{
unsigned int ret = RETURN_OK;
unsigned int i;
UNF_CHECK_VALID(0x4917, UNF_TRUE, req_pld_len, return UNF_RETURN_ERROR);
for (i = 0; i < (sizeof(els_pld_table_map) /
sizeof(struct hifc_els_cmd_payload_table_s)); i++) {
if (els_pld_table_map[i].cmnd == cmnd) {
*req_pld_len = els_pld_table_map[i].req_pld_len;
*rsp_pld_len = els_pld_table_map[i].rsp_pld_len;
return ret;
}
}
switch (cmnd) {
case ELS_PRLI:
/* If sirt is enabled, The PRLI ACC payload extends
* 12 bytes
*/
*req_pld_len = HIFC_GET_PRLI_PAYLOAD_LEN;
*rsp_pld_len = HIFC_GET_PRLI_PAYLOAD_LEN;
break;
default:
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT,
UNF_ERR, "[err]Unknown ELS_CMD(0x%x)", cmnd);
ret = UNF_RETURN_ERROR;
break;
}
return ret;
}
/*
* Function Name : hifc_get_els_frame_len
* Function Description: Get ELS Frame length
* Input Parameters : type,
* : cmnd
* Output Parameters : v_frame_len
* Return Type : unsigned int
*/
static unsigned int hifc_get_els_frame_len(unsigned short type,
unsigned short cmnd,
unsigned int *v_frame_len)
{
unsigned int ret = RETURN_OK;
unsigned int hdr_len = sizeof(struct unf_fchead_s);
unsigned int req_len = 0;
unsigned int rsp_len = 0;
UNF_CHECK_VALID(0x4917, UNF_TRUE, v_frame_len, return UNF_RETURN_ERROR);
if (type == ELS_RJT)
rsp_len = UNF_ELS_ACC_RJT_LEN;
else
ret = hifc_get_els_req_and_acc_pld_len(cmnd, &req_len,
&rsp_len);
if (ret == RETURN_OK)
*v_frame_len = hdr_len + ((type == ELS_ACC || type == ELS_RJT) ?
rsp_len : req_len);
return ret;
}
static void hifc_build_els_frame_header(unsigned short v_xid_base,
unsigned short v_cmnd_type,
unsigned short els_code,
struct unf_frame_pkg_s *v_pkg)
{
unsigned int fctl = 0;
unsigned int rctl = 0;
unsigned int type = 0;
struct unf_fchead_s *cm_fc_hdr_buf = NULL;
struct unf_fchead_s *pkg_fc_hdr_info = NULL;
pkg_fc_hdr_info = &v_pkg->frame_head;
cm_fc_hdr_buf = HIFC_GET_CMND_FC_HEADER(v_pkg);
if (v_cmnd_type == ELS_CMND) {
rctl = HIFC_FC_RCTL_ELS_REQ;
fctl = HIFC_FCTL_REQ;
/* When sending an ELS_CMD frame, adjust the OX_ID */
cm_fc_hdr_buf->oxid_rxid = pkg_fc_hdr_info->oxid_rxid +
((unsigned int)v_xid_base << 16);
} else {
rctl = HIFC_FC_RCTL_ELS_RSP;
fctl = HIFC_FCTL_RESP;
/* When sending an ELS_RSP frame, adjust the RX_ID */
cm_fc_hdr_buf->oxid_rxid = pkg_fc_hdr_info->oxid_rxid +
v_xid_base;
}
type = HIFC_FC_TYPE_ELS;
/* Get SID, DID, OXID, RXID from CM layer */
cm_fc_hdr_buf->rctl_did = pkg_fc_hdr_info->rctl_did;
cm_fc_hdr_buf->csctl_sid = pkg_fc_hdr_info->csctl_sid;
cm_fc_hdr_buf->parameter = 0;
/* R_CTL, CS_CTL, TYPE, F_CTL, SEQ_ID, DF_CTL, SEQ_CNT, LL filled */
UNF_SET_FC_HEADER_RCTL(cm_fc_hdr_buf, rctl);
UNF_SET_FC_HEADER_CS_CTL(cm_fc_hdr_buf, 0);
UNF_SET_FC_HEADER_TYPE(cm_fc_hdr_buf, type);
UNF_SET_FC_HEADER_FCTL(cm_fc_hdr_buf, fctl);
UNF_SET_FC_HEADER_SEQ_CNT(cm_fc_hdr_buf, 0);
UNF_SET_FC_HEADER_DF_CTL(cm_fc_hdr_buf, 0);
UNF_SET_FC_HEADER_SEQ_ID(cm_fc_hdr_buf, 0);
UNF_PRINT_SFS(UNF_INFO, 0, cm_fc_hdr_buf, sizeof(struct unf_fchead_s));
}
void hifc_save_login_para_in_sq_info(
struct hifc_hba_s *v_hba,
struct unf_port_login_parms_s *v_login_co_parms)
{
struct hifc_hba_s *hba = NULL;
unsigned int rport_index = v_login_co_parms->rport_index;
struct hifc_parent_sq_info_s *sq_info = NULL;
hba = (struct hifc_hba_s *)v_hba;
if (rport_index >= UNF_HIFC_MAXRPORT_NUM) {
HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]Port(0x%x) save login parms,but uplevel alloc invalid rport index: 0x%x",
hba->port_cfg.port_id, rport_index);
return;
}
sq_info =
&hba->parent_queue_mgr->parent_queues[rport_index].parent_sq_info;
sq_info->plogi_coparams.seq_cnt = v_login_co_parms->seq_cnt;
sq_info->plogi_coparams.ed_tov = v_login_co_parms->ed_tov;
sq_info->plogi_coparams.tx_mfs = (v_login_co_parms->tx_mfs <
HIFC_DEFAULT_TX_MAX_FREAM_SIZE) ? HIFC_DEFAULT_TX_MAX_FREAM_SIZE :
v_login_co_parms->tx_mfs;
sq_info->plogi_coparams.ed_tov_timer_val =
v_login_co_parms->ed_tov_timer_val;
}
static void hifc_save_default_plogi_param_in_ctx(
struct hifc_hba_s *v_hba,
struct hifcoe_parent_context_s *v_ctx,
struct unf_frame_pkg_s *v_pkg)
{
unsigned int tx_mfs = HIFC_DEFAULT_TX_MAX_FREAM_SIZE;
unsigned int did = 0;
did = UNF_GET_DID(v_pkg);
if (did == UNF_FC_FID_DIR_SERV)
tx_mfs = 2048;
v_ctx->sw_section.tx_mfs = cpu_to_be16((unsigned short)(tx_mfs));
}
static void hifc_save_plogi_acc_param_in_ctx(
struct hifc_hba_s *v_hba,
struct hifcoe_parent_context_s *v_ctx,
struct unf_frame_pkg_s *v_pkg)
{
#define HIFC_UCODE_MAX_PKT_SIZE_PER_DISPATCH ((8 * 1024))
struct unf_lgn_port_coparms_s *port_co_param = NULL;
struct unf_plogi_payload_s *plogi_acc_pld = NULL;
plogi_acc_pld = UNF_GET_PLOGI_ACC_PAYLOAD(v_pkg);
port_co_param = &plogi_acc_pld->parms.co_parms;
/* e_d_tov and seq_cnt */
hifc_big_to_cpu32(&v_ctx->sw_section.sw_ctxt_config.pctxt_val1,
sizeof(unsigned int));
v_ctx->sw_section.sw_ctxt_config.dw.e_d_tov =
port_co_param->e_d_tov_resolution;
v_ctx->sw_section.sw_ctxt_config.dw.seq_cnt =
port_co_param->seq_cnt;
hifc_cpu_to_big32(&v_ctx->sw_section.sw_ctxt_config.pctxt_val1,
sizeof(unsigned int));
v_ctx->sw_section.tx_mfs =
(unsigned short)(v_pkg->private[PKG_PRIVATE_RPORT_RX_SIZE]) <
HIFC_DEFAULT_TX_MAX_FREAM_SIZE ?
cpu_to_be16((unsigned short)HIFC_DEFAULT_TX_MAX_FREAM_SIZE) :
cpu_to_be16 ((unsigned short)
(v_pkg->private[PKG_PRIVATE_RPORT_RX_SIZE]));
v_ctx->sw_section.e_d_tov_timer_val =
cpu_to_be32(port_co_param->e_d_tov);
v_ctx->sw_section.mfs_unaligned_bytes =
cpu_to_be16(HIFC_UCODE_MAX_PKT_SIZE_PER_DISPATCH %
port_co_param->bb_receive_data_field_size);
}
static void hifc_recover_offloading_state(
struct hifc_parent_queue_info_s *v_prntq_info,
enum hifc_parent_queue_state_e offload_state)
{
unsigned long flag = 0;
spin_lock_irqsave(&v_prntq_info->parent_queue_state_lock, flag);
if (v_prntq_info->offload_state == HIFC_QUEUE_STATE_OFFLOADING)
v_prntq_info->offload_state = offload_state;
spin_unlock_irqrestore(&v_prntq_info->parent_queue_state_lock, flag);
}
static void hifc_save_magic_num_in_ctx(struct hifcoe_parent_context_s *v_ctx,
struct unf_frame_pkg_s *v_pkg)
{
/* The CID itself is initialized by the microcode.
 * The driver reuses the CID field to carry the magic number and the
 * microcode later overwrites it with the real CID.
 */
v_ctx->sw_section.cid = cpu_to_be32(UNF_GETXCHGALLOCTIME(v_pkg));
}
static void hifc_save_magic_num_in_nurmal_root_ts(
struct hifc_root_sqe_s *v_rt_sqe,
struct unf_frame_pkg_s *v_pkg)
{
v_rt_sqe->task_section.fc_dw1.magic_num = UNF_GETXCHGALLOCTIME(v_pkg);
}
static int hifc_check_need_delay_offload(
void *v_hba,
struct unf_frame_pkg_s *v_pkg,
unsigned int rport_idx,
struct hifc_parent_queue_info_s *v_cur_parent_queue,
struct hifc_parent_queue_info_s **v_offload_parnt_queue)
{
unsigned long flag = 0;
struct hifc_parent_queue_info_s *offload_parnt_queue = NULL;
spin_lock_irqsave(&v_cur_parent_queue->parent_queue_state_lock, flag);
if (v_cur_parent_queue->offload_state == HIFC_QUEUE_STATE_OFFLOADING) {
spin_unlock_irqrestore(
&v_cur_parent_queue->parent_queue_state_lock, flag);
offload_parnt_queue = hifc_find_offload_parent_queue(
v_hba,
v_pkg->frame_head.csctl_sid & UNF_NPORTID_MASK,
v_pkg->frame_head.rctl_did & UNF_NPORTID_MASK,
rport_idx);
if (offload_parnt_queue) {
*v_offload_parnt_queue = offload_parnt_queue;
return UNF_TRUE;
}
} else {
spin_unlock_irqrestore(
&v_cur_parent_queue->parent_queue_state_lock, flag);
}
return UNF_FALSE;
}
static unsigned int hifc_build_service_wqe_root_offload(
void *v_hba,
struct unf_frame_pkg_s *v_pkg,
struct hifc_parent_queue_info_s *v_parnt_qinfo,
struct hifc_root_sqe_s *v_sqe)
{
unsigned int cqm_xid = 0;
unsigned short els_cmnd_type = UNF_ZERO;
struct hifc_parent_ctx_s *parnt_ctx = NULL;
struct hifc_parent_sq_info_s *sq_info = NULL;
struct hifcoe_parent_context_s *v_ctx = NULL;
els_cmnd_type = HIFC_GET_ELS_RSP_TYPE(v_pkg->cmnd);
cqm_xid = hifc_get_parent_ctx_xid_by_pkg(v_hba, v_pkg);
/* An offload request is initiated only when the parent queue is in the
* initialized state
*/
if (v_parnt_qinfo->offload_state == HIFC_QUEUE_STATE_INITIALIZED) {
/* Obtain Parent Context and set WQE to off_load, GPA_Addr */
parnt_ctx = hifc_get_parnt_ctx_virt_addr_by_pkg(v_hba, v_pkg);
sq_info = hifc_find_parent_sq_by_pkg(v_hba, v_pkg);
if (unlikely((!parnt_ctx) || (!sq_info) ||
(cqm_xid == INVALID_VALUE32))) {
return UNF_RETURN_ERROR;
}
/* Fill in ROOT SQE with offload request */
hifc_build_els_wqe_root_offload(
v_sqe,
parnt_ctx->cqm_parent_ctx_obj->paddr,
cqm_xid);
/* If this is a PLOGI ACC, parse the PLOGI ACC negotiation
 * parameters and fill them into the context
 */
v_ctx = (struct hifcoe_parent_context_s *)
parnt_ctx->virt_parent_ctx;
if (els_cmnd_type == ELS_ACC)
hifc_save_plogi_acc_param_in_ctx(
(struct hifc_hba_s *)v_hba, v_ctx, v_pkg);
else
hifc_save_default_plogi_param_in_ctx(
(struct hifc_hba_s *)v_hba, v_ctx, v_pkg);
/* The SID DID parameter is updated to Parent SQ Qinfo */
sq_info->local_port_id = UNF_GET_SID(v_pkg);
sq_info->remote_port_id = UNF_GET_DID(v_pkg);
/* Transfers the key value to the ucode for offload */
hifc_big_to_cpu32(v_ctx->key, sizeof(v_ctx->key));
memcpy(v_ctx->key, &sq_info->local_port_id,
sizeof(sq_info->local_port_id));
memcpy((unsigned char *)v_ctx->key +
sizeof(sq_info->local_port_id),
&sq_info->remote_port_id,
sizeof(sq_info->remote_port_id));
hifc_cpu_to_big32(v_ctx->key, sizeof(v_ctx->key));
/* Update magic num to parent_ctx */
hifc_save_magic_num_in_ctx(v_ctx, v_pkg);
hifc_build_service_wqe_ctx_sge(
v_sqe, parnt_ctx->parent_ctx,
sizeof(struct hifcoe_parent_context_s));
v_parnt_qinfo->offload_state = HIFC_QUEUE_STATE_OFFLOADING;
} else {
/* If the connection is being uninstalled and the plogi is
* delivered through the root channel, the plogi must be carried
* to the ucode.
*/
v_sqe->task_section.fc_dw4.parent_xid = cqm_xid;
HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) send PLOGI with no offload while parent queue is not initialized status",
((struct hifc_hba_s *)v_hba)->port_cfg.port_id);
}
return RETURN_OK;
}
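/*
 * Root SQ send path: used while the session is not yet offloaded to a
 * parent queue. The SQE is assembled in a local variable, the FC header
 * is built from the CM package, and for a PLOGI an offload request
 * (parent context GPA plus SID/DID key) is piggybacked so the ucode can
 * create the session context. If there is a risk of a repeated offload,
 * the SQE is parked with hifc_push_delay_sqe() and the previous offload
 * state is restored.
 */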
static unsigned int hifc_send_els_via_root(void *v_hba,
struct unf_frame_pkg_s *v_pkg)
{
unsigned int ret = UNF_RETURN_ERROR;
unsigned short els_cmd_code = UNF_ZERO;
unsigned short els_cmnd_type = UNF_ZERO;
unsigned int frame_len = 0;
unsigned int exch_id = 0;
unsigned int scq_num = 0;
unsigned int rport_idx = 0;
int sqe_delay = UNF_FALSE;
void *frame_addr = NULL;
struct hifc_hba_s *hba = NULL;
struct hifc_parent_queue_info_s *prnt_qinfo = NULL;
struct hifc_parent_queue_info_s *offload_parnt_queue = NULL;
struct hifc_root_sqe_s *sqe = NULL;
struct hifc_root_sqe_s local_rt_sqe;
unsigned long flag = 0;
enum hifc_parent_queue_state_e last_offload_state =
HIFC_QUEUE_STATE_INITIALIZED;
struct hifc_destroy_ctrl_info_s destroy_sqe_info = { 0 };
unsigned long long frame_phy_addr;
/* The ROOT SQE is assembled in local variables and then copied to the
* queue memory
*/
sqe = &local_rt_sqe;
hba = (struct hifc_hba_s *)v_hba;
memset(sqe, 0, sizeof(local_rt_sqe));
/* Determine the ELS type from v_pkg */
els_cmnd_type = HIFC_GET_ELS_RSP_TYPE(v_pkg->cmnd);
if (HIFC_PKG_IS_ELS_RSP(els_cmnd_type)) {
els_cmd_code = HIFC_GET_ELS_RSP_CODE(v_pkg->cmnd);
exch_id = UNF_GET_RXID(v_pkg);
sqe->task_section.fc_dw0.task_type = HIFC_SQE_ELS_RSP;
} else {
els_cmd_code = els_cmnd_type;
els_cmnd_type = ELS_CMND;
exch_id = UNF_GET_OXID(v_pkg);
sqe->task_section.fc_dw0.task_type = HIFC_SQE_ELS_CMND;
}
if ((els_cmd_code == ELS_ECHO) && (els_cmnd_type != ELS_RJT)) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_WARN,
"[info]Port(0x%x) RPort(0x%x) send ELS ECHO can't send via root Type(0x%x)",
hba->port_cfg.port_id, rport_idx, els_cmnd_type);
return UNF_RETURN_NOT_SUPPORT;
}
exch_id += hba->exit_base;
ret = hifc_get_els_frame_len(els_cmnd_type, els_cmd_code, &frame_len);
if (ret != RETURN_OK) {
dump_stack();
return ret;
}
/* Obtains the frame start address */
frame_addr = HIFC_GET_CMND_HEADER_ADDR(v_pkg);
frame_phy_addr = v_pkg->unf_cmnd_pload_bl.buf_dma_addr;
/* Assemble the frame header and adjust the payload based on the ELS type */
hifc_build_els_frame_header(hba->exit_base, els_cmnd_type,
els_cmd_code, v_pkg);
/* Assembling the Control Section */
hifc_build_service_wqe_ctrl_section(
&sqe->ctrl_section,
HIFC_BYTES_TO_QW_NUM(
sizeof(struct hifc_root_sqe_task_section_s)),
HIFC_BYTES_TO_QW_NUM(sizeof(struct hifc_root_sge_s)));
/* Fill in Normal Root SQE TS */
rport_idx = v_pkg->private[PKG_PRIVATE_XCHG_RPORT_INDEX];
scq_num = hifc_get_rport_maped_cmd_scqn(v_hba, rport_idx);
hifc_build_service_wqe_root_ts(v_hba, sqe, exch_id, rport_idx, scq_num);
/* Update magic number into sqe */
hifc_save_magic_num_in_nurmal_root_ts(sqe, v_pkg);
/* Fill in the special part of Normal Root SQE TS and initiate implicit
* uninstallation
*/
if ((els_cmd_code == ELS_PLOGI) && (els_cmnd_type != ELS_RJT)) {
prnt_qinfo = hifc_find_parent_queue_info_by_pkg(hba, v_pkg);
if (!prnt_qinfo) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT,
UNF_ERR,
"[warn]Port(0x%x) RPort(0x%x) send ELS Type(0x%x) find parent queue fail",
hba->port_cfg.port_id, rport_idx,
els_cmnd_type);
return UNF_RETURN_ERROR;
}
spin_lock_irqsave(&prnt_qinfo->parent_queue_state_lock, flag);
last_offload_state = prnt_qinfo->offload_state;
/* Fill in the special part of Normal Root SQE TS */
ret = hifc_build_service_wqe_root_offload((void *)hba,
v_pkg, prnt_qinfo,
sqe);
if (ret != RETURN_OK) {
spin_unlock_irqrestore(
&prnt_qinfo->parent_queue_state_lock, flag);
return ret;
}
spin_unlock_irqrestore(&prnt_qinfo->parent_queue_state_lock,
flag);
/* Before the offload, check whether there is a risk of
* repeated offload
*/
sqe_delay = hifc_check_need_delay_offload((void *)hba,
v_pkg, rport_idx,
prnt_qinfo,
&offload_parnt_queue);
}
/* Fill in Normal Root SQE SGE */
hifc_build_service_wqe_root_sge(sqe, frame_addr, frame_phy_addr,
frame_len, v_hba);
if (sqe_delay == UNF_TRUE) {
ret = hifc_push_delay_sqe((void *)hba, offload_parnt_queue,
sqe, v_pkg);
if (ret == RETURN_OK) {
hifc_recover_offloading_state(prnt_qinfo,
last_offload_state);
return ret;
}
}
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO,
"[info]Port(0x%x) RPort(0x%x) send ELS Type(0x%x) Code(0x%x) ExchId(0x%x)",
hba->port_cfg.port_id, rport_idx, els_cmnd_type,
els_cmd_code, exch_id);
ret = hifc_root_sq_enqueue(hba, sqe);
if ((ret != RETURN_OK) && (prnt_qinfo)) {
hifc_recover_offloading_state(prnt_qinfo, last_offload_state);
spin_lock_irqsave(&prnt_qinfo->parent_queue_state_lock, flag);
if (prnt_qinfo->parent_sq_info.destroy_sqe.valid ==
UNF_TRUE) {
memcpy(&destroy_sqe_info,
&prnt_qinfo->parent_sq_info.destroy_sqe,
sizeof(struct hifc_destroy_ctrl_info_s));
prnt_qinfo->parent_sq_info.destroy_sqe.valid =
UNF_FALSE;
}
spin_unlock_irqrestore(&prnt_qinfo->parent_queue_state_lock,
flag);
hifc_pop_destroy_parent_queue_sqe((void *)v_hba,
&destroy_sqe_info);
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[warn]Port(0x%x) RPort(0x%x) send ELS Type(0x%x) Code(0x%x) ExchId(0x%x) fail, recover offloadstatus(%u)",
hba->port_cfg.port_id,
rport_idx,
els_cmnd_type,
els_cmd_code,
exch_id,
prnt_qinfo->offload_state);
}
return ret;
}
static void *hifc_get_els_frame_addr(struct hifc_hba_s *v_hba,
struct unf_frame_pkg_s *v_pkg,
unsigned short els_cmd_code,
unsigned short els_cmnd_type,
unsigned long long *v_phyaddr)
{
void *frame_pld_addr;
dma_addr_t els_frame_addr = 0;
if (els_cmd_code == ELS_ECHO) {
frame_pld_addr = (void *)UNF_GET_ECHO_PAYLOAD(v_pkg);
els_frame_addr = UNF_GET_ECHO_PAYLOAD_PHYADDR(v_pkg);
} else if (els_cmd_code == ELS_RSCN) {
if (els_cmnd_type == ELS_CMND) {
/* Not Support */
frame_pld_addr = NULL;
els_frame_addr = 0;
} else {
frame_pld_addr =
(void *)UNF_GET_RSCN_ACC_PAYLOAD(v_pkg);
els_frame_addr = v_pkg->unf_cmnd_pload_bl.buf_dma_addr +
sizeof(struct unf_fchead_s);
}
} else {
frame_pld_addr = (void *)HIFC_GET_CMND_PAYLOAD_ADDR(v_pkg);
els_frame_addr = v_pkg->unf_cmnd_pload_bl.buf_dma_addr +
sizeof(struct unf_fchead_s);
}
*v_phyaddr = els_frame_addr;
return frame_pld_addr;
}
static unsigned int hifc_send_els_via_parent(
void *v_hba,
struct unf_frame_pkg_s *v_pkg,
struct hifc_parent_queue_info_s *v_prntq_info)
{
unsigned int ret = UNF_RETURN_ERROR;
unsigned short els_cmd_code = UNF_ZERO;
unsigned short els_cmnd_type = UNF_ZERO;
unsigned short remote_xid = 0;
unsigned short local_xid = 0;
struct hifc_hba_s *hba;
struct hifc_parent_sq_info_s *sq_info = NULL;
struct hifcoe_sqe_s sqe;
void *frame_pld_addr;
unsigned int frame_pld_len = 0;
unsigned int acc_pld_len = 0;
unsigned long long fram_phy_addr = 0;
hba = (struct hifc_hba_s *)v_hba;
memset(&sqe, 0, sizeof(struct hifcoe_sqe_s));
sq_info = &v_prntq_info->parent_sq_info;
/* Determine the ELS type from v_pkg */
els_cmnd_type = HIFC_GET_ELS_CMND_CODE(v_pkg->cmnd);
if (HIFC_PKG_IS_ELS_RSP(els_cmnd_type)) {
els_cmd_code = HIFC_GET_ELS_RSP_CODE(v_pkg->cmnd);
remote_xid = UNF_GET_OXID(v_pkg);
local_xid = UNF_GET_RXID(v_pkg) + hba->exit_base;
} else {
els_cmd_code = els_cmnd_type;
els_cmnd_type = ELS_CMND;
local_xid = UNF_GET_OXID(v_pkg) + hba->exit_base;
remote_xid = UNF_GET_RXID(v_pkg);
}
frame_pld_addr = hifc_get_els_frame_addr(v_hba, v_pkg, els_cmd_code,
els_cmnd_type, &fram_phy_addr);
if (HIFC_PKG_IS_ELS_RSP(els_cmnd_type)) {
ret = hifc_get_els_rps_pld_len(els_cmnd_type, els_cmd_code,
&frame_pld_len);
if (ret != RETURN_OK)
return ret;
hifc_build_els_wqe_ts_rsp(
&sqe, sq_info, frame_pld_addr,
els_cmnd_type, els_cmd_code,
v_prntq_info->parent_sts_scq_info.cqm_queue_id);
} else {
/* Fill in HIFCOE_TASK_T_ELS */
ret = hifc_get_els_req_and_acc_pld_len(els_cmd_code,
&frame_pld_len,
&acc_pld_len);
if (ret != RETURN_OK)
return ret;
hifc_build_els_wqe_ts_req(
&sqe, sq_info, els_cmd_code,
v_prntq_info->parent_sts_scq_info.cqm_queue_id,
frame_pld_addr);
}
/* Assemble the magicnum field of the els */
hifc_build_els_wqe_ts_magic_num(&sqe, els_cmnd_type,
UNF_GETXCHGALLOCTIME(v_pkg));
/* Assemble the SQE Control Section part */
hifc_build_service_wqe_ctrl_section(
&sqe.ctrl_sl,
HIFC_BYTES_TO_QW_NUM(HIFC_SQE_TS_SIZE),
HIFC_BYTES_TO_QW_NUM(sizeof(struct hifcoe_variable_sge_s)));
/* Assemble the SQE Task Section Els Common part */
hifc_build_service_wqe_ts_common(&sqe.ts_sl, sq_info->rport_index,
local_xid, remote_xid,
HIFC_LSW(frame_pld_len));
/* Build SGE */
hifc_build_els_gs_wqe_sge(&sqe, frame_pld_addr, fram_phy_addr,
frame_pld_len, sq_info->context_id, v_hba);
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Port(0x%x) RPort(0x%x) send ELS Type(0x%x) Code(0x%x) ExchId(0x%x)",
hba->port_cfg.port_id, sq_info->rport_index, els_cmnd_type,
els_cmd_code, local_xid);
ret = hifc_parent_sq_enqueue(sq_info, &sqe);
return ret;
}
unsigned int hifc_send_els_cmnd(void *v_hba, struct unf_frame_pkg_s *v_pkg)
{
unsigned int ret = UNF_RETURN_ERROR;
unsigned long flag = 0;
struct hifc_hba_s *hba = NULL;
struct hifc_parent_queue_info_s *prnt_qinfo = NULL;
unsigned short els_cmd_code = UNF_ZERO;
unsigned short els_rsp_code = UNF_ZERO;
union unf_sfs_u *fc_entry = NULL;
struct unf_rrq_s *rrq_pld = NULL;
unsigned short ox_id = 0;
unsigned short rx_id = 0;
/* Check Parameters */
UNF_CHECK_VALID(0x5014, UNF_TRUE, v_hba, return UNF_RETURN_ERROR);
UNF_CHECK_VALID(0x5015, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR);
UNF_CHECK_VALID(0x5016, UNF_TRUE, UNF_GET_SFS_ENTRY(v_pkg),
return UNF_RETURN_ERROR);
UNF_CHECK_VALID(0x5017, UNF_TRUE, HIFC_GET_CMND_PAYLOAD_ADDR(v_pkg),
return UNF_RETURN_ERROR);
HIFC_CHECK_PKG_ALLOCTIME(v_pkg);
hba = (struct hifc_hba_s *)v_hba;
els_cmd_code = HIFC_GET_ELS_CMND_CODE(v_pkg->cmnd);
els_rsp_code = HIFC_GET_ELS_RSP_CODE(v_pkg->cmnd);
/* If RRQ Req, Special processing */
if (els_cmd_code == ELS_RRQ) {
fc_entry = UNF_GET_SFS_ENTRY(v_pkg);
rrq_pld = &fc_entry->rrq;
ox_id = (unsigned short)(rrq_pld->oxid_rxid >> 16);
rx_id = (unsigned short)(rrq_pld->oxid_rxid & 0xFFFF);
ox_id += hba->exit_base;
rrq_pld->oxid_rxid = ox_id << 16 | rx_id;
}
prnt_qinfo = hifc_find_parent_queue_info_by_pkg(hba, v_pkg);
if (!prnt_qinfo) {
HIFC_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"Port(0x%x) send ELS SID(0x%x) DID(0x%x) get a null parent queue info, send via root",
hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid,
v_pkg->frame_head.rctl_did);
/* If the Rport cannot be found, Send Pkg by Root SQ */
ret = hifc_send_els_via_root(v_hba, v_pkg);
return ret;
}
spin_lock_irqsave(&prnt_qinfo->parent_queue_state_lock, flag);
/* After offload, Send Pkg by Parent SQ */
if (HIFC_RPORT_OFFLOADED(prnt_qinfo)) {
spin_unlock_irqrestore(&prnt_qinfo->parent_queue_state_lock,
flag);
ret = hifc_send_els_via_parent(v_hba, v_pkg, prnt_qinfo);
} else {
/* Before offload, Send Pkg by Root SQ */
spin_unlock_irqrestore(&prnt_qinfo->parent_queue_state_lock,
flag);
ret = hifc_send_els_via_root(v_hba, v_pkg);
}
return ret;
}
unsigned int hifc_rq_rcv_els_rsp_sts(
struct hifc_hba_s *v_hba,
struct hifc_root_rq_complet_info_s *v_cs_info)
{
unsigned int ret = UNF_RETURN_ERROR;
unsigned int rx_id = (~0);
struct unf_frame_pkg_s pkg = { 0 };
rx_id = (unsigned int)v_cs_info->exch_id - v_hba->exit_base;
pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME] = v_cs_info->magic_num;
ret = hifc_rcv_els_rsp_sts(v_hba, &pkg, rx_id);
HIFC_IO_STAT(v_hba, HIFCOE_TASK_T_ELS_RSP_STS);
return ret;
}
static unsigned int hifc_recv_els_rsp_payload(struct hifc_hba_s *v_hba,
struct unf_frame_pkg_s *v_pkg,
unsigned int ox_id,
unsigned char *v_els_pld_buf,
unsigned int pld_len)
{
unsigned int ret = UNF_RETURN_ERROR;
v_pkg->type = UNF_PKG_ELS_REQ_DONE;
v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = ox_id;
/* Payload Buffer in ROOT SQ Buffer */
v_pkg->unf_cmnd_pload_bl.buffer_ptr = v_els_pld_buf;
v_pkg->unf_cmnd_pload_bl.length = pld_len;
v_pkg->byte_orders |= HIFC_BIT_2;
/* Mark as a non-last block */
v_pkg->last_pkg_flag = UNF_PKG_NOT_LAST_RESPONSE;
UNF_LOWLEVEL_RECEIVE_ELS_PKG(ret, v_hba->lport, v_pkg);
return ret;
}
static unsigned int hifc_rq_rcv_els_frame(struct hifc_hba_s *v_hba,
unsigned char *v_frame,
unsigned int frame_len,
unsigned short pkg_flag,
struct unf_frame_pkg_s *v_pkg)
{
unsigned int ret = UNF_RETURN_ERROR;
unsigned int ox_id = INVALID_VALUE32;
unsigned int pld_len = 0;
unsigned char *plg_buf = NULL;
unsigned long flags = 0;
plg_buf = v_frame;
pld_len = frame_len;
v_pkg->status = UNF_IO_SUCCESS;
if (UNF_GET_FC_HEADER_RCTL(&v_pkg->frame_head) ==
HIFC_FC_RCTL_ELS_RSP) {
ox_id = v_pkg->frame_head.oxid_rxid >> 16;
if (!(HIFC_XID_IS_VALID(ox_id, (unsigned int)v_hba->exit_base,
(unsigned int)v_hba->exit_count))) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT,
UNF_WARN, "[err]Port(0x%x) ExchId(0x%x) isn't in 0x%x~0x%x",
v_hba->port_cfg.port_id, ox_id,
v_hba->exit_base,
v_hba->exit_base + v_hba->exit_count - 1);
goto rq_recv_error_els_frame;
}
ox_id -= v_hba->exit_base;
ret = hifc_recv_els_rsp_payload(v_hba, v_pkg, ox_id, plg_buf,
pld_len);
if (ret != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT,
UNF_ERR,
"[err]Port(0x%x) receive ESL RSP payload error, OXID(0x%x) RXID(0x%x) PldLen(0x%x)",
v_hba->port_cfg.port_id, UNF_GET_OXID(v_pkg),
UNF_GET_RXID(v_pkg), pld_len);
HIFC_ERR_IO_STAT(v_hba, HIFCOE_TASK_T_RCV_ELS_RSP);
}
if (HIFC_CHECK_IF_LAST_PKG(pkg_flag)) {
ret = hifc_rcv_els_rsp(v_hba, v_pkg, ox_id);
HIFC_IO_STAT(v_hba, HIFCOE_TASK_T_RCV_ELS_RSP);
}
} else if (UNF_GET_FC_HEADER_RCTL(&v_pkg->frame_head) ==
HIFC_FC_RCTL_ELS_REQ) {
HIFC_IO_STAT(v_hba, HIFCOE_TASK_T_RCV_ELS_CMD);
if (HIFC_CHECK_IF_FIRST_PKG(pkg_flag))
v_pkg->xchg_contex = NULL;
v_pkg->last_pkg_flag = (HIFC_CHECK_IF_LAST_PKG(pkg_flag)) ?
UNF_PKG_LAST_REQUEST : UNF_PKG_NOT_LAST_REQUEST;
ret = hifc_rcv_els_cmnd(v_hba, v_pkg, plg_buf, pld_len,
HIFC_CHECK_IF_FIRST_PKG(pkg_flag));
spin_lock_irqsave(&v_hba->delay_info.srq_lock, flags);
if (v_hba->delay_info.srq_delay_flag) {
v_hba->delay_info.srq_delay_flag = 0;
if (!cancel_delayed_work(&v_hba->delay_info.del_work)) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN,
UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x) rcvd plogi from srq process delay timer maybe timeout",
v_hba->port_cfg.port_id);
}
spin_unlock_irqrestore(&v_hba->delay_info.srq_lock,
flags);
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT,
UNF_ERR,
"[info]Port(0x%x) received els from root rq and send delay plogi to CM",
v_hba->port_cfg.port_id);
hifc_rcv_els_cmnd(
v_hba, &v_hba->delay_info.pkg,
v_hba->delay_info.pkg.unf_cmnd_pload_bl.buffer_ptr,
0, UNF_FALSE);
} else {
spin_unlock_irqrestore(&v_hba->delay_info.srq_lock,
flags);
}
} else {
goto rq_recv_error_els_frame;
}
return ret;
rq_recv_error_els_frame:
return HIFC_RQ_ERROR_FRAME;
}
static unsigned int hifc_rq_rcv_bls_frame(struct hifc_hba_s *v_hba,
struct unf_frame_pkg_s *v_pkg)
{
unsigned int ret = RETURN_OK;
unsigned int ox_id = INVALID_VALUE32;
v_pkg->status = UNF_IO_SUCCESS;
if ((UNF_GET_FC_HEADER_RCTL(&v_pkg->frame_head) == HIFC_RCTL_BLS_ACC) ||
(UNF_GET_FC_HEADER_RCTL(&v_pkg->frame_head) == HIFC_RCTL_BLS_RJT)) {
/* INI Mode */
ox_id = UNF_GET_FC_HEADER_OXID(&v_pkg->frame_head);
if ((ox_id < (unsigned int)v_hba->exit_base) ||
(ox_id >= (unsigned int)(v_hba->exit_base +
v_hba->exit_count))) {
goto rq_recv_error_bls_frame;
}
ox_id -= v_hba->exit_base;
ret = hifc_rcv_bls_rsp(v_hba, v_pkg, ox_id);
HIFC_IO_STAT(v_hba, HIFCOE_TASK_T_RCV_ABTS_RSP);
} else {
goto rq_recv_error_bls_frame;
}
return ret;
rq_recv_error_bls_frame:
return HIFC_RQ_ERROR_FRAME;
}
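/*
 * Root RQ receive dispatch: frames delivered through the Root RQ are
 * split by the FC header type field - ELS frames go to
 * hifc_rq_rcv_els_frame() (possibly spanning several receive buffers),
 * BLS frames go to hifc_rq_rcv_bls_frame(), and anything else is counted
 * and dropped as HIFC_RQ_ERROR_FRAME.
 */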
static unsigned int hifc_rq_rcv_service_frame(struct hifc_hba_s *v_hba,
unsigned char *v_frame,
unsigned int frame_len,
unsigned short pkg_flag,
struct unf_frame_pkg_s *v_pkg)
{
unsigned int ret = UNF_RETURN_ERROR;
unsigned char fc_frame_type = 0;
fc_frame_type = UNF_GET_FC_HEADER_TYPE(&v_pkg->frame_head);
if (fc_frame_type == HIFC_FC_TYPE_ELS) {
v_hba->delay_info.root_rq_rcvd_flag = 1;
ret = hifc_rq_rcv_els_frame(v_hba, v_frame, frame_len,
pkg_flag, v_pkg);
} else if (fc_frame_type == HIFC_FC_TYPE_BLS) {
ret = hifc_rq_rcv_bls_frame(v_hba, v_pkg);
} else {
ret = HIFC_RQ_ERROR_FRAME;
}
if (ret == HIFC_RQ_ERROR_FRAME) {
/* Error statistics are collected when an invalid frame
* is received
*/
HIFC_IO_STAT(v_hba, HIFCOE_TASK_T_BUTT);
HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[info]Port(0x%x) Receive an unsupported frame, Rctl(0x%x), Type(0x%x), Fctl(0x%x), Sid_Did(0x%x_0x%x),OxId_RxId(0x%x_0x%x), FrameLen(0x%x), drop it",
v_hba->port_cfg.port_id,
UNF_GET_FC_HEADER_RCTL(&v_pkg->frame_head),
UNF_GET_FC_HEADER_TYPE(&v_pkg->frame_head),
UNF_GET_FC_HEADER_FCTL(&v_pkg->frame_head),
UNF_GET_FC_HEADER_SID(&v_pkg->frame_head),
UNF_GET_FC_HEADER_DID(&v_pkg->frame_head),
UNF_GET_FC_HEADER_OXID(&v_pkg->frame_head),
UNF_GET_FC_HEADER_RXID(&v_pkg->frame_head),
frame_len);
}
return ret;
}
unsigned int hifc_rcv_service_frame_from_rq(struct hifc_hba_s *v_hba,
struct hifc_root_rq_info_s
*v_rq_info,
struct hifc_root_rq_complet_info_s
*v_complet_info,
unsigned short v_rcv_buf_num)
{
unsigned short remain_len = 0;
unsigned short rcv_len = 0;
unsigned short pkg_flag = 0;
unsigned int ret = UNF_RETURN_ERROR;
unsigned short pkt_len = 0;
void *root_rq_rcv_buf = NULL;
unsigned short ci = 0;
unsigned int loop = 0;
struct unf_frame_pkg_s pkg = { 0 };
struct unf_fchead_s *els_frame = NULL;
unsigned char *pld_buf = NULL;
unsigned int pld_len = 0;
ci = v_rq_info->ci;
pkt_len = v_complet_info->buf_length;
memset(&pkg, 0, sizeof(pkg));
for (loop = 0; loop < v_rcv_buf_num; loop++) {
/* Obtain rcv buffer */
root_rq_rcv_buf =
(void *)((unsigned long long)v_rq_info->rq_rcv_buff +
HIFC_ROOT_RQ_RECV_BUFF_SIZE * ci);
/* Calculate the frame data address and length */
els_frame = (struct unf_fchead_s *)root_rq_rcv_buf;
rcv_len = HIFC_ROOT_RQ_RECV_BUFF_SIZE;
pkg_flag = 0;
if (loop == (v_rcv_buf_num - 1)) {
pkg_flag |= HIFC_LAST_PKG_FLAG;
remain_len = pkt_len % HIFC_ROOT_RQ_RECV_BUFF_SIZE;
rcv_len = (remain_len > 0) ? (remain_len) :
HIFC_ROOT_RQ_RECV_BUFF_SIZE;
}
/* Calculate the frame data address and length */
if (loop == 0) {
pkg_flag |= HIFC_FIRST_PKG_FLAG;
memcpy(&pkg.frame_head, els_frame,
sizeof(pkg.frame_head));
hifc_big_to_cpu32(&pkg.frame_head,
sizeof(pkg.frame_head));
pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME] =
v_complet_info->magic_num;
pld_buf = (unsigned char *)(els_frame + 1);
pld_len = rcv_len - sizeof(pkg.frame_head);
} else {
pld_buf = (unsigned char *)els_frame;
pld_len = rcv_len;
}
/* Processing the rqe sent by the FC ucode */
ret = hifc_rq_rcv_service_frame(v_hba, pld_buf, pld_len,
pkg_flag, &pkg);
if (ret != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT,
UNF_INFO,
"[err]Up layer Process RQE frame or status abnormal(0x%x)",
ret);
return UNF_RETURN_ERROR;
}
ci = ((ci + 1) < v_rq_info->q_depth) ? (ci + 1) : 0;
}
return RETURN_OK;
}
static unsigned int hifc_rcv_gs_rsp_payload(const struct hifc_hba_s *v_hba,
struct unf_frame_pkg_s *v_pkg,
unsigned int ox_id,
unsigned char *v_els_pld_buf,
unsigned int pld_len)
{
unsigned int ret = UNF_RETURN_ERROR;
v_pkg->type = UNF_PKG_GS_REQ_DONE;
v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = ox_id;
/* Convert to little endian */
hifc_big_to_cpu32(v_els_pld_buf, pld_len);
/* Payload Buffer in ROOT SQ Buffer */
v_pkg->unf_cmnd_pload_bl.buffer_ptr = v_els_pld_buf;
v_pkg->unf_cmnd_pload_bl.length = pld_len;
/* Mark as a non-last block */
v_pkg->last_pkg_flag = UNF_PKG_NOT_LAST_RESPONSE;
UNF_LOWLEVEL_RECEIVE_GS_PKG(ret, v_hba->lport, v_pkg);
return ret;
}
static unsigned int hifc_scq_rcv_abts_rsp(struct hifc_hba_s *v_hba,
union hifcoe_scqe_u *v_scqe)
{
/* Default path, which is sent from SCQ to the driver */
unsigned char status = 0;
unsigned int ret = UNF_RETURN_ERROR;
unsigned int ox_id = INVALID_VALUE32;
struct unf_frame_pkg_s pkg = { 0 };
struct hifcoe_scqe_rcv_abts_rsp_s *abts_rsp = NULL;
abts_rsp = &v_scqe->rcv_abts_rsp;
pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME] = abts_rsp->magic_num;
ox_id = (unsigned int)(abts_rsp->wd0.ox_id);
if (unlikely((ox_id < (unsigned int)v_hba->exit_base) ||
(ox_id >=
(unsigned int)(v_hba->exit_base + v_hba->exit_count)))) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]Port(0x%x) has bad OX_ID(0x%x) for bls_rsp",
v_hba->port_cfg.port_id, ox_id);
return UNF_RETURN_ERROR;
}
ox_id -= v_hba->exit_base;
if (unlikely(HIFC_SCQE_HAS_ERRCODE(v_scqe))) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x) BLS response has error code(0x%x) tag(0x%x)",
v_hba->port_cfg.port_id,
HIFC_GET_SCQE_STATUS(v_scqe),
(unsigned int)(abts_rsp->wd0.ox_id));
status = UNF_IO_FAILED;
} else {
pkg.frame_head.rctl_did = abts_rsp->wd3.did;
pkg.frame_head.csctl_sid = abts_rsp->wd4.sid;
pkg.frame_head.oxid_rxid = (unsigned int)(abts_rsp->wd0.rx_id)
| ox_id << 16;
/* BLS_ACC/BLS_RJT: IO_succeed */
if (abts_rsp->wd2.fh_rctrl == HIFC_RCTL_BLS_ACC) {
status = UNF_IO_SUCCESS;
} else if (abts_rsp->wd2.fh_rctrl == HIFC_RCTL_BLS_RJT) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO,
UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"[info]Port(0x%x) ABTS RJT: %08x-%08x-%08x",
v_hba->port_cfg.port_id,
abts_rsp->payload[0],
abts_rsp->payload[1], abts_rsp->payload[2]);
status = UNF_IO_SUCCESS;
} else {
/* 3. BA_RSP type is err: IO_failed */
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR,
UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]Port(0x%x) BLS response RCTL is error",
v_hba->port_cfg.port_id);
HIFC_ERR_IO_STAT(v_hba, HIFC_SCQE_ABTS_RSP);
status = UNF_IO_FAILED;
}
}
/* Set PKG/exchange status & Process BLS_RSP */
pkg.status = status;
ret = hifc_rcv_bls_rsp(v_hba, &pkg, ox_id);
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Port(0x%x) recv ABTS rsp OX_ID(0x%x) RX_ID(0x%x) SID(0x%x) DID(0x%x) %s",
v_hba->port_cfg.port_id,
ox_id,
abts_rsp->wd0.rx_id,
abts_rsp->wd4.sid,
abts_rsp->wd3.did,
(ret == RETURN_OK) ? "OK" : "ERROR");
return ret;
}
unsigned int hifc_rq_rcv_srv_err(struct hifc_hba_s *v_hba,
struct hifc_root_rq_complet_info_s *v_cs_info)
{
UNF_REFERNCE_VAR(v_hba);
UNF_REFERNCE_VAR(v_cs_info);
HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]hifc_rq_rcv_srv_err not implemented yet");
if (!v_hba)
return UNF_RETURN_ERROR;
if (!v_cs_info)
return UNF_RETURN_ERROR;
return UNF_RETURN_ERROR;
}
unsigned int hifc_rcv_els_cmnd(const struct hifc_hba_s *v_hba,
struct unf_frame_pkg_s *v_pkg,
unsigned char *v_pld,
unsigned int pld_len,
int first_frame)
{
unsigned int ret = UNF_RETURN_ERROR;
/* Convert Payload to little endian */
hifc_big_to_cpu32(v_pld, pld_len);
v_pkg->type = UNF_PKG_ELS_REQ;
v_pkg->unf_cmnd_pload_bl.buffer_ptr = v_pld;
/* Payload length */
v_pkg->unf_cmnd_pload_bl.length = pld_len;
/* Obtain the Cmnd type from the payload. The Cmnd is in little endian */
if (first_frame == UNF_TRUE) {
v_pkg->cmnd = UNF_GET_FC_PAYLOAD_ELS_CMND(
v_pkg->unf_cmnd_pload_bl.buffer_ptr);
}
/* Errors have been processed in HIFC_RecvElsError */
v_pkg->status = UNF_IO_SUCCESS;
/* Send PKG to the CM layer */
UNF_LOWLEVEL_RECEIVE_ELS_PKG(ret, v_hba->lport, v_pkg);
return ret;
}
unsigned int hifc_rcv_els_rsp(const struct hifc_hba_s *v_hba,
struct unf_frame_pkg_s *v_pkg,
unsigned int ox_id)
{
unsigned int ret = UNF_RETURN_ERROR;
/* Receive CmndReqSts */
v_pkg->type = UNF_PKG_ELS_REQ_DONE;
v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = ox_id;
v_pkg->byte_orders |= HIFC_BIT_2;
/* Mark the last block */
v_pkg->last_pkg_flag = UNF_PKG_LAST_RESPONSE;
/* Send PKG to the CM layer */
UNF_LOWLEVEL_RECEIVE_ELS_PKG(ret, v_hba->lport, v_pkg);
return ret;
}
unsigned int hifc_rcv_els_rsp_sts(const struct hifc_hba_s *v_hba,
struct unf_frame_pkg_s *v_pkg,
unsigned int rx_id)
{
unsigned int ret = UNF_RETURN_ERROR;
v_pkg->type = UNF_PKG_ELS_REPLY_DONE;
v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = rx_id;
UNF_LOWLEVEL_SEND_ELS_DONE(ret, v_hba->lport, v_pkg);
return ret;
}
unsigned int hifc_rcv_gs_rsp(const struct hifc_hba_s *v_hba,
struct unf_frame_pkg_s *v_pkg,
unsigned int ox_id)
{
unsigned int ret = UNF_RETURN_ERROR;
/* Receive CmndReqSts */
v_pkg->type = UNF_PKG_GS_REQ_DONE;
v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = ox_id;
/* Mark the last block */
v_pkg->last_pkg_flag = UNF_PKG_LAST_RESPONSE;
/* Send PKG to the CM layer */
UNF_LOWLEVEL_RECEIVE_GS_PKG(ret, v_hba->lport, v_pkg);
return ret;
}
unsigned int hifc_rcv_bls_rsp(const struct hifc_hba_s *v_hba,
struct unf_frame_pkg_s *v_pkg,
unsigned int ox_id)
{
/*
 * 1. from SCQ (normal case)
 * 2. from Root RQ (no parent context exists)
 *
 * single frame, single sequence
 */
unsigned int ret = UNF_RETURN_ERROR;
v_pkg->type = UNF_PKG_BLS_REQ_DONE;
v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = ox_id;
v_pkg->last_pkg_flag = UNF_PKG_LAST_RESPONSE;
UNF_LOWLEVEL_RECEIVE_BLS_PKG(ret, v_hba->lport, v_pkg);
return ret;
}
unsigned int hifc_rcv_tmf_marker_sts(const struct hifc_hba_s *v_hba,
struct unf_frame_pkg_s *v_pkg,
unsigned int ox_id)
{
unsigned int ret = UNF_RETURN_ERROR;
v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = ox_id;
/* Send PKG info to COM */
UNF_LOWLEVEL_RECEIVE_MARKER_STS(ret, v_hba->lport, v_pkg);
return ret;
}
unsigned int hifc_rcv_abts_marker_sts(const struct hifc_hba_s *v_hba,
struct unf_frame_pkg_s *v_pkg,
unsigned int ox_id)
{
unsigned int ret = UNF_RETURN_ERROR;
v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = ox_id;
UNF_LOWLEVEL_RECEIVE_ABTS_MARKER_STS(ret, v_hba->lport, v_pkg);
return ret;
}
void hifc_scqe_error_pre_process(struct hifc_hba_s *v_hba,
union hifcoe_scqe_u *v_scqe)
{
/* Currently, only printing and statistics collection are performed */
HIFC_ERR_IO_STAT(v_hba, HIFC_GET_SCQE_TYPE(v_scqe));
HIFC_SCQ_ERR_TYPE_STAT(v_hba, HIFC_GET_SCQE_STATUS(v_scqe));
HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_ABNORMAL, UNF_WARN,
"[warn]Port(0x%x)-Task_type(%u) SCQE contain error code(%u), additional info(0x%x)",
v_hba->port_cfg.port_id,
v_scqe->common.ch.wd0.task_type,
v_scqe->common.ch.wd0.err_code,
v_scqe->common.conn_id);
}
unsigned int hifc_rcv_scqe_entry_from_scq(void *v_hba, void *v_scqe,
unsigned int scq_idx)
{
unsigned int ret = UNF_RETURN_ERROR;
int do_reclaim = UNF_FALSE;
unsigned int index = 0;
unsigned int total_index = 0;
struct hifc_hba_s *hba = NULL;
union hifcoe_scqe_u *scqe = NULL;
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba,
return UNF_RETURN_ERROR);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_scqe,
return UNF_RETURN_ERROR);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, HIFC_TOTAL_SCQ_NUM > scq_idx,
return UNF_RETURN_ERROR);
scqe = (union hifcoe_scqe_u *)v_scqe;
hba = (struct hifc_hba_s *)v_hba;
HIFC_IO_STAT(hba, HIFC_GET_SCQE_TYPE(scqe));
/* 1. error code checking */
if (unlikely(HIFC_SCQE_HAS_ERRCODE(scqe))) {
/* So far, just print & count */
hifc_scqe_error_pre_process(hba, scqe);
}
/* 2. Process SCQE by corresponding processor */
total_index = sizeof(scqe_handler_table) /
sizeof(struct unf_scqe_handler_table_s);
while (index < total_index) {
if (HIFC_GET_SCQE_TYPE(scqe) ==
scqe_handler_table[index].scqe_type) {
ret = scqe_handler_table[index].pfn_scqe_handle_fun(
hba, scqe);
do_reclaim = scqe_handler_table[index].reclaim_sq_wpg;
break;
}
index++;
}
/* 3. SCQE type check */
if (unlikely(index == total_index)) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
"[warn]Unknown SCQE type %d",
HIFC_GET_SCQE_TYPE(scqe));
UNF_PRINT_SFS_LIMIT(UNF_ERR, hba->port_cfg.port_id, scqe,
sizeof(union hifcoe_scqe_u));
}
/* 4. If the SCQE completes an SQ WQE, reclaim the linked-list SQ WQE page */
if (do_reclaim == UNF_TRUE) {
if (HIFC_SCQE_CONN_ID_VALID(scqe)) {
ret = hifc_reclaim_sq_wqe_page(v_hba, scqe);
} else {
/* NOTE: for buffer clear, the SCQE conn_id is 0xFFFF,
* count with HBA
*/
HIFC_HBA_STAT(
(struct hifc_hba_s *)v_hba,
HIFC_STAT_SQ_IO_BUFFER_CLEARED);
}
}
return ret;
}
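/*
 * ELS SRQ buffer handling: the ucode reports received ELS/GS data as a
 * list of user_id indexes into els_srq_info.els_buff_entry_head. The
 * helpers below translate a user_id into its buffer address, validate the
 * index range, and return the buffer to the hardware with
 * hifc_post_els_srq_wqe() once the payload has been pushed to the CM
 * layer.
 */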
static void *hifc_get_els_buf_by_userid(struct hifc_hba_s *v_hba,
unsigned short user_id)
{
struct hifc_srq_buff_entry_s *buf_entry = NULL;
struct hifc_srq_info_s *srq_info = NULL;
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return NULL);
srq_info = &v_hba->els_srq_info;
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE,
user_id < srq_info->valid_wqe_num, return NULL);
buf_entry = &srq_info->els_buff_entry_head[user_id];
return buf_entry->buff_addr;
}
static unsigned int hifc_check_srq_buf_valid(struct hifc_hba_s *v_hba,
unsigned int *v_buf_id,
unsigned int v_buf_num)
{
unsigned int index = 0;
unsigned int buf_id = 0;
void *srq_buf = NULL;
for (index = 0; index < v_buf_num; index++) {
buf_id = v_buf_id[index];
if (buf_id < v_hba->els_srq_info.valid_wqe_num) {
srq_buf = hifc_get_els_buf_by_userid(
v_hba,
(unsigned short)buf_id);
} else {
srq_buf = NULL;
}
if (!srq_buf) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR,
UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]Port(0x%x) get srq buffer user id(0x%x) is null",
v_hba->port_cfg.port_id, buf_id);
return UNF_RETURN_ERROR;
}
}
return RETURN_OK;
}
static void hifc_reclaim_srq_buff(struct hifc_hba_s *v_hba,
unsigned int *v_buf_id,
unsigned int v_buf_num)
{
unsigned int index = 0;
unsigned int buf_id = 0;
void *srq_buf = NULL;
for (index = 0; index < v_buf_num; index++) {
buf_id = v_buf_id[index];
if (buf_id < v_hba->els_srq_info.valid_wqe_num) {
srq_buf = hifc_get_els_buf_by_userid(
v_hba,
(unsigned short)buf_id);
} else {
srq_buf = NULL;
}
/* A NULL buffer means the buffer id is invalid; stop
 * reclaiming and exit directly.
 */
if (!srq_buf)
break;
hifc_post_els_srq_wqe(&v_hba->els_srq_info,
(unsigned short)buf_id);
}
}
static unsigned int hifc_check_els_gs_valid(struct hifc_hba_s *v_hba,
union hifcoe_scqe_u *v_scqe,
struct unf_frame_pkg_s *v_pkg,
unsigned int *v_buf_id,
unsigned int buf_num,
unsigned int frame_len)
{
unsigned int ox_id = INVALID_VALUE32;
ox_id = v_pkg->frame_head.oxid_rxid >> 16;
/* Discard directly if the frame is too short, the SCQE carries an
 * error code, or the user id count is abnormal
 */
if ((sizeof(struct hifc_fc_frame_header) > frame_len) ||
(HIFC_SCQE_HAS_ERRCODE(v_scqe)) ||
(buf_num > HIFC_ELS_SRQ_BUF_NUM)) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO,
UNF_LOG_LOGIN_ATT, UNF_KEVENT,
"[event]Port(0x%x) get scqe type(0x%x) payload len(0x%x),scq status(0x%x),user id num(0x%x) abnormal",
v_hba->port_cfg.port_id,
HIFC_GET_SCQE_TYPE(v_scqe),
frame_len,
HIFC_GET_SCQE_STATUS(v_scqe),
buf_num);
/* ELS RSP Special Processing */
if (HIFC_GET_SCQE_TYPE(v_scqe) == HIFC_SCQE_ELS_RSP) {
if (HIFC_SCQE_ERR_TO_CM(v_scqe)) {
v_pkg->status = UNF_IO_FAILED;
(void)hifc_rcv_els_rsp(v_hba, v_pkg, ox_id);
} else {
HIFC_HBA_STAT(v_hba,
HIFC_STAT_ELS_RSP_EXCH_REUSE);
}
}
/* GS RSP Special Processing */
if (HIFC_GET_SCQE_TYPE(v_scqe) == HIFC_SCQE_GS_RSP) {
if (HIFC_SCQE_ERR_TO_CM(v_scqe)) {
v_pkg->status = UNF_IO_FAILED;
(void)hifc_rcv_gs_rsp(v_hba, v_pkg, ox_id);
} else {
HIFC_HBA_STAT(v_hba,
HIFC_STAT_GS_RSP_EXCH_REUSE);
}
}
/* Reclaim srq */
if (buf_num <= HIFC_ELS_SRQ_BUF_NUM)
hifc_reclaim_srq_buff(v_hba, v_buf_id, buf_num);
return UNF_RETURN_ERROR;
}
/* For ELS CMD, check the validity of the buffers posted by the ucode */
if (HIFC_GET_SCQE_TYPE(v_scqe) == HIFC_SCQE_ELS_CMND) {
if (hifc_check_srq_buf_valid(v_hba, v_buf_id, buf_num) !=
RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR,
UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]Port(0x%x) get els cmnd scqe user id num(0x%x) abnormal, as some srq buff is null",
v_hba->port_cfg.port_id, buf_num);
hifc_reclaim_srq_buff(v_hba, v_buf_id, buf_num);
return UNF_RETURN_ERROR;
}
}
return RETURN_OK;
}
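/*
 * Delayed PLOGI handling for point-to-point topology: a PLOGI received
 * from the ELS SRQ before anything has arrived on the Root RQ is not
 * completed immediately. The last-packet flag is withheld, the package is
 * cached in delay_info and a HIFC_SRQ_PROCESS_DELAY_MS delayed work is
 * armed; the Root RQ receive path (hifc_rq_rcv_els_frame) later cancels
 * the work and pushes the cached package to the CM layer, or the delayed
 * work fires on timeout (its handler lives outside this file).
 */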
static unsigned int hifc_scq_rcv_els_cmd(struct hifc_hba_s *v_hba,
union hifcoe_scqe_u *v_scqe)
{
unsigned int ret = RETURN_OK;
unsigned int pld_len = 0;
unsigned int hdr_len = 0;
unsigned int frame_len = 0;
unsigned int rcv_data_len = 0;
unsigned int max_buf_num = 0;
unsigned short buf_id = 0;
unsigned int index = 0;
unsigned char *pld = NULL;
struct unf_frame_pkg_s pkg = { 0 };
struct hifcoe_scqe_rcv_els_cmd_s *els_cmd = NULL;
struct hifc_fc_frame_header *els_frame = NULL;
struct hifc_fc_frame_header local_fc_frame = { 0 };
void *els_buf = NULL;
int first_frame = UNF_FALSE;
unsigned long flags = 0;
unsigned char srq_delay_flag = 0;
els_cmd = &v_scqe->rcv_els_cmd;
frame_len = els_cmd->wd3.data_len;
max_buf_num = els_cmd->wd3.user_id_num;
pkg.xchg_contex = NULL;
pkg.status = UNF_IO_SUCCESS;
/* Check the validity of error codes and buff. If an exception occurs,
* discard the error code
*/
ret = hifc_check_els_gs_valid(v_hba, v_scqe, &pkg, els_cmd->user_id,
max_buf_num, frame_len);
if (ret != RETURN_OK)
return RETURN_OK;
/* Send data to COM cyclically */
for (index = 0; index < max_buf_num; index++) {
/* Exception record, which is not processed currently */
if (rcv_data_len >= frame_len) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR,
UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]Port(0x%x) get els cmd date len(0x%x) is bigger than fream len(0x%x)",
v_hba->port_cfg.port_id,
rcv_data_len, frame_len);
}
buf_id = (unsigned short)els_cmd->user_id[index];
els_buf = hifc_get_els_buf_by_userid(v_hba, buf_id);
/* Obtain payload address */
pld = (unsigned char *)(els_buf);
hdr_len = 0;
first_frame = UNF_FALSE;
if (index == 0) {
els_frame = (struct hifc_fc_frame_header *)els_buf;
pld = (unsigned char *)(els_frame + 1);
hdr_len = sizeof(struct hifc_fc_frame_header);
first_frame = UNF_TRUE;
memcpy(&local_fc_frame, els_frame,
sizeof(struct hifc_fc_frame_header));
hifc_big_to_cpu32(&local_fc_frame,
sizeof(struct hifc_fc_frame_header));
memcpy(&pkg.frame_head, &local_fc_frame,
sizeof(pkg.frame_head));
}
/* Calculate the payload length */
pkg.last_pkg_flag = 0;
pld_len = HIFC_SRQ_ELS_SGE_LEN;
if ((rcv_data_len + HIFC_SRQ_ELS_SGE_LEN) >= frame_len) {
pkg.last_pkg_flag = 1;
pld_len = frame_len - rcv_data_len;
if (unlikely(
(v_hba->active_topo == UNF_TOP_P2P_MASK) &&
(v_hba->delay_info.root_rq_rcvd_flag == 0))) {
/* Only data is pushed for the first time, but
* the last packet flag is not set
*/
pkg.last_pkg_flag = 0;
srq_delay_flag = 1;
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR,
UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x) revd els from srq, and need delay processed, topo(0x%x)",
v_hba->port_cfg.port_id,
v_hba->active_topo);
}
}
/* Push data to COM */
if (ret == RETURN_OK) {
ret = hifc_rcv_els_cmnd(v_hba, &pkg, pld,
(pld_len - hdr_len),
first_frame);
/* If the plogi arrives before the flogi, the pkg is
* saved, and the last packet is pushed
* when the root rq contains content.
*/
if (unlikely(srq_delay_flag == 1)) {
spin_lock_irqsave(&v_hba->delay_info.srq_lock,
flags);
memcpy(&v_hba->delay_info.pkg, &pkg,
sizeof(pkg));
v_hba->delay_info.srq_delay_flag = 1;
v_hba->delay_info.pkg.last_pkg_flag = 1;
/* Add a 20-ms timer to prevent the root rq
* from processing data
*/
(void)queue_delayed_work(
v_hba->work_queue,
&v_hba->delay_info.del_work,
(unsigned long)
msecs_to_jiffies((unsigned int)
HIFC_SRQ_PROCESS_DELAY_MS));
spin_unlock_irqrestore(
&v_hba->delay_info.srq_lock, flags);
}
}
/* Reclaim srq buffer */
hifc_post_els_srq_wqe(&v_hba->els_srq_info, buf_id);
rcv_data_len += pld_len;
}
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Port(0x%x) recv ELS Type(0x%x) Cmnd(0x%x) OXID(0x%x) RXID(0x%x) SID(0x%x) DID(0x%x) %u",
v_hba->port_cfg.port_id,
pkg.type,
pkg.cmnd,
els_cmd->wd2.ox_id,
els_cmd->wd2.rx_id,
els_cmd->wd1.sid,
els_cmd->wd0.did,
ret);
return ret;
}
static unsigned int hifc_get_els_gs_pld_len(struct hifc_hba_s *v_hba,
unsigned int v_rcv_data_len,
unsigned int v_frame_len)
{
unsigned int pld_len;
/* Exception record, which is not processed currently */
if (v_rcv_data_len >= v_frame_len) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]Port(0x%x) get els rsp date len(0x%x) is bigger than fream len(0x%x)",
v_hba->port_cfg.port_id,
v_rcv_data_len, v_frame_len);
}
pld_len = HIFC_SRQ_ELS_SGE_LEN;
if ((v_rcv_data_len + HIFC_SRQ_ELS_SGE_LEN) >= v_frame_len)
pld_len = v_frame_len - v_rcv_data_len;
return pld_len;
}
static unsigned int hifc_scq_rcv_els_rsp(struct hifc_hba_s *v_hba,
union hifcoe_scqe_u *v_scqe)
{
unsigned int ret = RETURN_OK;
unsigned int pld_len = 0;
unsigned int hdr_len = 0;
unsigned int frame_len = 0;
unsigned int rcv_data_len = 0;
unsigned int max_buf_num = 0;
unsigned short buf_id = 0;
unsigned int index = 0;
unsigned int ox_id = (~0);
struct unf_frame_pkg_s pkg = { 0 };
struct hifcoe_scqe_rcv_els_gs_rsp_s *els_rsp;
struct hifc_fc_frame_header *els_frame = NULL;
void *els_buf = NULL;
unsigned char *pld = NULL;
els_rsp = &v_scqe->rcv_els_gs_rsp;
frame_len = els_rsp->wd2.data_len;
max_buf_num = els_rsp->wd4.user_id_num;
ox_id = (unsigned int)(els_rsp->wd1.ox_id) - v_hba->exit_base;
pkg.frame_head.oxid_rxid = (unsigned int)(els_rsp->wd1.rx_id) |
ox_id << 16;
pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME] = els_rsp->magic_num;
pkg.frame_head.csctl_sid = els_rsp->wd4.sid;
pkg.frame_head.rctl_did = els_rsp->wd3.did;
pkg.status = UNF_IO_SUCCESS;
/* Handle errors first: if the ELS RSP carries an error code, only
 * the OX_ID can be reported to the CM layer.
 */
ret = hifc_check_els_gs_valid(v_hba, v_scqe, &pkg,
els_rsp->user_id, max_buf_num, frame_len);
if (ret != RETURN_OK)
return RETURN_OK;
/* if this is echo rsp */
if (els_rsp->wd3.echo_rsp == UNF_TRUE) {
/* The echo timestamps fill the last 4 dwords of the ELS RSP user_id */
pkg.private[PKG_PRIVATE_ECHO_CMD_RCV_TIME] =
els_rsp->user_id[5];
pkg.private[PKG_PRIVATE_ECHO_RSP_SND_TIME] =
els_rsp->user_id[6];
pkg.private[PKG_PRIVATE_ECHO_CMD_SND_TIME] =
els_rsp->user_id[7];
pkg.private[PKG_PRIVATE_ECHO_ACC_RCV_TIME] =
els_rsp->user_id[8];
}
/* Send data to COM cyclically */
for (index = 0; index < max_buf_num; index++) {
/* Obtain buffer address */
els_buf = NULL;
buf_id = (unsigned short)els_rsp->user_id[index];
els_buf = hifc_get_els_buf_by_userid(v_hba, buf_id);
/* A NULL buffer means the buff id is invalid; exit
 * directly
 */
if (unlikely(!els_buf)) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR,
UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]Port(0x%x) OXID(0x%x) RXID(0x%x) SID(0x%x) DID(0x%x) Index(0x%x) get els rsp buff user id(0x%x) abnormal",
v_hba->port_cfg.port_id, ox_id,
els_rsp->wd1.rx_id, els_rsp->wd4.sid,
els_rsp->wd3.did, index, buf_id);
if (index == 0) {
pkg.status = UNF_IO_FAILED;
ret = hifc_rcv_els_rsp(v_hba, &pkg, ox_id);
}
return ret;
}
hdr_len = 0;
pld = (unsigned char *)(els_buf);
if (index == 0) {
hdr_len = sizeof(struct hifc_fc_frame_header);
els_frame = (struct hifc_fc_frame_header *)els_buf;
pld = (unsigned char *)(els_frame + 1);
}
/* Calculate the payload length */
pld_len = hifc_get_els_gs_pld_len(v_hba, rcv_data_len,
frame_len);
/* Push data to COM */
if (ret == RETURN_OK) {
ret = hifc_recv_els_rsp_payload(v_hba, &pkg, ox_id, pld,
(pld_len - hdr_len));
}
/* Reclaim srq buffer */
hifc_post_els_srq_wqe(&v_hba->els_srq_info, buf_id);
rcv_data_len += pld_len;
}
if ((els_rsp->wd3.end_rsp) && (ret == RETURN_OK))
ret = hifc_rcv_els_rsp(v_hba, &pkg, ox_id);
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Port(0x%x) receive ELS RSP OXID(0x%x) RXID(0x%x) SID(0x%x) DID(0x%x) end_rsp(0x%x) user_num(0x%x)",
v_hba->port_cfg.port_id,
ox_id,
els_rsp->wd1.rx_id,
els_rsp->wd4.sid,
els_rsp->wd3.did,
els_rsp->wd3.end_rsp,
els_rsp->wd4.user_id_num);
return ret;
}
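/*
 * Handle a GS (CT) response SCQE: validate it, push the payload of
 * every SRQ buffer up to the CM layer, reclaim the buffers and report
 * the complete GS response once end_rsp is set.
 */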
static unsigned int hifc_scq_rcv_gs_rsp(struct hifc_hba_s *v_hba,
union hifcoe_scqe_u *v_scqe)
{
unsigned int ret = RETURN_OK;
unsigned int pld_len = 0;
unsigned int hdr_len = 0;
unsigned int frame_len = 0;
unsigned int rcv_data_len = 0;
unsigned int max_buf_num = 0;
unsigned short buf_id = 0;
unsigned int index = 0;
unsigned int ox_id = (~0);
struct unf_frame_pkg_s pkg = { 0 };
struct hifcoe_scqe_rcv_els_gs_rsp_s *gs_rsp = NULL;
struct hifc_fc_frame_header *gs_frame = NULL;
void *gs_buf = NULL;
unsigned char *pld = NULL;
gs_rsp = &v_scqe->rcv_els_gs_rsp;
frame_len = gs_rsp->wd2.data_len;
max_buf_num = gs_rsp->wd4.user_id_num;
ox_id = (unsigned int)(gs_rsp->wd1.ox_id) - v_hba->exit_base;
pkg.frame_head.oxid_rxid = (unsigned int)(gs_rsp->wd1.rx_id) |
ox_id << 16;
pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME] = gs_rsp->magic_num;
pkg.frame_head.csctl_sid = gs_rsp->wd4.sid;
pkg.frame_head.rctl_did = gs_rsp->wd3.did;
pkg.status = UNF_IO_SUCCESS;
if (gs_rsp->wd3.end_rsp)
HIFC_HBA_STAT(v_hba, HIFC_STAT_LAST_GS_SCQE);
/* Exception handling: The GS RSP returns an error code. Only the OXID
* can submit the error code to the CM layer
*/
ret = hifc_check_els_gs_valid(v_hba, v_scqe, &pkg, gs_rsp->user_id,
max_buf_num, frame_len);
if (ret != RETURN_OK)
return RETURN_OK;
/* Send data to COM cyclically */
for (index = 0; index < max_buf_num; index++) {
/* Obtain buffer address */
gs_buf = NULL;
buf_id = (unsigned short)gs_rsp->user_id[index];
gs_buf = hifc_get_els_buf_by_userid(v_hba, buf_id);
if (unlikely(!gs_buf)) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR,
UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]Port(0x%x) OXID(0x%x) RXID(0x%x) SID(0x%x) DID(0x%x) Index(0x%x) get gs rsp scqe user id(0x%x) abnormal",
v_hba->port_cfg.port_id, ox_id,
gs_rsp->wd1.rx_id, gs_rsp->wd4.sid,
gs_rsp->wd3.did, index, buf_id);
if (index == 0) {
pkg.status = UNF_IO_FAILED;
ret = hifc_rcv_gs_rsp(v_hba, &pkg, ox_id);
}
return ret;
}
/* Obtain payload address */
hdr_len = 0;
pld = (unsigned char *)(gs_buf);
if (index == 0) {
hdr_len = sizeof(struct hifc_fc_frame_header);
gs_frame = (struct hifc_fc_frame_header *)gs_buf;
pld = (unsigned char *)(gs_frame + 1);
}
/* Calculate the payload length */
pld_len = hifc_get_els_gs_pld_len(v_hba, rcv_data_len,
frame_len);
/* Push data to COM */
if (ret == RETURN_OK)
ret = hifc_rcv_gs_rsp_payload(v_hba, &pkg, ox_id, pld,
(pld_len - hdr_len));
/* Reclaim srq buffer */
hifc_post_els_srq_wqe(&v_hba->els_srq_info, buf_id);
rcv_data_len += pld_len;
}
if ((gs_rsp->wd3.end_rsp) && (ret == RETURN_OK))
ret = hifc_rcv_gs_rsp(v_hba, &pkg, ox_id);
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Port(0x%x) recv GS RSP OXID(0x%x) RXID(0x%x) SID(0x%x) DID(0x%x) end_rsp(0x%x) user_num(0x%x)",
v_hba->port_cfg.port_id,
ox_id,
gs_rsp->wd1.rx_id,
gs_rsp->wd4.sid,
gs_rsp->wd3.did,
gs_rsp->wd3.end_rsp,
gs_rsp->wd4.user_id_num);
return ret;
}
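/*
 * Handle the send-completion status of an ELS response: subtract
 * exit_base from the RX_ID, translate the SCQE error code into the
 * package status and report it to the CM layer.
 */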
static unsigned int hifc_scq_rcv_els_rsp_sts(struct hifc_hba_s *v_hba,
union hifcoe_scqe_u *v_scqe)
{
unsigned int ret = UNF_RETURN_ERROR;
unsigned int rx_id = INVALID_VALUE32;
struct unf_frame_pkg_s pkg = { 0 };
struct hifcoe_scqe_comm_rsp_sts_s *els_rsp_sts = NULL;
els_rsp_sts = &v_scqe->comm_sts;
rx_id = (unsigned int)els_rsp_sts->wd0.rx_id;
rx_id = rx_id - v_hba->exit_base;
pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME] = els_rsp_sts->magic_num;
pkg.frame_head.oxid_rxid = rx_id |
(unsigned int)(els_rsp_sts->wd0.ox_id) << 16;
if (unlikely(HIFC_SCQE_HAS_ERRCODE(v_scqe)))
pkg.status = UNF_IO_FAILED;
else
pkg.status = UNF_IO_SUCCESS;
ret = hifc_rcv_els_rsp_sts(v_hba, &pkg, rx_id);
return ret;
}
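/*
 * An R_Port is considered valid when its parent context object exists
 * and the SCQE XID matches the parent SQ context id (compared with
 * HIFC_CQM_XID_MASK applied).
 */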
static unsigned int hifc_check_rport_is_valid(
const struct hifc_parent_queue_info_s *v_prntq_info,
unsigned int scqe_xid)
{
if (v_prntq_info->parent_ctx.cqm_parent_ctx_obj) {
if ((v_prntq_info->parent_sq_info.context_id &
HIFC_CQM_XID_MASK) == (scqe_xid & HIFC_CQM_XID_MASK))
return RETURN_OK;
}
return UNF_RETURN_ERROR;
}
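/*
 * Handle a session offload completion: validate the rport index and
 * context id, record the cache id, mark the parent queue as offloaded
 * and pop a destroy-parent-queue SQE that was parked while the
 * offload was in progress.
 */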
static unsigned int hifc_scq_rcv_offload_sts(struct hifc_hba_s *v_hba,
union hifcoe_scqe_u *v_scqe)
{
unsigned int rport_valid = UNF_RETURN_ERROR;
unsigned int rport_index = 0;
unsigned int cache_id = 0;
unsigned int local_ctx_id = 0;
unsigned long flag = 0;
struct hifc_parent_queue_info_s *prnt_qinfo = NULL;
struct hifcoe_scqe_sess_sts_s *offload_sts = NULL;
struct hifc_destroy_ctrl_info_s destroy_sqe_info = { 0 };
offload_sts = &v_scqe->sess_sts;
rport_index = offload_sts->wd1.conn_id;
cache_id = offload_sts->wd2.cid;
local_ctx_id = offload_sts->wd0.xid_qpn;
if (rport_index >= UNF_HIFC_MAXRPORT_NUM) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]Port(0x%x) receive an error offload status: rport index(0x%x) is invalid, cache id(0x%x)",
v_hba->port_cfg.port_id, rport_index, cache_id);
return UNF_RETURN_ERROR;
}
prnt_qinfo = &v_hba->parent_queue_mgr->parent_queues[rport_index];
rport_valid = hifc_check_rport_is_valid(prnt_qinfo, local_ctx_id);
if (rport_valid != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]Port(0x%x) receive an error offload status: rport index(0x%x), context id(0x%x) is invalid",
v_hba->port_cfg.port_id, rport_index, local_ctx_id);
return UNF_RETURN_ERROR;
}
/* off_load failed */
if (HIFC_GET_SCQE_STATUS(v_scqe) != HIFC_COMPLETION_STATUS_SUCCESS) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]Port(0x%x), rport(0x%x), context id(0x%x), cache id(0x%x), offload failed",
v_hba->port_cfg.port_id, rport_index,
local_ctx_id, cache_id);
return UNF_RETURN_ERROR;
}
spin_lock_irqsave(&prnt_qinfo->parent_queue_state_lock, flag);
prnt_qinfo->parent_sq_info.cache_id = cache_id;
prnt_qinfo->offload_state = HIFC_QUEUE_STATE_OFFLOADED;
atomic_set(&prnt_qinfo->parent_sq_info.sq_cashed, UNF_TRUE);
if (prnt_qinfo->parent_sq_info.destroy_sqe.valid == UNF_TRUE) {
destroy_sqe_info.valid =
prnt_qinfo->parent_sq_info.destroy_sqe.valid;
destroy_sqe_info.rport_index =
prnt_qinfo->parent_sq_info.destroy_sqe.rport_index;
destroy_sqe_info.time_out =
prnt_qinfo->parent_sq_info.destroy_sqe.time_out;
destroy_sqe_info.start_jiff =
prnt_qinfo->parent_sq_info.destroy_sqe.start_jiff;
destroy_sqe_info.rport_info.nport_id =
prnt_qinfo->parent_sq_info.destroy_sqe.rport_info.nport_id;
destroy_sqe_info.rport_info.rport_index =
prnt_qinfo->parent_sq_info.destroy_sqe.rport_info.rport_index;
destroy_sqe_info.rport_info.port_name =
prnt_qinfo->parent_sq_info.destroy_sqe.rport_info.port_name;
prnt_qinfo->parent_sq_info.destroy_sqe.valid = UNF_FALSE;
}
spin_unlock_irqrestore(&prnt_qinfo->parent_queue_state_lock, flag);
hifc_pop_destroy_parent_queue_sqe((void *)v_hba, &destroy_sqe_info);
return RETURN_OK;
}
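/*
 * Map a GS (CT) command code to its request payload length and the
 * expected response payload length.
 */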
unsigned int hifc_get_gs_req_and_rsp_pld_len(unsigned short cmd_code,
unsigned int *v_gs_pld_len,
unsigned int *v_gs_rsp_pld_len)
{
UNF_CHECK_VALID(0x4917, UNF_TRUE, v_gs_pld_len,
return UNF_RETURN_ERROR);
UNF_CHECK_VALID(0x4917, UNF_TRUE, v_gs_rsp_pld_len,
return UNF_RETURN_ERROR);
switch (cmd_code) {
case NS_GPN_ID:
*v_gs_pld_len = UNF_GPNID_PAYLOAD_LEN;
*v_gs_rsp_pld_len = UNF_GPNID_RSP_PAYLOAD_LEN;
break;
case NS_GNN_ID:
*v_gs_pld_len = UNF_GNNID_PAYLOAD_LEN;
*v_gs_rsp_pld_len = UNF_GNNID_RSP_PAYLOAD_LEN;
break;
case NS_GFF_ID:
*v_gs_pld_len = UNF_GFFID_PAYLOAD_LEN;
*v_gs_rsp_pld_len = UNF_GFFID_RSP_PAYLOAD_LEN;
break;
case NS_GID_FT:
case NS_GID_PT:
*v_gs_pld_len = UNF_GID_PAYLOAD_LEN;
*v_gs_rsp_pld_len = UNF_GID_ACC_PAYLOAD_LEN;
break;
case NS_RFT_ID:
*v_gs_pld_len = UNF_RFTID_PAYLOAD_LEN;
*v_gs_rsp_pld_len = UNF_RFTID_RSP_PAYLOAD_LEN;
break;
case NS_RFF_ID:
*v_gs_pld_len = UNF_RFFID_PAYLOAD_LEN;
*v_gs_rsp_pld_len = UNF_RFFID_RSP_PAYLOAD_LEN;
break;
case NS_GA_NXT:
*v_gs_pld_len = UNF_GID_PAYLOAD_LEN;
*v_gs_rsp_pld_len = UNF_GID_ACC_PAYLOAD_LEN;
break;
case NS_GIEL:
*v_gs_pld_len = UNF_RFTID_RSP_PAYLOAD_LEN;
*v_gs_rsp_pld_len = UNF_GID_ACC_PAYLOAD_LEN;
break;
default:
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[warn]Unknown GS commond type(0x%x)", cmd_code);
return UNF_RETURN_ERROR;
}
return RETURN_OK;
}
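/*
 * Build a GS request SQE (control section, task section and an SGE
 * pointing at the command payload) and enqueue it on the parent SQ of
 * the target R_Port.
 */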
static unsigned int hifc_send_gs_via_parent(void *v_hba,
struct unf_frame_pkg_s *v_pkg)
{
unsigned short ox_id, rx_id;
unsigned short cmd_code = UNF_ZERO;
unsigned int ret = UNF_RETURN_ERROR;
unsigned int gs_pld_len = UNF_ZERO;
unsigned int gs_rsp_pld_len = UNF_ZERO;
void *gs_pld_addr = NULL;
struct hifc_hba_s *hba = NULL;
struct hifc_parent_sq_info_s *sq_info;
struct hifcoe_sqe_s sqe;
unsigned long long fram_phy_addr;
hba = (struct hifc_hba_s *)v_hba;
memset(&sqe, 0, sizeof(struct hifcoe_sqe_s));
sq_info = hifc_find_parent_sq_by_pkg(hba, v_pkg);
if (!sq_info) {
UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]Get NULL parent SQ information");
return ret;
}
cmd_code = HIFC_GET_GS_CMND_CODE(v_pkg->cmnd);
ret = hifc_get_gs_req_and_rsp_pld_len(cmd_code, &gs_pld_len,
&gs_rsp_pld_len);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]Port(0x%x) send GS SID(0x%x) DID(0x%x), get error GS request and response payload length",
hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid,
v_pkg->frame_head.rctl_did);
return ret;
}
gs_pld_addr = (void *)(HIFC_GET_CMND_PAYLOAD_ADDR(v_pkg));
fram_phy_addr = v_pkg->unf_cmnd_pload_bl.buf_dma_addr +
sizeof(struct unf_fchead_s);
if (cmd_code == NS_GID_FT || cmd_code == NS_GID_PT)
gs_pld_addr = (void *)(UNF_GET_GID_PAYLOAD(v_pkg));
/* Assemble the SQE Control Section part */
hifc_build_service_wqe_ctrl_section(
&sqe.ctrl_sl,
HIFC_BYTES_TO_QW_NUM(HIFC_SQE_TS_SIZE),
HIFC_BYTES_TO_QW_NUM(sizeof(struct hifcoe_variable_sge_s)));
/* Assemble the SQE Task Section part */
ox_id = UNF_GET_OXID(v_pkg) + hba->exit_base;
rx_id = UNF_GET_RXID(v_pkg);
hifc_build_service_wqe_ts_common(&sqe.ts_sl,
sq_info->rport_index, ox_id,
rx_id, HIFC_LSW(gs_pld_len));
hifc_build_gs_wqe_ts_req(&sqe, UNF_GETXCHGALLOCTIME(v_pkg));
hifc_build_els_gs_wqe_sge(&sqe, gs_pld_addr, fram_phy_addr, gs_pld_len,
sq_info->context_id, v_hba);
ret = hifc_parent_sq_enqueue(sq_info, &sqe);
return ret;
}
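/*
 * Entry point for sending a GS command. The request is only issued
 * through the parent SQ after the session has been offloaded, i.e.
 * after PLOGI; otherwise an error is returned.
 */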
unsigned int hifc_send_gs_cmnd(void *v_hba, struct unf_frame_pkg_s *v_pkg)
{
unsigned int ret = UNF_RETURN_ERROR;
struct hifc_hba_s *hba = NULL;
struct hifc_parent_queue_info_s *prnt_qinfo = NULL;
UNF_CHECK_VALID(0x4913, UNF_TRUE, v_hba, return UNF_RETURN_ERROR);
UNF_CHECK_VALID(0x4914, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR);
UNF_CHECK_VALID(0x4915, UNF_TRUE, UNF_GET_SFS_ENTRY(v_pkg),
return UNF_RETURN_ERROR);
UNF_CHECK_VALID(0x4916, UNF_TRUE, HIFC_GET_CMND_PAYLOAD_ADDR(v_pkg),
return UNF_RETURN_ERROR);
HIFC_CHECK_PKG_ALLOCTIME(v_pkg);
hba = (struct hifc_hba_s *)v_hba;
prnt_qinfo = hifc_find_parent_queue_info_by_pkg(hba, v_pkg);
if (!prnt_qinfo) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]Port(0x%x) send GS SID(0x%x) DID(0x%x), get a null parent queue information",
hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid,
v_pkg->frame_head.rctl_did);
return ret;
}
if (HIFC_RPORT_NOT_OFFLOADED(prnt_qinfo)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"[info]Port(0x%x) send GS SID(0x%x) DID(0x%x), send GS Request before PLOGI",
hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid,
v_pkg->frame_head.rctl_did);
return ret;
}
ret = hifc_send_gs_via_parent(v_hba, v_pkg);
return ret;
}
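/*
 * Derive the BLS (ABTS response) payload length from R_CTL:
 * BA_ACC and BA_RJT use different payload sizes.
 */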
static unsigned int hifc_get_bls_pld_len(struct unf_frame_pkg_s *v_pkg,
unsigned int *v_frame_len)
{
unsigned int ret = RETURN_OK;
unsigned int rctl = 0;
UNF_CHECK_VALID(0x4917, UNF_TRUE, v_frame_len, return UNF_RETURN_ERROR);
rctl = UNF_GET_FC_HEADER_RCTL(&v_pkg->frame_head);
if (rctl == HIFC_RCTL_BLS_ACC) {
/* BA_ACC */
*v_frame_len = sizeof(struct unf_ba_acc_s);
} else if (rctl == HIFC_RCTL_BLS_RJT) {
/* BA_RJT */
*v_frame_len = sizeof(struct unf_ba_rjt_s);
} else {
UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[warn]PKG Rclt(0x%x) not BLS ACC or RJT", rctl);
*v_frame_len = 0;
ret = UNF_RETURN_ERROR;
}
return ret;
}
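/*
 * Send an ABTS_RSP (BA_ACC or BA_RJT) through the root CMDQ. This
 * path is used when the session is not offloaded to a parent SQ.
 */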
static unsigned int hifc_send_bls_via_cmdq(struct hifc_hba_s *v_hba,
struct unf_frame_pkg_s *v_pkg)
{
unsigned int ret = UNF_RETURN_ERROR;
unsigned int rctl = 0;
unsigned int bls_pld_len = 0;
unsigned short rx_id = INVALID_VALUE16;
unsigned short ox_id = INVALID_VALUE16;
unsigned short exch_id = INVALID_VALUE16;
unsigned char *bls_pld_addr = NULL;
union hifc_cmdqe_u cmdqe;
struct hifc_parent_sq_info_s *sq_info = NULL;
sq_info = hifc_find_parent_sq_by_pkg(v_hba, v_pkg);
if (!sq_info) {
HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[warn]Port(0x%x) send BLS SID_DID(0x%x_0x%x) with null parent queue information",
v_hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid,
v_pkg->frame_head.rctl_did);
return UNF_RETURN_ERROR;
}
/* Determine whether the response is ACC or RJT and obtain the payload
* length of the ABTS_RSP
*/
ret = hifc_get_bls_pld_len(v_pkg, &bls_pld_len);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Port(0x%x) cmdq send BLS PKG DID(0x%x) failed",
v_hba->port_index, v_pkg->frame_head.rctl_did);
return UNF_RETURN_ERROR;
}
rctl = UNF_GET_FC_HEADER_RCTL(&v_pkg->frame_head);
exch_id = (v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]) & 0xffff;
if ((exch_id == INVALID_VALUE16) && (rctl == HIFC_RCTL_BLS_ACC)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Port(0x%x) cmdq send BA_ACC with error RXID(0xffff)",
v_hba->port_index);
return UNF_RETURN_ERROR;
}
/*
* FC-FS-3 15.3.3.1 Description:
* The OX_ID and RX_ID shall be set to match the Exchange in which
* the ABTS frame was transmitted.
*/
rx_id = UNF_GET_FC_HEADER_RXID(&v_pkg->frame_head);
ox_id = UNF_GET_FC_HEADER_OXID(&v_pkg->frame_head);
if (exch_id != INVALID_VALUE16) {
exch_id = exch_id + v_hba->exit_base;
} else {
/* The exchange ID is not valid here: the RX_ID was not
 * allocated to the CM, which can happen when the CM responds
 * with a BA_RJT.
 */
}
memset(&cmdqe, 0, sizeof(cmdqe));
hifc_build_cmdqe_common(&cmdqe, HIFC_CMDQE_ABTS_RSP, exch_id);
cmdqe.snd_abts_rsp.wd1.ox_id = ox_id;
cmdqe.snd_abts_rsp.wd1.port_id = v_hba->port_index;
cmdqe.snd_abts_rsp.wd1.payload_len = bls_pld_len;
cmdqe.snd_abts_rsp.wd1.rsp_type = ((rctl == HIFC_RCTL_BLS_ACC) ? 0 : 1);
cmdqe.snd_abts_rsp.wd2.conn_id = sq_info->rport_index;
cmdqe.snd_abts_rsp.wd2.scqn = hifc_get_rport_maped_sts_scqn(v_hba,
sq_info->rport_index);
cmdqe.snd_abts_rsp.wd3.xid = sq_info->context_id;
cmdqe.snd_abts_rsp.wd4.cid = sq_info->cache_id;
cmdqe.snd_abts_rsp.wd5.req_rx_id = rx_id;
bls_pld_addr = HIFC_GET_RSP_PAYLOAD_ADDR(v_pkg);
memcpy(cmdqe.snd_abts_rsp.payload, bls_pld_addr, bls_pld_len);
/* Send the ABTS_RSP command via ROOT CMDQ. */
ret = hifc_root_cmdq_enqueue(v_hba, &cmdqe, sizeof(cmdqe.snd_abts_rsp));
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Port(0x%x) RPort(0x%x) send ABTS_RSP OXID(0x%x) RXID(0x%x) EXCHID(0x%x)",
v_hba->port_cfg.port_id, sq_info->rport_index, ox_id,
rx_id, exch_id);
return ret;
}
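/*
 * Send an ABTS request through the parent SQ of an offloaded session.
 * The ABTS carries no payload, so the SQE is built with bdsl = 0.
 */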
static unsigned int hifc_send_bls_via_parent(struct hifc_hba_s *v_hba,
struct unf_frame_pkg_s *v_pkg)
{
unsigned int ret = UNF_RETURN_ERROR;
unsigned short ox_id = INVALID_VALUE16;
unsigned short rx_id = INVALID_VALUE16;
struct hifcoe_sqe_s sqe;
struct hifc_parent_sq_info_s *sq_info = NULL;
struct hifc_parent_queue_info_s *prnt_qinfo = NULL;
UNF_CHECK_VALID(0x5015, UNF_TRUE, (v_pkg->type == UNF_PKG_BLS_REQ),
return UNF_RETURN_ERROR);
memset(&sqe, 0, sizeof(struct hifcoe_sqe_s));
prnt_qinfo = hifc_find_parent_queue_info_by_pkg(v_hba, v_pkg);
if (!prnt_qinfo) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) send BLS SID_DID(0x%x_0x%x) with null parent queue information",
v_hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid,
v_pkg->frame_head.rctl_did);
return ret;
}
sq_info = hifc_find_parent_sq_by_pkg(v_hba, v_pkg);
if (!sq_info) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) send ABTS SID_DID(0x%x_0x%x) with null parent queue information",
v_hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid,
v_pkg->frame_head.rctl_did);
return ret;
}
rx_id = UNF_GET_RXID(v_pkg);
ox_id = UNF_GET_OXID(v_pkg) + v_hba->exit_base;
/* Assemble the SQE Control Section part.
* The ABTS does not have Payload. bdsl=0
*/
hifc_build_service_wqe_ctrl_section(
&sqe.ctrl_sl,
HIFC_BYTES_TO_QW_NUM(HIFC_SQE_TS_SIZE), 0);
/* Assemble the SQE Task Section BLS Common part. DW2 of the BLS WQE
 * is reserved and therefore set to 0
 */
hifc_build_service_wqe_ts_common(&sqe.ts_sl, sq_info->rport_index,
ox_id, rx_id, 0);
/* Assemble the special part of the ABTS */
hifc_build_bls_wqe_ts_req(&sqe, v_pkg->frame_head.parameter,
UNF_GETXCHGALLOCTIME(v_pkg));
ret = hifc_parent_sq_enqueue(sq_info, &sqe);
return ret;
}
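/*
 * Entry point for sending a BLS request: use the parent SQ when the
 * R_Port is offloaded, otherwise fall back to the root CMDQ.
 */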
unsigned int hifc_send_bls_cmnd(void *v_hba, struct unf_frame_pkg_s *v_pkg)
{
unsigned int ret = UNF_RETURN_ERROR;
struct hifc_hba_s *hba = NULL;
unsigned long flag = 0;
struct hifc_parent_queue_info_s *prnt_qinfo = NULL;
UNF_CHECK_VALID(0x4913, UNF_TRUE, v_hba, return UNF_RETURN_ERROR);
UNF_CHECK_VALID(0x4914, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR);
UNF_CHECK_VALID(0x4913, UNF_TRUE, UNF_PKG_BLS_REQ == v_pkg->type,
return UNF_RETURN_ERROR);
HIFC_CHECK_PKG_ALLOCTIME(v_pkg);
hba = (struct hifc_hba_s *)v_hba;
prnt_qinfo = hifc_find_parent_queue_info_by_pkg(hba, v_pkg);
if (!prnt_qinfo) {
HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) send BLS SID_DID(0x%x_0x%x) with null parent queue information",
hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid,
v_pkg->frame_head.rctl_did);
return ret;
}
spin_lock_irqsave(&prnt_qinfo->parent_queue_state_lock, flag);
if (HIFC_RPORT_OFFLOADED(prnt_qinfo)) {
spin_unlock_irqrestore(&prnt_qinfo->parent_queue_state_lock,
flag);
/* INI: send ABTS_REQ via parent SQ */
ret = hifc_send_bls_via_parent(hba, v_pkg);
} else {
spin_unlock_irqrestore(&prnt_qinfo->parent_queue_state_lock,
flag);
ret = hifc_send_bls_via_cmdq(hba, v_pkg);
}
return ret;
}
static unsigned int hifc_scq_rcv_flush_sq_sts(struct hifc_hba_s *v_hba,
union hifcoe_scqe_u *v_scqe)
{
/*
* RCVD sq flush sts
* --->>> continue flush or clear done
*/
unsigned int ret = UNF_RETURN_ERROR;
if (v_scqe->flush_sts.wd0.port_id != v_hba->port_index) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_EVENT, UNF_CRITICAL,
"[err]Port(0x%x) clear_sts_port_idx(0x%x) not match hba_port_idx(0x%x), stage(0x%x)",
v_hba->port_cfg.port_id,
v_scqe->clear_sts.wd0.port_id,
v_hba->port_index,
v_hba->q_set_stage);
return UNF_RETURN_ERROR;
}
if (v_scqe->flush_sts.wd0.last_flush) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EVENT, UNF_INFO,
"[info]Port(0x%x) flush sq(0x%x) done, stage(0x%x)",
v_hba->port_cfg.port_id, v_hba->next_clearing_sq,
v_hba->q_set_stage);
/* If the Flush STS is last one, send cmd done */
ret = hifc_clear_sq_wqe_done(v_hba);
} else {
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EVENT, UNF_MAJOR,
"[info]Port(0x%x) continue flush sq(0x%x), stage(0x%x)",
v_hba->port_cfg.port_id, v_hba->next_clearing_sq,
v_hba->q_set_stage);
ret = hifc_clear_pending_sq_wqe(v_hba);
}
return ret;
}
static unsigned int hifc_scq_rcv_buf_clear_sts(struct hifc_hba_s *v_hba,
union hifcoe_scqe_u *v_scqe)
{
/*
* clear: fetched sq wqe
* ---to--->>> pending sq wqe
*/
unsigned int ret = UNF_RETURN_ERROR;
if (v_scqe->clear_sts.wd0.port_id != v_hba->port_index) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_EVENT, UNF_CRITICAL,
"[err]Port(0x%x) clear_sts_port_idx(0x%x) not match hba_port_idx(0x%x), stage(0x%x)",
v_hba->port_cfg.port_id,
v_scqe->clear_sts.wd0.port_id,
v_hba->port_index,
v_hba->q_set_stage);
return UNF_RETURN_ERROR;
}
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EVENT, UNF_KEVENT,
"[info]Port(0x%x) cleared all fetched wqe, start clear sq pending wqe, stage (0x%x)",
v_hba->port_cfg.port_id, v_hba->q_set_stage);
v_hba->q_set_stage = HIFC_QUEUE_SET_STAGE_FLUSHING;
ret = hifc_clear_pending_sq_wqe(v_hba);
return ret;
}
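/*
 * Handle a session reset completion: cancel the R_Port delete timer,
 * then free the parent queue info if the queue is being destroyed and
 * the context flush-done bit is set; otherwise defer the free with a
 * delayed work item.
 */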
static unsigned int hifc_scq_rcv_sess_rst_sts(struct hifc_hba_s *v_hba,
union hifcoe_scqe_u *v_scqe)
{
unsigned int rport_index = INVALID_VALUE32;
unsigned long flag = 0;
struct hifc_parent_queue_info_s *parent_queue_info = NULL;
struct hifcoe_scqe_sess_sts_s *sess_sts =
(struct hifcoe_scqe_sess_sts_s *)(void *)v_scqe;
unsigned int ctx_flush_done;
unsigned int *ctx_dw = NULL;
int ret;
rport_index = sess_sts->wd1.conn_id;
if (rport_index >= UNF_HIFC_MAXRPORT_NUM) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]Port(0x%x) receive reset session cmd sts failed, invlaid rport_index(0x%x) status_code(0x%x) remain_cnt(0x%x)",
v_hba->port_cfg.port_id,
rport_index,
sess_sts->ch.wd0.err_code,
sess_sts->ch.wd0.cqe_remain_cnt);
return UNF_RETURN_ERROR;
}
parent_queue_info =
&v_hba->parent_queue_mgr->parent_queues[rport_index];
/*
* If only session reset is used, the offload status of sq remains
* unchanged. If a link is deleted, the offload status is set to
* destroying and is irreversible.
*/
spin_lock_irqsave(&parent_queue_info->parent_queue_state_lock, flag);
/*
 * For fault tolerance, handle the case where the connection deletion
 * has already timed out when this STS arrives: cancel_delayed_work()
 * returns 1 if the timer was cancelled successfully and 0 if the
 * timer is currently being processed.
 */
if (!cancel_delayed_work(
&parent_queue_info->parent_sq_info.del_work)) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"[info]Port(0x%x) rport_index(0x%x) delete rport timer maybe timeout",
v_hba->port_cfg.port_id,
rport_index);
}
/*
* If the SessRstSts is returned too late and the Parent Queue Info
* resource is released, OK is returned.
*/
if (parent_queue_info->offload_state != HIFC_QUEUE_STATE_DESTROYING) {
spin_unlock_irqrestore(
&parent_queue_info->parent_queue_state_lock, flag);
HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[info]Port(0x%x) reset session cmd complete, no need to free parent qinfo, rport_index(0x%x) status_code(0x%x) remain_cnt(0x%x)",
v_hba->port_cfg.port_id,
rport_index,
sess_sts->ch.wd0.err_code,
sess_sts->ch.wd0.cqe_remain_cnt);
return RETURN_OK;
}
if (parent_queue_info->parent_ctx.cqm_parent_ctx_obj) {
ctx_dw = (unsigned int *)((void *)(parent_queue_info->parent_ctx.cqm_parent_ctx_obj->vaddr));
ctx_flush_done = ctx_dw[HIFC_CTXT_FLUSH_DONE_DW_POS] &
HIFC_CTXT_FLUSH_DONE_MASK_BE;
/* memory barrier */
mb();
if (ctx_flush_done == 0) {
spin_unlock_irqrestore(
&parent_queue_info->parent_queue_state_lock,
flag);
HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN,
UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x) rport(0x%x) flushdone is not set, delay to free parent session",
v_hba->port_cfg.port_id, rport_index);
/* If the flushdone bit is not set, delay freeing the SQ info */
ret = queue_delayed_work(
v_hba->work_queue,
&parent_queue_info->parent_sq_info.flush_done_tmo_work,
(unsigned long)
msecs_to_jiffies((unsigned int)
HIFC_SQ_WAIT_FLUSH_DONE_TIMEOUT_MS));
if (ret == (int)false) {
HIFC_HBA_STAT(
v_hba,
HIFC_STAT_PARENT_SQ_QUEUE_DELAYED_WORK);
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR,
UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]Port(0x%x) rport(0x%x) queue delayed work failed iret:%d",
v_hba->port_cfg.port_id,
rport_index, ret);
}
return RETURN_OK;
}
}
spin_unlock_irqrestore(&parent_queue_info->parent_queue_state_lock,
flag);
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"[info]Port(0x%x) begin to free parent session with rport_index(0x%x)",
v_hba->port_cfg.port_id,
rport_index);
hifc_free_parent_queue_info(v_hba, parent_queue_info);
return RETURN_OK;
}
static unsigned int hifc_scq_rcv_clear_srq_sts(struct hifc_hba_s *v_hba,
union hifcoe_scqe_u *v_scqe)
{
/*
* clear ELS/Immi SRQ
* ---then--->>> Destroy SRQ
*/
struct hifc_hba_s *hba = v_hba;
struct hifc_srq_info_s *srq_info = NULL;
if (HIFC_GET_SCQE_STATUS(v_scqe) != 0) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x) clear srq failed, status(0x%x)",
v_hba->port_cfg.port_id,
HIFC_GET_SCQE_STATUS(v_scqe));
return RETURN_OK;
}
srq_info = &hba->els_srq_info;
/*
* 1: cancel timer succeed
* 0: the timer is being processed, the SQ is released when the timer
* times out
*/
if (cancel_delayed_work(&srq_info->del_work)) {
/*
* Do not free the SRQ resource here; it is freed on HBA remove
*/
srq_info->state = HIFC_CLEAN_DONE;
}
return RETURN_OK;
}
static unsigned int hifc_scq_rcv_marker_sts(struct hifc_hba_s *v_hba,
union hifcoe_scqe_u *v_scqe)
{
unsigned int ret = UNF_RETURN_ERROR;
unsigned int ox_id = INVALID_VALUE32;
unsigned int rx_id = INVALID_VALUE32;
struct unf_frame_pkg_s pkg = { 0 };
struct hifcoe_scqe_itmf_marker_sts_s *marker_sts = NULL;
marker_sts = &v_scqe->itmf_marker_sts;
ox_id = (unsigned int)marker_sts->wd1.ox_id;
ox_id = ox_id - v_hba->exit_base;
rx_id = (unsigned int)marker_sts->wd1.rx_id;
pkg.frame_head.oxid_rxid = rx_id | (unsigned int)(ox_id) << 16;
pkg.frame_head.csctl_sid = marker_sts->wd3.sid;
pkg.frame_head.rctl_did = marker_sts->wd2.did;
/* 1. set pkg status */
if (unlikely(HIFC_SCQE_HAS_ERRCODE(v_scqe)))
pkg.status = UNF_IO_FAILED;
else
pkg.status = UNF_IO_SUCCESS;
/* 2 .process rcvd marker STS: set exchange state */
ret = hifc_rcv_tmf_marker_sts(v_hba, &pkg, ox_id);
return ret;
}
static unsigned int hifc_scq_rcv_abts_marker_sts(struct hifc_hba_s *v_hba,
union hifcoe_scqe_u *v_scqe)
{
unsigned int ret = UNF_RETURN_ERROR;
unsigned int ox_id = INVALID_VALUE32;
unsigned int rx_id = INVALID_VALUE32;
struct unf_frame_pkg_s pkg = { 0 };
struct hifcoe_scqe_abts_marker_sts_s *abts_sts = NULL;
abts_sts = &v_scqe->abts_marker_sts;
if (!abts_sts) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]ABTS marker STS is NULL");
return ret;
}
ox_id = (unsigned int)abts_sts->wd1.ox_id;
ox_id = ox_id - v_hba->exit_base;
rx_id = (unsigned int)abts_sts->wd1.rx_id;
pkg.frame_head.oxid_rxid = rx_id | (unsigned int)(ox_id) << 16;
pkg.frame_head.csctl_sid = abts_sts->wd3.sid;
pkg.frame_head.rctl_did = abts_sts->wd2.did;
/* Record the ucode I/O state in abts_maker_status */
pkg.abts_maker_status = (unsigned int)abts_sts->wd3.io_state;
if (unlikely(HIFC_SCQE_HAS_ERRCODE(v_scqe)))
pkg.status = UNF_IO_FAILED;
else
pkg.status = UNF_IO_SUCCESS;
ret = hifc_rcv_abts_marker_sts(v_hba, &pkg, ox_id);
return ret;
}
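/*
 * Handle an offload error reported through the AEQ. Only the
 * lack-of-SCQE failure is handled: the parent queue state is rolled
 * back to INITIALIZED and any parked destroy request is popped.
 */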
unsigned int hifc_handle_aeq_offload_err(struct hifc_hba_s *v_hba,
struct hifcoe_aqe_data_s *v_aeq_msg)
{
unsigned int ret = RETURN_OK;
struct hifcoe_aqe_data_s *aeq_msg;
unsigned int rport_index = 0;
unsigned int local_ctx_id = 0;
struct hifc_parent_queue_info_s *prnt_qinfo = NULL;
struct hifc_destroy_ctrl_info_s destroy_sqe_info = { 0 };
unsigned long flag = 0;
aeq_msg = v_aeq_msg;
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Port(0x%x) receive off_load Err Event, EvtCode(0x%x) Conn_id(0x%x) Xid(0x%x)",
v_hba->port_cfg.port_id, aeq_msg->wd0.evt_code,
aeq_msg->wd0.conn_id, aeq_msg->wd1.xid);
/* Currently, only the offload failure caused by insufficient scqe is
* processed. Other errors are not processed temporarily.
*/
if (unlikely(aeq_msg->wd0.evt_code !=
FCOE_ERROR_OFFLOAD_LACKOF_SCQE_FAIL)) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Port(0x%x) receive an unsupported error code of AEQ Event, EvtCode(0x%x) Conn_id(0x%x)",
v_hba->port_cfg.port_id, aeq_msg->wd0.evt_code,
aeq_msg->wd0.conn_id);
return UNF_RETURN_ERROR;
}
HIFC_SCQ_ERR_TYPE_STAT(v_hba, FCOE_ERROR_OFFLOAD_LACKOF_SCQE_FAIL);
rport_index = aeq_msg->wd0.conn_id;
local_ctx_id = aeq_msg->wd1.xid;
if (rport_index >= UNF_HIFC_MAXRPORT_NUM) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]Port(0x%x) receive an error offload status: rport index(0x%x) is invalid, Xid(0x%x)",
v_hba->port_cfg.port_id, rport_index,
aeq_msg->wd1.xid);
return UNF_RETURN_ERROR;
}
prnt_qinfo = &v_hba->parent_queue_mgr->parent_queues[rport_index];
if (hifc_check_rport_is_valid(prnt_qinfo, local_ctx_id) != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]Port(0x%x) receive an error offload status: rport index(0x%x), context id(0x%x) is invalid",
v_hba->port_cfg.port_id, rport_index, local_ctx_id);
return UNF_RETURN_ERROR;
}
spin_lock_irqsave(&prnt_qinfo->parent_queue_state_lock, flag);
/* The offload status is restored only
* when the offload status is offloading
*/
if (prnt_qinfo->offload_state == HIFC_QUEUE_STATE_OFFLOADING)
prnt_qinfo->offload_state = HIFC_QUEUE_STATE_INITIALIZED;
spin_unlock_irqrestore(&prnt_qinfo->parent_queue_state_lock, flag);
if (prnt_qinfo->parent_sq_info.destroy_sqe.valid == UNF_TRUE) {
destroy_sqe_info.valid =
prnt_qinfo->parent_sq_info.destroy_sqe.valid;
destroy_sqe_info.rport_index =
prnt_qinfo->parent_sq_info.destroy_sqe.rport_index;
destroy_sqe_info.time_out =
prnt_qinfo->parent_sq_info.destroy_sqe.time_out;
destroy_sqe_info.start_jiff =
prnt_qinfo->parent_sq_info.destroy_sqe.start_jiff;
destroy_sqe_info.rport_info.nport_id =
prnt_qinfo->parent_sq_info.destroy_sqe.rport_info.nport_id;
destroy_sqe_info.rport_info.rport_index =
prnt_qinfo->parent_sq_info.destroy_sqe.rport_info.rport_index;
destroy_sqe_info.rport_info.port_name =
prnt_qinfo->parent_sq_info.destroy_sqe.rport_info.port_name;
prnt_qinfo->parent_sq_info.destroy_sqe.valid = UNF_FALSE;
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"[info]Port(0x%x) pop up delay destroy parent sq, sqe start time 0x%llx, timeout value 0x%x, rport index 0x%x, offload state 0x%x",
v_hba->port_cfg.port_id,
destroy_sqe_info.start_jiff,
destroy_sqe_info.time_out,
prnt_qinfo->parent_sq_info.destroy_sqe.rport_info.rport_index,
HIFC_QUEUE_STATE_INITIALIZED);
ret = hifc_free_parent_resource(v_hba,
&destroy_sqe_info.rport_info);
if (ret != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR,
UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]Port(0x%x) pop delay destroy parent sq failed, rport index 0x%x, rport nport id 0x%x",
v_hba->port_cfg.port_id,
destroy_sqe_info.rport_info.rport_index,
destroy_sqe_info.rport_info.nport_id);
}
}
return ret;
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __HIFC_SERVICE_H__
#define __HIFC_SERVICE_H__
/* Send ElsCmnd or ElsRsp */
unsigned int hifc_send_els_cmnd(void *phba, struct unf_frame_pkg_s *v_pkg);
/* Send GsCmnd */
unsigned int hifc_send_gs_cmnd(void *v_hba, struct unf_frame_pkg_s *v_pkg);
/* Send BlsCmnd */
unsigned int hifc_send_bls_cmnd(void *v_hba, struct unf_frame_pkg_s *v_pkg);
/* Receive Frame from Root RQ */
unsigned int hifc_rcv_service_frame_from_rq(
struct hifc_hba_s *v_hba,
struct hifc_root_rq_info_s *rq_info,
struct hifc_root_rq_complet_info_s *v_complet_info,
unsigned short v_rcv_buf_num);
unsigned int hifc_rq_rcv_srv_err(struct hifc_hba_s *v_hba,
struct hifc_root_rq_complet_info_s *v_info);
unsigned int hifc_rq_rcv_els_rsp_sts(
struct hifc_hba_s *v_hba,
struct hifc_root_rq_complet_info_s *v_info);
/* Receive Frame from SCQ */
unsigned int hifc_rcv_scqe_entry_from_scq(void *v_hba, void *v_scqe,
unsigned int scq_idx);
/* FC txmfs */
#define HIFC_DEFAULT_TX_MAX_FREAM_SIZE 256
#define HIFC_FIRST_PKG_FLAG (1 << 0)
#define HIFC_LAST_PKG_FLAG (1 << 1)
#define HIFC_CHECK_IF_FIRST_PKG(pkg_flag) ((pkg_flag) & HIFC_FIRST_PKG_FLAG)
#define HIFC_CHECK_IF_LAST_PKG(pkg_flag) ((pkg_flag) & HIFC_LAST_PKG_FLAG)
#define HIFC_GET_SERVICE_TYPE(v_hba) 12
#define HIFC_GET_PACKET_TYPE(v_service_type) 1
#define HIFC_GET_PACKET_COS(v_service_type) 1
#define HIFC_GET_PRLI_PAYLOAD_LEN \
(UNF_PRLI_PAYLOAD_LEN - UNF_PRLI_SIRT_EXTRA_SIZE)
/* Start addr of the header/payload of the cmnd buffer in the pkg */
#define HIFC_FC_HEAD_LEN (sizeof(struct unf_fchead_s))
#define HIFC_PAYLOAD_OFFSET (sizeof(struct unf_fchead_s))
#define HIFC_GET_CMND_PAYLOAD_ADDR(v_pkg) \
UNF_GET_FLOGI_PAYLOAD(v_pkg)
#define HIFC_GET_CMND_HEADER_ADDR(v_pkg) \
((v_pkg)->unf_cmnd_pload_bl.buffer_ptr)
#define HIFC_GET_RSP_HEADER_ADDR(v_pkg) \
((v_pkg)->unf_rsp_pload_bl.buffer_ptr)
#define HIFC_GET_RSP_PAYLOAD_ADDR(v_pkg) \
((v_pkg)->unf_rsp_pload_bl.buffer_ptr + HIFC_PAYLOAD_OFFSET)
#define HIFC_GET_CMND_FC_HEADER(v_pkg) \
(&(UNF_GET_SFS_ENTRY(v_pkg)->sfs_common.frame_head))
#define HIFC_PKG_IS_ELS_RSP(els_cmnd_type) \
(((els_cmnd_type) == ELS_ACC) || ((els_cmnd_type) == ELS_RJT))
#define HIFC_XID_IS_VALID(xid, exi_base, exi_count) \
(((xid) >= (exi_base)) && ((xid) < ((exi_base) + (exi_count))))
#define UNF_FC_PAYLOAD_ELS_MASK 0xFF000000
#define UNF_FC_PAYLOAD_ELS_SHIFT 24
#define UNF_FC_PAYLOAD_ELS_DWORD 0
/* Note: this pfcpayload is little endian */
#define UNF_GET_FC_PAYLOAD_ELS_CMND(pfcpayload) \
UNF_GET_SHIFTMASK(((unsigned int *)(void *)pfcpayload)\
[UNF_FC_PAYLOAD_ELS_DWORD], \
UNF_FC_PAYLOAD_ELS_SHIFT, UNF_FC_PAYLOAD_ELS_MASK)
#define HIFC_ELS_CMND_MASK 0xffff
#define HIFC_ELS_CMND__RELEVANT_SHIFT 16UL
#define HIFC_GET_ELS_CMND_CODE(__cmnd) \
((unsigned short)((__cmnd) & HIFC_ELS_CMND_MASK))
#define HIFC_GET_ELS_RSP_TYPE(__cmnd) \
((unsigned short)((__cmnd) & HIFC_ELS_CMND_MASK))
#define HIFC_GET_ELS_RSP_CODE(__cmnd) \
((unsigned short)((__cmnd) >> HIFC_ELS_CMND__RELEVANT_SHIFT & \
HIFC_ELS_CMND_MASK))
#define HIFC_GET_GS_CMND_CODE(__cmnd) \
((unsigned short)((__cmnd) & HIFC_ELS_CMND_MASK))
/* ELS CMND Request */
#define ELS_CMND 0
/* fh_f_ctl - Frame control flags. */
#define HIFC_FC_EX_CTX (1 << 23) /* sent by responder to exchange */
#define HIFC_FC_SEQ_CTX (1 << 22) /* sent by responder to sequence */
#define HIFC_FC_FIRST_SEQ (1 << 21) /* first sequence of this exchange */
#define HIFC_FC_LAST_SEQ (1 << 20) /* last sequence of this exchange */
#define HIFC_FC_END_SEQ (1 << 19) /* last frame of sequence */
#define HIFC_FC_END_CONN (1 << 18) /* end of class 1 connection pending */
#define HIFC_FC_RES_B17 (1 << 17) /* reserved */
#define HIFC_FC_SEQ_INIT (1 << 16) /* transfer of sequence initiative */
#define HIFC_FC_X_ID_REASS (1 << 15) /* exchange ID has been changed */
#define HIFC_FC_X_ID_INVAL (1 << 14) /* exchange ID invalidated */
#define HIFC_FC_ACK_1 (1 << 12) /* 13:12 = 1: ACK_1 expected */
#define HIFC_FC_ACK_N (2 << 12) /* 13:12 = 2: ACK_N expected */
#define HIFC_FC_ACK_0 (3 << 12) /* 13:12 = 3: ACK_0 expected */
#define HIFC_FC_RES_B11 (1 << 11) /* reserved */
#define HIFC_FC_RES_B10 (1 << 10) /* reserved */
#define HIFC_FC_RETX_SEQ (1 << 9) /* retransmitted sequence */
#define HIFC_FC_UNI_TX (1 << 8) /* unidirectional transmit (class 1) */
#define HIFC_FC_CONT_SEQ(i) ((i) << 6)
#define HIFC_FC_ABT_SEQ(i) ((i) << 4)
#define HIFC_FC_REL_OFF (1 << 3) /* parameter is relative offset */
#define HIFC_FC_RES2 (1 << 2) /* reserved */
#define HIFC_FC_FILL(i) ((i) & 3) /* 1:0: bytes of trailing fill */
#define HIFC_FCTL_REQ (HIFC_FC_FIRST_SEQ | HIFC_FC_END_SEQ |\
HIFC_FC_SEQ_INIT)
#define HIFC_FCTL_RESP (HIFC_FC_EX_CTX | HIFC_FC_LAST_SEQ | \
HIFC_FC_END_SEQ | HIFC_FC_SEQ_INIT)
#define HIFC_RCTL_BLS_REQ 0x81
#define HIFC_RCTL_BLS_ACC 0x84
#define HIFC_RCTL_BLS_RJT 0x85
#define UNF_IO_STATE_NEW 0
#define TGT_IO_STATE_SEND_XFERRDY (1 << 2)
#define TGT_IO_STATE_RSP (1 << 5)
#define TGT_IO_STATE_ABORT (1 << 7)
enum HIFC_FC_FH_TYPE_E {
HIFC_FC_TYPE_BLS = 0x00, /* basic link service */
HIFC_FC_TYPE_ELS = 0x01, /* extended link service */
HIFC_FC_TYPE_IP = 0x05, /* IP over FC, RFC 4338 */
HIFC_FC_TYPE_FCP = 0x08, /* SCSI FCP */
HIFC_FC_TYPE_CT = 0x20, /* Fibre Channel Services (FC-CT) */
HIFC_FC_TYPE_ILS = 0x22 /* internal link service */
};
enum HIFC_FC_FH_RCTL_E {
HIFC_FC_RCTL_DD_UNCAT = 0x00, /* uncategorized information */
HIFC_FC_RCTL_DD_SOL_DATA = 0x01, /* solicited data */
HIFC_FC_RCTL_DD_UNSOL_CTL = 0x02, /* unsolicited control */
HIFC_FC_RCTL_DD_SOL_CTL = 0x03, /* solicited control or reply */
HIFC_FC_RCTL_DD_UNSOL_DATA = 0x04, /* unsolicited data */
HIFC_FC_RCTL_DD_DATA_DESC = 0x05, /* data descriptor */
HIFC_FC_RCTL_DD_UNSOL_CMD = 0x06, /* unsolicited command */
HIFC_FC_RCTL_DD_CMD_STATUS = 0x07, /* command status */
#define HIFC_FC_RCTL_ILS_REQ HIFC_FC_RCTL_DD_UNSOL_CTL /* ILS request */
#define HIFC_FC_RCTL_ILS_REP HIFC_FC_RCTL_DD_SOL_CTL /* ILS reply */
/*
* Extended Link_Data
*/
HIFC_FC_RCTL_ELS_REQ = 0x22, /* extended link services request */
HIFC_FC_RCTL_ELS_RSP = 0x23, /* extended link services reply */
HIFC_FC_RCTL_ELS4_REQ = 0x32, /* FC-4 ELS request */
HIFC_FC_RCTL_ELS4_RSP = 0x33, /* FC-4 ELS reply */
/*
* Optional Extended Headers
*/
HIFC_FC_RCTL_VFTH = 0x50, /* virtual fabric tagging header */
HIFC_FC_RCTL_IFRH = 0x51, /* inter-fabric routing header */
HIFC_FC_RCTL_ENCH = 0x52, /* encapsulation header */
/*
* Basic Link Services fh_r_ctl values.
*/
HIFC_FC_RCTL_BA_NOP = 0x80, /* basic link service NOP */
HIFC_FC_RCTL_BA_ABTS = 0x81, /* basic link service abort */
HIFC_FC_RCTL_BA_RMC = 0x82, /* remove connection */
HIFC_FC_RCTL_BA_ACC = 0x84, /* basic accept */
HIFC_FC_RCTL_BA_RJT = 0x85, /* basic reject */
HIFC_FC_RCTL_BA_PRMT = 0x86, /* dedicated connection preempted */
/*
* Link Control Information.
*/
HIFC_FC_RCTL_ACK_1 = 0xc0, /* acknowledge_1 */
HIFC_FC_RCTL_ACK_0 = 0xc1, /* acknowledge_0 */
HIFC_FC_RCTL_P_RJT = 0xc2, /* port reject */
HIFC_FC_RCTL_F_RJT = 0xc3, /* fabric reject */
HIFC_FC_RCTL_P_BSY = 0xc4, /* port busy */
HIFC_FC_RCTL_F_BSY = 0xc5, /* fabric busy to data frame */
HIFC_FC_RCTL_F_BSYL = 0xc6, /* fabric busy to link control frame */
HIFC_FC_RCTL_LCR = 0xc7, /* link credit reset */
HIFC_FC_RCTL_END = 0xc9 /* end */
};
struct hifc_fc_frame_header {
unsigned char rctl; /* routing control */
unsigned char did[3]; /* Destination ID */
unsigned char cs_ctl; /* class of service control / pri */
unsigned char sid[3]; /* Source ID */
unsigned char type; /* see enum HIFC_FC_FH_TYPE_E above */
unsigned char frame_ctl[3]; /* frame control */
unsigned char seq_id; /* sequence ID */
unsigned char df_ctl; /* data field control */
unsigned short seq_cnt; /* sequence count */
unsigned short ox_id; /* originator exchange ID */
unsigned short rx_id; /* responder exchange ID */
unsigned int parm_offset; /* parameter or relative offset */
};
unsigned int hifc_rcv_els_cmnd(const struct hifc_hba_s *v_hba,
struct unf_frame_pkg_s *v_pkg,
unsigned char *v_pld,
unsigned int pld_len,
int first_frame);
unsigned int hifc_rcv_els_rsp(const struct hifc_hba_s *v_hba,
struct unf_frame_pkg_s *v_pkg,
unsigned int ox_id);
unsigned int hifc_rcv_els_rsp_sts(const struct hifc_hba_s *v_hba,
struct unf_frame_pkg_s *v_pkg,
unsigned int rx_id);
unsigned int hifc_rcv_gs_rsp(const struct hifc_hba_s *v_hba,
struct unf_frame_pkg_s *v_pkg,
unsigned int ox_id);
unsigned int hifc_rcv_bls_rsp(const struct hifc_hba_s *v_hba,
struct unf_frame_pkg_s *v_pkg,
unsigned int ox_id);
void hifc_save_login_para_in_sq_info(
struct hifc_hba_s *v_hba,
struct unf_port_login_parms_s *v_login_coparms);
unsigned int hifc_handle_aeq_offload_err(struct hifc_hba_s *v_hba,
struct hifcoe_aqe_data_s *v_aeg_msg);
#define HIFC_CHECK_PKG_ALLOCTIME(v_pkg) \
do { \
if (unlikely(UNF_GETXCHGALLOCTIME(v_pkg) == 0)) { \
HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, \
UNF_WARN, \
"[warn]Invalid MagicNum,S_ID(0x%x) D_ID(0x%x) OXID(0x%x) RX_ID(0x%x) pkg type(0x%x) hot pooltag(0x%x)", \
UNF_GET_SID(v_pkg), \
UNF_GET_DID(v_pkg), \
UNF_GET_OXID(v_pkg), \
UNF_GET_RXID(v_pkg), \
((struct unf_frame_pkg_s *)v_pkg)->type, \
UNF_GET_XCHG_TAG(v_pkg)); \
} \
} while (0)
#endif
// SPDX-License-Identifier: GPL-2.0
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#include "unf_log.h"
#include "unf_common.h"
#include "unf_disc.h"
#include "unf_event.h"
#include "unf_lport.h"
#include "unf_rport.h"
#include "unf_exchg.h"
#include "unf_service.h"
#include "unf_portman.h"
#define UNF_LIST_RSCN_PAGE_CNT 2560
#define UNF_MAX_PORTS_PRI_LOOP 2
#define UNF_MAX_GS_SEND_NUM 8
#define UNF_OS_REMOVE_CARD_TIMEOUT (60 * 1000)
static void unf_set_disc_state(struct unf_disc_s *v_disc,
enum unf_disc_state_e v_en_states)
{
UNF_CHECK_VALID(0x651, UNF_TRUE, v_disc, return);
if (v_en_states != v_disc->en_states) {
/* Reset disc retry count */
v_disc->retry_count = 0;
}
v_disc->en_states = v_en_states;
}
static inline unsigned int unf_get_loop_map(struct unf_lport_s *v_lport,
unsigned char v_loop_map[],
unsigned int loop_map_size)
{
struct unf_buf_s buf = { 0 };
unsigned int ret = UNF_RETURN_ERROR;
UNF_CHECK_VALID(
0x652, UNF_TRUE,
v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get,
return UNF_RETURN_ERROR);
buf.cbuf = v_loop_map;
buf.buf_len = loop_map_size;
ret = v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get(
v_lport->fc_port,
UNF_PORT_CFG_GET_LOOP_MAP,
(void *)&buf);
return ret;
}
static int unf_discover_private_loop(void *v_arg_in, void *v_arg_out)
{
struct unf_lport_s *lport = (struct unf_lport_s *)v_arg_in;
unsigned int ret = UNF_RETURN_ERROR;
unsigned int i = 0;
unsigned char loop_id = 0;
unsigned int alpa_index = 0;
unsigned char loop_map[UNF_LOOPMAP_COUNT];
UNF_REFERNCE_VAR(v_arg_out);
UNF_CHECK_VALID(0x653, UNF_TRUE, lport, return UNF_RETURN_ERROR);
memset(loop_map, 0x0, UNF_LOOPMAP_COUNT);
/* Get Port Loop Map */
ret = unf_get_loop_map(lport, loop_map, UNF_LOOPMAP_COUNT);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT,
UNF_WARN,
"[warn]Port(0x%x) get loop map failed",
lport->port_id);
return UNF_RETURN_ERROR;
}
/* Check Loop Map Ports Count */
if (loop_map[0] > UNF_MAX_PORTS_PRI_LOOP) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x) has more than %d ports(%u) in private loop",
lport->port_id, UNF_MAX_PORTS_PRI_LOOP,
loop_map[0]);
return UNF_RETURN_ERROR;
}
/* AL_PA = 0 means Public Loop */
if ((loop_map[1] == UNF_FL_PORT_LOOP_ADDR) ||
(loop_map[2] == UNF_FL_PORT_LOOP_ADDR)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x) one or more AL_PA is 0x00, indicate it's FL_Port",
lport->port_id);
return UNF_RETURN_ERROR;
}
/* Discovery Private Loop Ports */
for (i = 0; i < loop_map[0]; i++) {
alpa_index = i + 1;
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO,
"[info]Port(0x%x) start to disc(0x%x) with count(0x%x)",
lport->port_id, loop_map[alpa_index], i);
/* Check whether the PLOGI needs to be delayed */
loop_id = loop_map[alpa_index];
unf_login_with_loop_node(lport, (unsigned int)loop_id);
}
return RETURN_OK;
}
static unsigned int unf_disc_start(void *v_lport)
{
/*
* Call by:
* 1. Enter Private Loop Login
* 2. Analyze RSCN payload
* 3. SCR callback
**
* Doing:
* Fabric/Public Loop: Send GID_PT
* Private Loop: (delay to) send PLOGI or send LOGO immediately
* P2P: do nothing
*/
struct unf_lport_s *lport = (struct unf_lport_s *)v_lport;
struct unf_rport_s *rport = NULL;
struct unf_disc_s *disc = NULL;
struct unf_cm_event_report *event = NULL;
unsigned int ret = RETURN_OK;
unsigned long flag = 0;
enum unf_act_topo_e act_topo = UNF_ACT_TOP_UNKNOWN;
UNF_CHECK_VALID(0x654, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
act_topo = lport->en_act_topo;
disc = &lport->disc;
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO,
"[info]LOGIN: Port(0x%x) with topo(0x%x) begin to discovery",
lport->port_id, act_topo);
if ((act_topo == UNF_ACT_TOP_P2P_FABRIC) ||
(act_topo == UNF_ACT_TOP_PUBLIC_LOOP)) {
/* 1. Fabric or Public Loop Topology: for directory server */
/* 0xfffffc */
rport = unf_get_rport_by_nport_id(lport,
UNF_FC_FID_DIR_SERV);
if (!rport) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO,
UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x) unable to get SNS RPort(0xfffffc)",
lport->port_id);
rport = unf_rport_get_free_and_init(
lport,
UNF_PORT_TYPE_FC,
UNF_FC_FID_DIR_SERV);
if (!rport)
return UNF_RETURN_ERROR;
rport->nport_id = UNF_FC_FID_DIR_SERV;
}
spin_lock_irqsave(&disc->rport_busy_pool_lock, flag);
unf_set_disc_state(disc, UNF_DISC_ST_START); /* disc start */
unf_disc_state_ma(lport, UNF_EVENT_DISC_NORMAL_ENTER);
spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag);
/*
* NOTE: Send GID_PT
* The Name Server shall, when it receives a GID_PT request,
* return all Port Identifiers having registered support for
* the specified Port Type.
* One or more Port Identifiers, having registered as
* the specified Port Type, are returned.
*/
ret = unf_send_gid_pt(lport, rport);
if (ret != RETURN_OK)
unf_disc_error_recovery(lport);
} else if (act_topo == UNF_ACT_TOP_PRIVATE_LOOP) {
/* Private Loop: process in the event thread */
event = unf_get_one_event_node(lport);
UNF_CHECK_VALID(0x655, UNF_TRUE, NULL != event,
return UNF_RETURN_ERROR);
event->lport = lport;
event->event_asy_flag = UNF_EVENT_ASYN;
event->pfn_unf_event_task = unf_discover_private_loop;
event->para_in = (void *)lport;
unf_post_one_event_node(lport, event);
} else {
/* P2P topology mode: Do nothing */
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT,
UNF_MAJOR,
"[info]Port(0x%x) with topo(0x%x) need do nothing",
lport->port_id, act_topo);
}
return ret;
}
static unsigned int unf_disc_stop(void *v_lport)
{
/* Call by GID_ACC processor */
struct unf_lport_s *lport = NULL;
struct unf_lport_s *root_lport = NULL;
struct unf_rport_s *sns_port = NULL;
struct unf_disc_rport_s *disc_rport = NULL;
struct unf_disc_s *disc = NULL;
struct unf_disc_s *root_disc = NULL;
struct list_head *node = NULL;
unsigned long flag = 0;
unsigned int ret = RETURN_OK;
unsigned int nport_id = 0;
UNF_CHECK_VALID(0x656, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
lport = (struct unf_lport_s *)v_lport;
disc = &lport->disc;
root_lport = (struct unf_lport_s *)lport->root_lport;
root_disc = &root_lport->disc;
/* Get R_Port for Directory server */
/* 0xfffffc */
sns_port = unf_get_rport_by_nport_id(lport, UNF_FC_FID_DIR_SERV);
if (!sns_port) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x) find fabric RPort(0xfffffc) failed",
lport->port_id);
return UNF_RETURN_ERROR;
}
/* for R_Port from disc pool busy list */
spin_lock_irqsave(&disc->rport_busy_pool_lock, flag);
if (list_empty(&disc->disc_rport_mgr.list_disc_rport_busy)) {
spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag);
/* Empty and return directly */
return RETURN_OK;
}
node = (&disc->disc_rport_mgr.list_disc_rport_busy)->next;
do {
/* Delete from Disc busy list */
disc_rport = list_entry(node, struct unf_disc_rport_s,
entry_rport);
nport_id = disc_rport->nport_id;
list_del_init(node);
spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag);
/* Add back to (free) Disc R_Port pool (list) */
spin_lock_irqsave(&root_disc->rport_busy_pool_lock, flag);
list_add_tail(node,
&root_disc->disc_rport_mgr.list_disc_rports_pool);
spin_unlock_irqrestore(&root_disc->rport_busy_pool_lock, flag);
/* Send GNN_ID to Name Server */
ret = unf_get_and_post_disc_event(lport, sns_port, nport_id,
UNF_DISC_GET_NODE_NAME);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT,
UNF_ERR,
"[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)",
lport->nport_id, UNF_DISC_GET_NODE_NAME,
nport_id);
/* NOTE: go to next stage */
unf_rcv_gnn_id_rsp_unknown(lport, sns_port,
nport_id);
}
spin_lock_irqsave(&disc->rport_busy_pool_lock, flag);
node = (&disc->disc_rport_mgr.list_disc_rport_busy)->next;
} while (node != &disc->disc_rport_mgr.list_disc_rport_busy);
spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag);
return ret;
}
static void unf_disc_callback(void *v_lport, unsigned int v_result)
{
/* Do nothing */
UNF_REFERNCE_VAR(v_lport);
UNF_REFERNCE_VAR(v_result);
}
/*
* Function Name : unf_init_rport_pool
* Function Description: Init R_Port (free) Pool
* Input Parameters : struct unf_lport_s *v_lport
* Output Parameters : N/A
* Return Type : unsigned int
*/
static unsigned int unf_init_rport_pool(struct unf_lport_s *v_lport)
{
struct unf_rport_pool_s *rport_pool = NULL;
struct unf_rport_s *rport = NULL;
unsigned int ret = RETURN_OK;
unsigned int i = 0;
unsigned int bit_map_cnt = 0;
unsigned long flag = 0;
unsigned int max_login = 0;
UNF_CHECK_VALID(0x657, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
/* Init RPort Pool info */
rport_pool = &v_lport->rport_pool;
max_login = v_lport->low_level_func.lport_cfg_items.max_login;
rport_pool->rport_pool_completion = NULL;
rport_pool->rport_pool_count = max_login;
spin_lock_init(&rport_pool->rport_free_pool_lock);
INIT_LIST_HEAD(&rport_pool->list_rports_pool); /* free RPort pool */
/* 1. Alloc RPort Pool buffer/resource (memory) */
rport_pool->rport_pool_add =
vmalloc((size_t)(max_login * sizeof(struct unf_rport_s)));
if (!rport_pool->rport_pool_add) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]Port(0x%x) allocate RPort(s) resource failed",
v_lport->port_id);
return UNF_RETURN_ERROR;
}
memset(rport_pool->rport_pool_add, 0,
(max_login * sizeof(struct unf_rport_s)));
/* 2. Alloc R_Port Pool bitmap */
bit_map_cnt = (v_lport->low_level_func.support_max_rport) /
BITS_PER_LONG + 1;
rport_pool->pul_rpi_bitmap = vmalloc((size_t)(bit_map_cnt *
sizeof(unsigned long)));
if (!rport_pool->pul_rpi_bitmap) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]Port(0x%x) allocate RPort Bitmap failed",
v_lport->port_id);
vfree(rport_pool->rport_pool_add);
rport_pool->rport_pool_add = NULL;
return UNF_RETURN_ERROR;
}
memset(rport_pool->pul_rpi_bitmap, 0,
(bit_map_cnt * sizeof(unsigned long)));
/* 3. Rport resource Management: Add Rports (buffer)
* to Rport Pool List
*/
rport = (struct unf_rport_s *)(rport_pool->rport_pool_add);
spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag);
for (i = 0; i < rport_pool->rport_pool_count; i++) {
spin_lock_init(&rport->rport_state_lock);
list_add_tail(&rport->entry_rport,
&rport_pool->list_rports_pool);
sema_init(&rport->task_sema, 0);
rport++;
}
spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flag);
return ret;
}
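/*
 * Free the R_Port pool: wait (up to UNF_OS_REMOVE_CARD_TIMEOUT) for
 * all R_Ports to be returned to the free pool, then release the pool
 * memory and the RPI bitmap unless the pool was marked dirty.
 */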
static void unf_free_rport_pool(struct unf_lport_s *v_lport)
{
struct unf_rport_pool_s *rport_pool = NULL;
int wait = UNF_FALSE;
unsigned long flag = 0;
unsigned int remain = 0;
unsigned long long time_out = 0;
unsigned int max_login = 0;
unsigned int i;
struct unf_rport_s *rport;
struct completion rport_pool_completion =
COMPLETION_INITIALIZER(rport_pool_completion);
UNF_CHECK_VALID(0x671, UNF_TRUE, v_lport, return);
UNF_REFERNCE_VAR(remain);
rport_pool = &v_lport->rport_pool;
max_login = v_lport->low_level_func.lport_cfg_items.max_login;
spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag);
if (max_login != rport_pool->rport_pool_count) {
rport_pool->rport_pool_completion = &rport_pool_completion;
remain = max_login - rport_pool->rport_pool_count;
wait = UNF_TRUE;
}
spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flag);
if (wait == UNF_TRUE) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Port(0x%x) begin to wait for RPort pool completion(%ld), remain(0x%x)",
v_lport->port_id, jiffies, remain);
time_out = wait_for_completion_timeout(
rport_pool->rport_pool_completion,
msecs_to_jiffies(UNF_OS_REMOVE_CARD_TIMEOUT));
if (time_out == 0)
unf_cmmark_dirty_mem(
v_lport,
UNF_LPORT_DIRTY_FLAG_RPORT_POOL_DIRTY);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Port(0x%x) wait for RPort pool completion end(%ld)",
v_lport->port_id, jiffies);
spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag);
rport_pool->rport_pool_completion = NULL;
spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flag);
}
rport = (struct unf_rport_s *)(rport_pool->rport_pool_add);
for (i = 0; i < rport_pool->rport_pool_count; i++) {
if (!rport)
break;
rport++;
}
if ((v_lport->dirty_flag &
UNF_LPORT_DIRTY_FLAG_RPORT_POOL_DIRTY) == 0) {
vfree(rport_pool->rport_pool_add);
rport_pool->rport_pool_add = NULL; /* R_Port pool */
vfree(rport_pool->pul_rpi_bitmap); /* R_Port bitmap */
rport_pool->pul_rpi_bitmap = NULL;
}
UNF_REFERNCE_VAR(remain);
}
static void unf_init_rscn_node(struct unf_port_id_page_s *v_port_id_page)
{
UNF_CHECK_VALID(0x658, UNF_TRUE, v_port_id_page, return);
v_port_id_page->uc_addr_format = 0;
v_port_id_page->uc_event_qualifier = 0;
v_port_id_page->uc_reserved = 0;
v_port_id_page->port_id_area = 0;
v_port_id_page->port_id_domain = 0;
v_port_id_page->port_id_port = 0;
}
struct unf_port_id_page_s *unf_get_free_rscn_node(void *v_rscn_mg)
{
/* Call by Save RSCN Port_ID */
struct unf_rscn_mg_s *rscn_mgr = NULL;
struct unf_port_id_page_s *port_id_node = NULL;
struct list_head *list_node = NULL;
unsigned long flag = 0;
UNF_CHECK_VALID(0x659, UNF_TRUE, v_rscn_mg, return NULL);
rscn_mgr = (struct unf_rscn_mg_s *)v_rscn_mg;
spin_lock_irqsave(&rscn_mgr->rscn_id_list_lock, flag);
if (list_empty(&rscn_mgr->list_free_rscn_page)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT,
UNF_WARN,
"[warn]No RSCN node anymore");
spin_unlock_irqrestore(&rscn_mgr->rscn_id_list_lock, flag);
return NULL;
}
/* Get from list_free_RSCN_page */
list_node = (&rscn_mgr->list_free_rscn_page)->next;
list_del(list_node);
rscn_mgr->free_rscn_count--;
port_id_node = list_entry(list_node, struct unf_port_id_page_s,
list_node_rscn);
unf_init_rscn_node(port_id_node);
spin_unlock_irqrestore(&rscn_mgr->rscn_id_list_lock, flag);
return port_id_node;
}
static void unf_release_rscn_node(void *v_rscn_mg,
void *v_port_id_node)
{
/* Call by RSCN GID_ACC */
struct unf_rscn_mg_s *rscn_mgr = NULL;
struct unf_port_id_page_s *port_id_node = NULL;
unsigned long flag = 0;
UNF_CHECK_VALID(0x660, UNF_TRUE, v_rscn_mg, return);
UNF_CHECK_VALID(0x661, UNF_TRUE, v_port_id_node, return);
rscn_mgr = (struct unf_rscn_mg_s *)v_rscn_mg;
port_id_node = (struct unf_port_id_page_s *)v_port_id_node;
/* Back to list_free_RSCN_page */
spin_lock_irqsave(&rscn_mgr->rscn_id_list_lock, flag);
rscn_mgr->free_rscn_count++;
unf_init_rscn_node(port_id_node);
list_add_tail(&port_id_node->list_node_rscn,
&rscn_mgr->list_free_rscn_page);
spin_unlock_irqrestore(&rscn_mgr->rscn_id_list_lock, flag);
}
static unsigned int unf_init_rscn_pool(struct unf_lport_s *v_lport)
{
struct unf_rscn_mg_s *rscn_mgr = NULL;
struct unf_port_id_page_s *port_id_page = NULL;
unsigned int ret = RETURN_OK;
unsigned int i = 0;
unsigned long flag = 0;
UNF_CHECK_VALID(0x662, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
rscn_mgr = &v_lport->disc.rscn_mgr;
/* Get RSCN Pool buffer */
rscn_mgr->rscn_pool_add =
vmalloc(UNF_LIST_RSCN_PAGE_CNT *
sizeof(struct unf_port_id_page_s));
if (!rscn_mgr->rscn_pool_add) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[warn]Port(0x%x) allocate RSCN pool failed",
v_lport->port_id);
return UNF_RETURN_ERROR;
}
memset(rscn_mgr->rscn_pool_add, 0,
sizeof(struct unf_port_id_page_s) * UNF_LIST_RSCN_PAGE_CNT);
spin_lock_irqsave(&rscn_mgr->rscn_id_list_lock, flag);
port_id_page = (struct unf_port_id_page_s *)(rscn_mgr->rscn_pool_add);
for (i = 0; i < UNF_LIST_RSCN_PAGE_CNT; i++) {
/* Add tail to list_free_RSCN_page */
list_add_tail(&port_id_page->list_node_rscn,
&rscn_mgr->list_free_rscn_page);
rscn_mgr->free_rscn_count++;
port_id_page++;
}
spin_unlock_irqrestore(&rscn_mgr->rscn_id_list_lock, flag);
return ret;
}
static void unf_free_rscn_pool(struct unf_lport_s *v_lport)
{
struct unf_disc_s *disc = NULL;
UNF_CHECK_VALID(0x663, UNF_TRUE, v_lport, return);
disc = &v_lport->disc;
if (disc->rscn_mgr.rscn_pool_add) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT,
UNF_INFO,
"[info]Port(0x%x) free RSCN pool",
v_lport->nport_id);
vfree(disc->rscn_mgr.rscn_pool_add);
disc->rscn_mgr.rscn_pool_add = NULL;
}
}
static unsigned int unf_init_rscn_mgr(struct unf_lport_s *v_lport)
{
struct unf_rscn_mg_s *rscn_mgr = NULL;
unsigned int ret = RETURN_OK;
UNF_CHECK_VALID(0x664, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
rscn_mgr = &v_lport->disc.rscn_mgr;
/* free RSCN page list */
INIT_LIST_HEAD(&rscn_mgr->list_free_rscn_page);
/* busy RSCN page list */
INIT_LIST_HEAD(&rscn_mgr->list_using_rscn_page);
spin_lock_init(&rscn_mgr->rscn_id_list_lock);
rscn_mgr->free_rscn_count = 0;
rscn_mgr->pfn_unf_get_free_rscn_node = unf_get_free_rscn_node;
rscn_mgr->pfn_unf_release_rscn_node = unf_release_rscn_node;
ret = unf_init_rscn_pool(v_lport);
return ret;
}
static void unf_destroy_rscn_mgr(struct unf_lport_s *v_lport)
{
struct unf_rscn_mg_s *rscn_mgr = NULL;
UNF_CHECK_VALID(0x665, UNF_TRUE, v_lport, return);
rscn_mgr = &v_lport->disc.rscn_mgr;
rscn_mgr->free_rscn_count = 0;
rscn_mgr->pfn_unf_get_free_rscn_node = NULL;
rscn_mgr->pfn_unf_release_rscn_node = NULL;
unf_free_rscn_pool(v_lport);
}
static unsigned int unf_init_disc_rport_pool(struct unf_lport_s *v_lport)
{
struct unf_disc_rport_mg_s *disc_mgr = NULL;
struct unf_disc_rport_s *disc_rport = NULL;
unsigned int i = 0;
unsigned int max_login = 0;
unsigned long flag = 0;
UNF_CHECK_VALID(0x662, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
max_login = v_lport->low_level_func.lport_cfg_items.max_login;
disc_mgr = &v_lport->disc.disc_rport_mgr;
/* Alloc R_Port Disc Pool buffer (address) */
disc_mgr->disc_pool_add = vmalloc(max_login *
sizeof(struct unf_disc_rport_s));
if (!disc_mgr->disc_pool_add) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[warn]Port(0x%x) allocate disc RPort pool failed",
v_lport->port_id);
return UNF_RETURN_ERROR;
}
memset(disc_mgr->disc_pool_add, 0,
(max_login * sizeof(struct unf_disc_rport_s)));
/* Add R_Port to (free) DISC R_Port Pool */
spin_lock_irqsave(&v_lport->disc.rport_busy_pool_lock, flag);
disc_rport = (struct unf_disc_rport_s *)(disc_mgr->disc_pool_add);
for (i = 0; i < max_login; i++) {
/* Add tail to list_disc_Rport_pool */
list_add_tail(&disc_rport->entry_rport,
&disc_mgr->list_disc_rports_pool);
disc_rport++;
}
spin_unlock_irqrestore(&v_lport->disc.rport_busy_pool_lock, flag);
return RETURN_OK;
}
static void unf_free_disc_rport_pool(struct unf_lport_s *v_lport)
{
struct unf_disc_s *disc = NULL;
UNF_CHECK_VALID(0x663, UNF_TRUE, v_lport, return);
disc = &v_lport->disc;
if (disc->disc_rport_mgr.disc_pool_add) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT,
UNF_INFO,
"[info]Port(0x%x) free disc RPort pool",
v_lport->port_id);
vfree(disc->disc_rport_mgr.disc_pool_add);
disc->disc_rport_mgr.disc_pool_add = NULL;
}
}
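/* Run in the disc thread: send the queued GS request (GPN_ID/GFF_ID/GNN_ID);
 * on send failure fall back to the matching "rsp unknown" handler,
 * then free the event node
 */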
static int unf_discover_port_info(void *v_arg_in)
{
struct unf_disc_gs_event_info *gs_info = NULL;
unsigned int ret = UNF_RETURN_ERROR;
struct unf_lport_s *lport = NULL;
struct unf_rport_s *rport = NULL;
UNF_CHECK_VALID(0x2250, UNF_TRUE, v_arg_in, return UNF_RETURN_ERROR);
gs_info = (struct unf_disc_gs_event_info *)v_arg_in;
lport = (struct unf_lport_s *)gs_info->lport;
rport = (struct unf_rport_s *)gs_info->rport;
switch (gs_info->entype) {
case UNF_DISC_GET_PORT_NAME:
ret = unf_send_gpn_id(lport, rport, gs_info->rport_id);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT,
UNF_WARN,
"[warn]Port(0x%x) send GPN_ID failed RPort(0x%x)",
lport->nport_id, gs_info->rport_id);
unf_rcv_gpn_id_rsp_unknown(lport, gs_info->rport_id);
}
break;
case UNF_DISC_GET_FEATURE:
ret = unf_send_gff_id(lport, rport, gs_info->rport_id);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT,
UNF_WARN,
"[warn]Port(0x%x) send GFF_ID failed to get RPort(0x%x)'s feature",
lport->port_id, gs_info->rport_id);
unf_rcv_gff_id_rsp_unknown(lport, gs_info->rport_id);
}
break;
case UNF_DISC_GET_NODE_NAME:
ret = unf_send_gnn_id(lport, rport, gs_info->rport_id);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT,
UNF_WARN,
"[warn]Port(0x%x) GNN_ID send failed with NPort ID(0x%x)",
lport->port_id, gs_info->rport_id);
/* NOTE: Continue to next stage */
unf_rcv_gnn_id_rsp_unknown(lport, rport,
gs_info->rport_id);
}
break;
default:
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR,
"[err]Send GS packet type(0x%x) is unknown",
gs_info->entype);
}
kfree(gs_info);
return (int)ret;
}
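/* Allocate a GS event node, queue it on the root L_Port's disc event
 * list and wake up the discovery thread
 */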
unsigned int unf_get_and_post_disc_event(void *v_lport,
void *v_sns_port,
unsigned int v_nport_id,
enum unf_disc_type_e v_en_type)
{
struct unf_disc_gs_event_info *gs_info = NULL;
unsigned long flag = 0;
struct unf_lport_s *root_lport = NULL;
struct unf_lport_s *lport = NULL;
struct unf_disc_manage_info_s *disc_info = NULL;
UNF_CHECK_VALID(0x654, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
UNF_CHECK_VALID(0x654, UNF_TRUE, v_sns_port, return UNF_RETURN_ERROR);
lport = (struct unf_lport_s *)v_lport;
if (lport->link_up == UNF_PORT_LINK_DOWN)
return RETURN_OK;
root_lport = lport->root_lport;
disc_info = &root_lport->disc.disc_thread_info;
if (disc_info->b_thread_exit == UNF_TRUE)
return RETURN_OK;
gs_info = kmalloc(sizeof(struct unf_disc_gs_event_info), GFP_ATOMIC);
if (!gs_info)
return UNF_RETURN_ERROR;
gs_info->entype = v_en_type;
gs_info->lport = v_lport;
gs_info->rport = v_sns_port;
gs_info->rport_id = v_nport_id;
INIT_LIST_HEAD(&gs_info->list_entry);
spin_lock_irqsave(&disc_info->disc_event_list_lock, flag);
list_add_tail(&gs_info->list_entry, &disc_info->list_head);
spin_unlock_irqrestore(&disc_info->disc_event_list_lock, flag);
wake_up_process(disc_info->data_thread);
return RETURN_OK;
}
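/* Discovery thread main loop: sleep while the event list is empty or
 * the GS send credit is exhausted, otherwise dequeue one event and
 * hand it to unf_discover_port_info()
 */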
static int unf_disc_event_process(void *v_arg)
{
struct list_head *node = NULL;
struct unf_disc_gs_event_info *gs_info = NULL;
unsigned long flags = 0;
struct unf_disc_s *disc = (struct unf_disc_s *)v_arg;
struct unf_disc_manage_info_s *disc_info = &disc->disc_thread_info;
UNF_REFERNCE_VAR(v_arg);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT,
UNF_INFO,
"Port(0x%x) enter discovery thread.",
disc->lport->port_id);
while (!kthread_should_stop()) {
if (disc_info->b_thread_exit == UNF_TRUE)
break;
spin_lock_irqsave(&disc_info->disc_event_list_lock, flags);
if ((list_empty(&disc_info->list_head) == UNF_TRUE) ||
(atomic_read(&disc_info->disc_contrl_size) == 0)) {
spin_unlock_irqrestore(&disc_info->disc_event_list_lock,
flags);
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout((long)msecs_to_jiffies(1000));
} else {
node = (&disc_info->list_head)->next;
list_del_init(node);
gs_info = list_entry(node,
struct unf_disc_gs_event_info,
list_entry);
spin_unlock_irqrestore(&disc_info->disc_event_list_lock,
flags);
unf_discover_port_info(gs_info);
}
}
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EVENT,
UNF_MAJOR,
"Port(0x%x) discovery thread over.", disc->lport->port_id);
return RETURN_OK;
}
void unf_flush_disc_event(void *v_disc, void *v_vport)
{
struct unf_disc_s *disc = (struct unf_disc_s *)v_disc;
struct unf_disc_manage_info_s *disc_info = NULL;
struct list_head *list = NULL;
struct list_head *list_tmp = NULL;
struct unf_disc_gs_event_info *gs_info = NULL;
unsigned long flag = 0;
UNF_CHECK_VALID(0x2249, UNF_TRUE, v_disc, return);
disc_info = &disc->disc_thread_info;
spin_lock_irqsave(&disc_info->disc_event_list_lock, flag);
list_for_each_safe(list, list_tmp, &disc_info->list_head) {
gs_info = list_entry(list, struct unf_disc_gs_event_info,
list_entry);
if (!v_vport || gs_info->lport == v_vport) {
list_del_init(&gs_info->list_entry);
kfree(gs_info);
}
}
if (!v_vport)
atomic_set(&disc_info->disc_contrl_size, UNF_MAX_GS_SEND_NUM);
spin_unlock_irqrestore(&disc_info->disc_event_list_lock, flag);
}
void unf_disc_ctrl_size_inc(void *v_lport, unsigned int v_cmnd)
{
struct unf_lport_s *lport = NULL;
UNF_CHECK_VALID(0x2249, UNF_TRUE, v_lport, return);
lport = (struct unf_lport_s *)v_lport;
lport = lport->root_lport;
UNF_CHECK_VALID(0x2249, UNF_TRUE, lport, return);
if (atomic_read(&lport->disc.disc_thread_info.disc_contrl_size) ==
UNF_MAX_GS_SEND_NUM)
return;
if (v_cmnd == NS_GPN_ID || v_cmnd == NS_GNN_ID || v_cmnd == NS_GFF_ID)
atomic_inc(&lport->disc.disc_thread_info.disc_contrl_size);
}
static void unf_destroy_disc_thread(void *v_disc)
{
struct unf_disc_manage_info_s *disc_info = NULL;
struct unf_disc_s *disc = (struct unf_disc_s *)v_disc;
UNF_CHECK_VALID(0x2249, UNF_TRUE, disc, return);
disc_info = &disc->disc_thread_info;
disc_info->b_thread_exit = UNF_TRUE;
unf_flush_disc_event(disc, NULL);
wake_up_process(disc_info->data_thread);
kthread_stop(disc_info->data_thread);
disc_info->data_thread = NULL;
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"Port(0x%x) destroy discovery thread succeed.",
disc->lport->port_id);
}
static unsigned int unf_create_disc_thread(void *v_disc)
{
struct unf_disc_manage_info_s *disc_info = NULL;
struct unf_disc_s *disc = (struct unf_disc_s *)v_disc;
UNF_CHECK_VALID(0x2250, UNF_TRUE, disc, return UNF_RETURN_ERROR);
/* Create a new discovery kthread for this disc manager */
disc_info = &disc->disc_thread_info;
memset(disc_info, 0, sizeof(struct unf_disc_manage_info_s));
INIT_LIST_HEAD(&disc_info->list_head);
spin_lock_init(&disc_info->disc_event_list_lock);
atomic_set(&disc_info->disc_contrl_size, UNF_MAX_GS_SEND_NUM);
disc_info->b_thread_exit = UNF_FALSE;
disc_info->data_thread =
kthread_create(unf_disc_event_process, disc,
"%x_DiscT", disc->lport->port_id);
if (IS_ERR(disc_info->data_thread) || !disc_info->data_thread) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"Port(0x%x) creat discovery thread(0x%p) unsuccessful.",
disc->lport->port_id, disc_info->data_thread);
return UNF_RETURN_ERROR;
}
wake_up_process(disc_info->data_thread);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO,
"Port(0x%x) creat discovery thread succeed.",
disc->lport->port_id);
return RETURN_OK;
}
static void unf_disc_ref_cnt_dec(struct unf_disc_s *v_disc)
{
unsigned long flags = 0;
UNF_CHECK_VALID(0x669, UNF_TRUE, v_disc, return);
spin_lock_irqsave(&v_disc->rport_busy_pool_lock, flags);
if (atomic_dec_and_test(&v_disc->disc_ref_cnt)) {
if (v_disc->disc_completion)
complete(v_disc->disc_completion);
}
spin_unlock_irqrestore(&v_disc->rport_busy_pool_lock, flags);
}
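/* Delayed-work handler: resend GID_PT or GID_FT to the directory
 * server according to the current discovery state
 */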
static void unf_lport_disc_timeout(struct work_struct *v_work)
{
struct unf_lport_s *lport = NULL;
struct unf_rport_s *rport = NULL;
struct unf_disc_s *disc = NULL;
enum unf_disc_state_e en_state = UNF_DISC_ST_END;
unsigned long flag = 0;
UNF_CHECK_VALID(0x675, UNF_TRUE, v_work, return);
disc = container_of(v_work, struct unf_disc_s, disc_work.work);
if (!disc) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Get discover pointer failed");
return;
}
lport = disc->lport;
if (!lport) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Find Port by discovery work failed");
unf_disc_ref_cnt_dec(disc);
return;
}
spin_lock_irqsave(&disc->rport_busy_pool_lock, flag);
en_state = disc->en_states;
spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag);
/* 0xfffffc: well-known Directory Server N_Port ID */
rport = unf_get_rport_by_nport_id(lport, UNF_FC_FID_DIR_SERV);
if (!rport) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x) find fabric RPort failed",
lport->port_id);
unf_disc_ref_cnt_dec(disc);
return;
}
switch (en_state) {
case UNF_DISC_ST_START:
break;
case UNF_DISC_ST_GIDPT_WAIT:
(void)unf_send_gid_pt(lport, rport);
break;
case UNF_DISC_ST_GIDFT_WAIT:
(void)unf_send_gid_ft(lport, rport);
break;
case UNF_DISC_ST_END:
break;
default:
break;
}
unf_disc_ref_cnt_dec(disc);
}
unsigned int unf_init_disc_mgr(struct unf_lport_s *v_lport)
{
struct unf_disc_s *disc = NULL;
unsigned int ret = RETURN_OK;
UNF_CHECK_VALID(0x666, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
disc = &v_lport->disc;
disc->max_retry_count = UNF_DISC_RETRY_TIMES;
disc->retry_count = 0;
disc->disc_flag = UNF_DISC_NONE;
INIT_LIST_HEAD(&disc->list_busy_rports); /* busy RPort pool list */
/* delete RPort pool list */
INIT_LIST_HEAD(&disc->list_delete_rports);
/* destroy RPort pool list */
INIT_LIST_HEAD(&disc->list_destroy_rports);
spin_lock_init(&disc->rport_busy_pool_lock);
disc->disc_rport_mgr.disc_pool_add = NULL;
/* free disc RPort pool */
INIT_LIST_HEAD(&disc->disc_rport_mgr.list_disc_rports_pool);
/* busy disc RPort pool */
INIT_LIST_HEAD(&disc->disc_rport_mgr.list_disc_rport_busy);
disc->disc_completion = NULL;
disc->lport = v_lport;
INIT_DELAYED_WORK(&disc->disc_work, unf_lport_disc_timeout);
disc->unf_disc_temp.pfn_unf_disc_start = unf_disc_start;
disc->unf_disc_temp.pfn_unf_disc_stop = unf_disc_stop;
disc->unf_disc_temp.pfn_unf_disc_callback = unf_disc_callback;
atomic_set(&disc->disc_ref_cnt, 0);
/* Init RSCN Manager */
ret = unf_init_rscn_mgr(v_lport);
if (ret != RETURN_OK)
return UNF_RETURN_ERROR;
if (v_lport != v_lport->root_lport)
return ret;
ret = unf_create_disc_thread(disc);
if (ret != RETURN_OK) {
unf_destroy_rscn_mgr(v_lport);
return UNF_RETURN_ERROR;
}
/* Init R_Port free Pool */
ret = unf_init_rport_pool(v_lport);
if (ret != RETURN_OK) {
unf_destroy_disc_thread(disc);
unf_destroy_rscn_mgr(v_lport);
return UNF_RETURN_ERROR;
}
/* Init R_Port free disc Pool */
ret = unf_init_disc_rport_pool(v_lport);
if (ret != RETURN_OK) {
unf_destroy_disc_thread(disc);
unf_free_rport_pool(v_lport);
unf_destroy_rscn_mgr(v_lport);
return UNF_RETURN_ERROR;
}
return ret;
}
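/* Cancel disc_work and wait (with timeout) for the discovery reference
 * count to reach zero; mark the port dirty if the wait times out
 */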
static void unf_wait_disc_complete(struct unf_lport_s *v_lport)
{
struct unf_disc_s *disc = NULL;
int wait = UNF_FALSE;
unsigned long flag = 0;
unsigned int ret = UNF_RETURN_ERROR;
unsigned long long time_out = 0;
struct completion disc_completion =
COMPLETION_INITIALIZER(disc_completion);
disc = &v_lport->disc;
UNF_DELAYED_WORK_SYNC(ret, v_lport->port_id, &disc->disc_work,
"Disc_work");
if (ret == RETURN_OK)
unf_disc_ref_cnt_dec(disc);
spin_lock_irqsave(&disc->rport_busy_pool_lock, flag);
if (atomic_read(&disc->disc_ref_cnt) != 0) {
disc->disc_completion = &disc_completion;
wait = UNF_TRUE;
}
spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag);
if (wait == UNF_TRUE) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Port(0x%x) begin to wait for discover completion(0x%lx)",
v_lport->port_id, jiffies);
time_out = wait_for_completion_timeout(
disc->disc_completion,
msecs_to_jiffies(UNF_OS_REMOVE_CARD_TIMEOUT));
if (time_out == 0)
unf_cmmark_dirty_mem(v_lport,
UNF_LPORT_DIRTY_FLAG_DISC_DIRTY);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Port(0x%x) wait for discover completion end(0x%lx)",
v_lport->port_id, jiffies);
spin_lock_irqsave(&disc->rport_busy_pool_lock, flag);
disc->disc_completion = NULL;
spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag);
}
}
void unf_disc_mgr_destroy(void *v_lport)
{
struct unf_disc_s *disc = NULL;
struct unf_lport_s *lport = NULL;
UNF_CHECK_VALID(0x672, UNF_TRUE, v_lport, return);
lport = (struct unf_lport_s *)v_lport;
disc = &lport->disc;
disc->retry_count = 0;
disc->unf_disc_temp.pfn_unf_disc_start = NULL;
disc->unf_disc_temp.pfn_unf_disc_stop = NULL;
disc->unf_disc_temp.pfn_unf_disc_callback = NULL;
unf_free_disc_rport_pool(lport);
unf_destroy_rscn_mgr(lport);
unf_wait_disc_complete(lport);
if (lport != lport->root_lport)
return;
unf_destroy_disc_thread(disc);
unf_free_rport_pool(lport);
lport->destroy_step = UNF_LPORT_DESTROY_STEP_6_DESTROY_DISC_MGR;
}
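/* Discovery error recovery: requeue disc_work after ED_TOV while
 * retries remain, otherwise advance the discovery state machine
 * (GID_PT wait -> GID_FT, GID_FT wait -> end)
 */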
void unf_disc_error_recovery(void *v_lport)
{
struct unf_rport_s *rport = NULL;
struct unf_disc_s *disc = NULL;
unsigned long delay = 0;
unsigned long flag = 0;
unsigned int ret = UNF_RETURN_ERROR;
struct unf_lport_s *lport = NULL;
UNF_CHECK_VALID(0x673, UNF_TRUE, v_lport, return);
lport = (struct unf_lport_s *)v_lport;
disc = &lport->disc;
rport = unf_get_rport_by_nport_id(lport, UNF_FC_FID_DIR_SERV);
if (!rport) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x) find RPort failed",
lport->port_id);
return;
}
spin_lock_irqsave(&disc->rport_busy_pool_lock, flag);
/* Delay work is pending */
if (delayed_work_pending(&disc->disc_work)) {
spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"[info]Port(0x%x) disc_work is running and do nothing",
lport->port_id);
return;
}
/* Continue to retry */
if (disc->retry_count < disc->max_retry_count) {
disc->retry_count++;
delay = (unsigned long)lport->ed_tov;
if (queue_delayed_work(unf_work_queue, &disc->disc_work,
(unsigned long)msecs_to_jiffies(
(unsigned int)delay))) {
atomic_inc(&disc->disc_ref_cnt);
}
spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag);
} else {
/* Go to next stage */
if (disc->en_states == UNF_DISC_ST_GIDPT_WAIT) {
/* GID_PT_WAIT --->>> Send GID_FT */
unf_disc_state_ma(lport, UNF_EVENT_DISC_RETRY_TIMEOUT);
spin_unlock_irqrestore(&disc->rport_busy_pool_lock,
flag);
while ((ret != RETURN_OK) &&
(disc->retry_count < disc->max_retry_count)) {
ret = unf_send_gid_ft(lport, rport);
disc->retry_count++;
}
} else if (disc->en_states == UNF_DISC_ST_GIDFT_WAIT) {
/* GID_FT_WAIT --->>> Send LOGO */
unf_disc_state_ma(lport, UNF_EVENT_DISC_RETRY_TIMEOUT);
spin_unlock_irqrestore(&disc->rport_busy_pool_lock,
flag);
} else {
spin_unlock_irqrestore(&disc->rport_busy_pool_lock,
flag);
}
}
}
enum unf_disc_state_e unf_disc_stat_start(enum unf_disc_state_e v_old_state,
enum unf_disc_event_e v_en_event)
{
enum unf_disc_state_e en_next_state = UNF_DISC_ST_END;
if (v_en_event == UNF_EVENT_DISC_NORMAL_ENTER)
en_next_state = UNF_DISC_ST_GIDPT_WAIT;
else
en_next_state = v_old_state;
return en_next_state;
}
enum unf_disc_state_e unf_disc_stat_gid_pt_wait(
enum unf_disc_state_e v_old_state,
enum unf_disc_event_e v_en_event)
{
enum unf_disc_state_e en_next_state = UNF_DISC_ST_END;
switch (v_en_event) {
case UNF_EVENT_DISC_FAILED:
en_next_state = UNF_DISC_ST_GIDPT_WAIT;
break;
case UNF_EVENT_DISC_RETRY_TIMEOUT:
en_next_state = UNF_DISC_ST_GIDFT_WAIT;
break;
case UNF_EVENT_DISC_SUCCESS:
en_next_state = UNF_DISC_ST_END;
break;
case UNF_EVENT_DISC_LINKDOWN:
en_next_state = UNF_DISC_ST_START;
break;
default:
en_next_state = v_old_state;
break;
}
return en_next_state;
}
enum unf_disc_state_e unf_disc_stat_gid_ft_wait(
enum unf_disc_state_e v_old_state,
enum unf_disc_event_e v_en_event)
{
enum unf_disc_state_e en_next_state = UNF_DISC_ST_END;
switch (v_en_event) {
case UNF_EVENT_DISC_FAILED:
en_next_state = UNF_DISC_ST_GIDFT_WAIT;
break;
case UNF_EVENT_DISC_RETRY_TIMEOUT:
en_next_state = UNF_DISC_ST_END;
break;
case UNF_EVENT_DISC_SUCCESS:
en_next_state = UNF_DISC_ST_END;
break;
case UNF_EVENT_DISC_LINKDOWN:
en_next_state = UNF_DISC_ST_START;
break;
default:
en_next_state = v_old_state;
break;
}
return en_next_state;
}
enum unf_disc_state_e unf_disc_stat_end(enum unf_disc_state_e v_old_state,
enum unf_disc_event_e v_en_event)
{
enum unf_disc_state_e en_next_state = UNF_DISC_ST_END;
if (v_en_event == UNF_EVENT_DISC_LINKDOWN)
en_next_state = UNF_DISC_ST_START;
else
en_next_state = v_old_state;
return en_next_state;
}
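/* Discovery state machine: derive the next state from the current
 * state and the incoming event, then apply it
 */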
void unf_disc_state_ma(struct unf_lport_s *v_lport,
enum unf_disc_event_e v_en_event)
{
struct unf_disc_s *disc = NULL;
enum unf_disc_state_e en_old_state = UNF_DISC_ST_START;
enum unf_disc_state_e en_next_state = UNF_DISC_ST_START;
UNF_CHECK_VALID(0x674, UNF_TRUE, v_lport, return);
disc = &v_lport->disc;
en_old_state = disc->en_states;
switch (disc->en_states) {
case UNF_DISC_ST_START:
en_next_state = unf_disc_stat_start(en_old_state, v_en_event);
break;
case UNF_DISC_ST_GIDPT_WAIT:
en_next_state = unf_disc_stat_gid_pt_wait(en_old_state,
v_en_event);
break;
case UNF_DISC_ST_GIDFT_WAIT:
en_next_state = unf_disc_stat_gid_ft_wait(en_old_state,
v_en_event);
break;
case UNF_DISC_ST_END:
en_next_state = unf_disc_stat_end(en_old_state, v_en_event);
break;
default:
en_next_state = en_old_state;
break;
}
unf_set_disc_state(disc, en_next_state);
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __UNF_DISC_H__
#define __UNF_DISC_H__
#define UNF_DISC_RETRY_TIMES 3
#define UNF_DISC_NONE 0
#define UNF_DISC_FABRIC 1
#define UNF_DISC_LOOP 2
enum unf_disc_state_e {
UNF_DISC_ST_START = 0x3000,
UNF_DISC_ST_GIDPT_WAIT,
UNF_DISC_ST_GIDFT_WAIT,
UNF_DISC_ST_END
};
enum unf_disc_event_e {
UNF_EVENT_DISC_NORMAL_ENTER = 0x8000,
UNF_EVENT_DISC_FAILED = 0x8001,
UNF_EVENT_DISC_SUCCESS = 0x8002,
UNF_EVENT_DISC_RETRY_TIMEOUT = 0x8003,
UNF_EVENT_DISC_LINKDOWN = 0x8004
};
enum unf_disc_type_e {
UNF_DISC_GET_PORT_NAME = 0,
UNF_DISC_GET_NODE_NAME,
UNF_DISC_GET_FEATURE
};
struct unf_disc_gs_event_info {
void *lport;
void *rport;
unsigned int rport_id;
enum unf_disc_type_e entype;
struct list_head list_entry;
};
unsigned int unf_get_and_post_disc_event(void *v_lport,
void *v_sns_port,
unsigned int v_nport_id,
enum unf_disc_type_e v_en_type);
void unf_flush_disc_event(void *v_disc, void *v_vport);
void unf_disc_error_recovery(void *v_lport);
void unf_disc_mgr_destroy(void *v_lport);
void unf_disc_ctrl_size_inc(void *v_lport, unsigned int v_cmnd);
#endif
// SPDX-License-Identifier: GPL-2.0
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#include "unf_log.h"
#include "unf_common.h"
#include "unf_event.h"
#include "unf_lport.h"
struct unf_event_list fc_event_list;
struct unf_global_event_queue global_event_queue;
/* Max global event node */
#define UNF_MAX_GLOBAL_ENENT_NODE 24
unsigned int unf_init_event_msg(struct unf_lport_s *v_lport)
{
struct unf_event_mgr *event_mgr = NULL;
struct unf_cm_event_report *event_node = NULL;
unsigned int i;
unsigned long flags = 0;
UNF_CHECK_VALID(0x770, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
event_mgr = &v_lport->event_mgr;
/* Get and Initial Event Node resource */
event_mgr->pmem_add =
vmalloc((size_t)event_mgr->free_event_count *
sizeof(struct unf_cm_event_report));
if (!event_mgr->pmem_add) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[warn]Port(0x%x) allocate event manager failed",
v_lport->port_id);
return UNF_RETURN_ERROR;
}
memset(event_mgr->pmem_add, 0,
((size_t)event_mgr->free_event_count *
sizeof(struct unf_cm_event_report)));
event_node = (struct unf_cm_event_report *)(event_mgr->pmem_add);
spin_lock_irqsave(&event_mgr->port_event_lock, flags);
for (i = 0; i < event_mgr->free_event_count; i++) {
INIT_LIST_HEAD(&event_node->list_entry);
list_add_tail(&event_node->list_entry,
&event_mgr->list_free_event);
event_node++;
}
spin_unlock_irqrestore(&event_mgr->port_event_lock, flags);
return RETURN_OK;
}
static void unf_del_eventcenter(struct unf_lport_s *v_lport)
{
struct unf_event_mgr *event_mgr = NULL;
UNF_CHECK_VALID(0x771, UNF_TRUE, v_lport, return);
event_mgr = &v_lport->event_mgr;
event_mgr->pfn_unf_get_free_event = NULL;
event_mgr->pfn_unf_release_event = NULL;
event_mgr->pfn_unf_post_event = NULL;
}
void unf_init_event_node(struct unf_cm_event_report *v_event_node)
{
UNF_CHECK_VALID(0x776, UNF_TRUE, v_event_node, return);
v_event_node->event = UNF_EVENT_TYPE_REQUIRE;
v_event_node->event_asy_flag = UNF_EVENT_ASYN;
v_event_node->delay_times = 0;
v_event_node->para_in = NULL;
v_event_node->para_out = NULL;
v_event_node->result = 0;
v_event_node->lport = NULL;
v_event_node->pfn_unf_event_task = NULL;
v_event_node->pfn_unf_event_recovery_strategy = NULL;
v_event_node->pfn_unf_event_alarm_strategy = NULL;
}
struct unf_cm_event_report *unf_get_free_event_node(void *v_lport)
{
struct unf_event_mgr *event_mgr = NULL;
struct unf_cm_event_report *event_node = NULL;
struct list_head *list_node = NULL;
struct unf_lport_s *root_lport = NULL;
unsigned long flags = 0;
UNF_CHECK_VALID(0x777, UNF_TRUE, v_lport, return NULL);
root_lport = (struct unf_lport_s *)v_lport;
root_lport = root_lport->root_lport;
if (unlikely(atomic_read(&root_lport->port_no_operater_flag) ==
UNF_LPORT_NOP))
return NULL;
/* Get EventMgr from Lport */
event_mgr = &root_lport->event_mgr;
/* Get free node free pool */
spin_lock_irqsave(&event_mgr->port_event_lock, flags);
if (list_empty(&event_mgr->list_free_event)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[warn]Port(0x%x) have no event node anymore",
root_lport->port_id);
spin_unlock_irqrestore(&event_mgr->port_event_lock, flags);
return NULL;
}
list_node = (&event_mgr->list_free_event)->next;
list_del(list_node);
event_mgr->free_event_count--;
event_node = list_entry(list_node, struct unf_cm_event_report,
list_entry);
/* Initial event node */
unf_init_event_node(event_node);
spin_unlock_irqrestore(&event_mgr->port_event_lock, flags);
return event_node;
}
void unf_check_event_mgr_status(struct unf_event_mgr *v_event_mgr)
{
unsigned long flag = 0;
UNF_CHECK_VALID(0x773, UNF_TRUE, v_event_mgr, return);
spin_lock_irqsave(&v_event_mgr->port_event_lock, flag);
if ((v_event_mgr->emg_completion) &&
(v_event_mgr->free_event_count == UNF_MAX_EVENT_NODE)) {
complete(v_event_mgr->emg_completion);
}
spin_unlock_irqrestore(&v_event_mgr->port_event_lock, flag);
}
void unf_release_event(void *v_lport, void *v_event_node)
{
struct unf_event_mgr *event_mgr = NULL;
struct unf_lport_s *root_lport = NULL;
struct unf_cm_event_report *event_node = NULL;
unsigned long flags = 0;
UNF_CHECK_VALID(0x778, UNF_TRUE, v_lport, return);
UNF_CHECK_VALID(0x779, UNF_TRUE, v_event_node, return);
event_node = (struct unf_cm_event_report *)v_event_node;
root_lport = (struct unf_lport_s *)v_lport;
root_lport = root_lport->root_lport;
event_mgr = &root_lport->event_mgr;
spin_lock_irqsave(&event_mgr->port_event_lock, flags);
event_mgr->free_event_count++;
unf_init_event_node(event_node);
list_add_tail(&event_node->list_entry, &event_mgr->list_free_event);
spin_unlock_irqrestore(&event_mgr->port_event_lock, flags);
unf_check_event_mgr_status(event_mgr);
}
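/* Post an event node to the chip thread of the root L_Port; without an
 * L_Port or chip info, fall back to the global event list. The matching
 * worker thread is woken up afterwards.
 */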
void unf_post_event(void *v_lport, void *v_event_node)
{
struct unf_cm_event_report *event_node = NULL;
struct unf_chip_manage_info_s *card_thread_info = NULL;
struct unf_lport_s *root_lport = NULL;
unsigned long flags = 0;
UNF_CHECK_VALID(0x780, UNF_TRUE, v_event_node, return);
event_node = (struct unf_cm_event_report *)v_event_node;
UNF_REFERNCE_VAR(v_lport);
/* If null, post to global event center */
if (!v_lport) {
spin_lock_irqsave(&fc_event_list.fc_eventlist_lock, flags);
fc_event_list.list_num++;
list_add_tail(&event_node->list_entry,
&fc_event_list.list_head);
spin_unlock_irqrestore(&fc_event_list.fc_eventlist_lock,
flags);
wake_up_process(event_thread);
} else {
root_lport = (struct unf_lport_s *)v_lport;
root_lport = root_lport->root_lport;
card_thread_info = root_lport->chip_info;
/* Post to global event center */
if (!card_thread_info) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EVENT,
UNF_WARN,
"[warn]Port(0x%x) has strange event with type(0x%x)",
root_lport->nport_id, event_node->event);
spin_lock_irqsave(&fc_event_list.fc_eventlist_lock,
flags);
fc_event_list.list_num++;
list_add_tail(&event_node->list_entry,
&fc_event_list.list_head);
spin_unlock_irqrestore(
&fc_event_list.fc_eventlist_lock,
flags);
wake_up_process(event_thread);
} else {
spin_lock_irqsave(
&card_thread_info->chip_event_list_lock,
flags);
card_thread_info->list_num++;
list_add_tail(&event_node->list_entry,
&card_thread_info->list_head);
spin_unlock_irqrestore(
&card_thread_info->chip_event_list_lock,
flags);
wake_up_process(card_thread_info->data_thread);
}
}
}
unsigned int unf_init_event_center(void *v_lport)
{
struct unf_event_mgr *event_mgr = NULL;
unsigned int ret = RETURN_OK;
struct unf_lport_s *lport = NULL;
UNF_CHECK_VALID(0x772, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
lport = (struct unf_lport_s *)v_lport;
/* Initial Disc manager */
event_mgr = &lport->event_mgr;
event_mgr->free_event_count = UNF_MAX_EVENT_NODE;
event_mgr->pfn_unf_get_free_event = unf_get_free_event_node;
event_mgr->pfn_unf_release_event = unf_release_event;
event_mgr->pfn_unf_post_event = unf_post_event;
INIT_LIST_HEAD(&event_mgr->list_free_event);
spin_lock_init(&event_mgr->port_event_lock);
event_mgr->emg_completion = NULL;
ret = unf_init_event_msg(lport);
return ret;
}
void unf_wait_event_mgr_complete(struct unf_event_mgr *v_event_mgr)
{
struct unf_event_mgr *event_mgr = NULL;
int wait = UNF_FALSE;
unsigned long mg_flag = 0;
struct completion fc_event_completion =
COMPLETION_INITIALIZER(fc_event_completion);
UNF_CHECK_VALID(0x774, UNF_TRUE, v_event_mgr, return);
event_mgr = v_event_mgr;
spin_lock_irqsave(&event_mgr->port_event_lock, mg_flag);
if (event_mgr->free_event_count != UNF_MAX_EVENT_NODE) {
event_mgr->emg_completion = &fc_event_completion;
wait = UNF_TRUE;
}
spin_unlock_irqrestore(&event_mgr->port_event_lock, mg_flag);
if (wait == UNF_TRUE)
wait_for_completion(event_mgr->emg_completion);
spin_lock_irqsave(&event_mgr->port_event_lock, mg_flag);
event_mgr->emg_completion = NULL;
spin_unlock_irqrestore(&event_mgr->port_event_lock, mg_flag);
}
unsigned int unf_event_center_destroy(void *v_lport)
{
struct unf_event_mgr *event_mgr = NULL;
struct list_head *list = NULL;
struct list_head *list_tmp = NULL;
struct unf_cm_event_report *event_node = NULL;
unsigned int ret = RETURN_OK;
unsigned long flag = 0;
unsigned long list_lock_flag = 0;
struct unf_lport_s *lport = NULL;
UNF_CHECK_VALID(0x775, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
lport = (struct unf_lport_s *)v_lport;
event_mgr = &lport->event_mgr;
spin_lock_irqsave(&fc_event_list.fc_eventlist_lock, list_lock_flag);
if (!list_empty(&fc_event_list.list_head)) {
list_for_each_safe(list, list_tmp, &fc_event_list.list_head) {
event_node = list_entry(list,
struct unf_cm_event_report,
list_entry);
if (lport == event_node->lport) {
list_del_init(&event_node->list_entry);
if (event_node->event_asy_flag ==
UNF_EVENT_SYN) {
event_node->result = UNF_RETURN_ERROR;
complete(&event_node->event_comp);
}
spin_lock_irqsave(&event_mgr->port_event_lock,
flag);
event_mgr->free_event_count++;
list_add_tail(&event_node->list_entry,
&event_mgr->list_free_event);
spin_unlock_irqrestore(
&event_mgr->port_event_lock, flag);
}
}
}
spin_unlock_irqrestore(&fc_event_list.fc_eventlist_lock,
list_lock_flag);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Port(0x%x) begin to wait event", lport->port_id);
unf_wait_event_mgr_complete(event_mgr);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Port(0x%x) wait event process end", lport->port_id);
unf_del_eventcenter(lport);
vfree(event_mgr->pmem_add);
event_mgr->pmem_add = NULL;
lport->destroy_step = UNF_LPORT_DESTROY_STEP_3_DESTROY_EVENT_CENTER;
return ret;
}
static void unf_procee_asyn_event(struct unf_cm_event_report *v_event_node)
{
unsigned int ret = UNF_RETURN_ERROR;
struct unf_lport_s *lport = (struct unf_lport_s *)v_event_node->lport;
UNF_CHECK_VALID(0x782, UNF_TRUE, lport, return);
if (v_event_node->pfn_unf_event_task)
ret = (unsigned int)
v_event_node->pfn_unf_event_task(v_event_node->para_in,
v_event_node->para_out);
if (lport->event_mgr.pfn_unf_release_event)
lport->event_mgr.pfn_unf_release_event(lport, v_event_node);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EVENT, UNF_WARN,
"[warn]Port(0x%x) handle event(0x%x) failed",
lport->port_id, v_event_node->event);
}
UNF_REFERNCE_VAR(ret);
}
void unf_release_global_event(void *v_event_node)
{
unsigned long flag = 0;
struct unf_cm_event_report *event_node = NULL;
UNF_CHECK_VALID(0x784, UNF_TRUE, v_event_node, return);
event_node = (struct unf_cm_event_report *)v_event_node;
unf_init_event_node(event_node);
spin_lock_irqsave(&global_event_queue.global_eventlist_lock, flag);
global_event_queue.list_number++;
list_add_tail(&event_node->list_entry,
&global_event_queue.global_eventlist);
spin_unlock_irqrestore(&global_event_queue.global_eventlist_lock,
flag);
}
void unf_handle_event(struct unf_cm_event_report *v_event_node)
{
unsigned int ret = UNF_RETURN_ERROR;
unsigned int event = 0;
unsigned int event_asy_flag = UNF_EVENT_ASYN;
UNF_CHECK_VALID(0x781, UNF_TRUE, v_event_node, return);
UNF_REFERNCE_VAR(ret);
UNF_REFERNCE_VAR(event);
event = v_event_node->event;
event_asy_flag = v_event_node->event_asy_flag;
switch (event_asy_flag) {
case UNF_EVENT_SYN: /* synchronous event node */
case UNF_GLOBAL_EVENT_SYN:
if (v_event_node->pfn_unf_event_task) {
ret = (unsigned int)v_event_node->pfn_unf_event_task(
v_event_node->para_in,
v_event_node->para_out);
}
v_event_node->result = ret;
complete(&v_event_node->event_comp);
break;
case UNF_EVENT_ASYN: /* asynchronous event node */
unf_procee_asyn_event(v_event_node);
break;
case UNF_GLOBAL_EVENT_ASYN:
if (v_event_node->pfn_unf_event_task) {
ret = (unsigned int)v_event_node->pfn_unf_event_task(
v_event_node->para_in,
v_event_node->para_out);
}
unf_release_global_event(v_event_node);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN,
UNF_LOG_EVENT, UNF_WARN,
"[warn]handle global event(0x%x) failed",
event);
}
break;
default:
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EVENT, UNF_WARN,
"[warn]Unknown event(0x%x)", event);
break;
}
}
unsigned int unf_init_global_event_msg(void)
{
struct unf_cm_event_report *event_node = NULL;
unsigned int ret = RETURN_OK;
unsigned int i = 0;
unsigned long flag = 0;
INIT_LIST_HEAD(&global_event_queue.global_eventlist);
spin_lock_init(&global_event_queue.global_eventlist_lock);
global_event_queue.list_number = 0;
global_event_queue.global_event_add =
vmalloc(UNF_MAX_GLOBAL_ENENT_NODE *
sizeof(struct unf_cm_event_report));
if (!global_event_queue.global_event_add) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Can't allocate global event queue");
return UNF_RETURN_ERROR;
}
memset(global_event_queue.global_event_add, 0,
(sizeof(struct unf_cm_event_report) *
UNF_MAX_GLOBAL_ENENT_NODE));
event_node = (struct unf_cm_event_report *)
(global_event_queue.global_event_add);
spin_lock_irqsave(&global_event_queue.global_eventlist_lock, flag);
for (i = 0; i < UNF_MAX_GLOBAL_ENENT_NODE; i++) {
INIT_LIST_HEAD(&event_node->list_entry);
list_add_tail(&event_node->list_entry,
&global_event_queue.global_eventlist);
global_event_queue.list_number++;
event_node++;
}
spin_unlock_irqrestore(&global_event_queue.global_eventlist_lock,
flag);
return ret;
}
void unf_destroy_global_event_msg(void)
{
if (global_event_queue.list_number != UNF_MAX_GLOBAL_ENENT_NODE) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EVENT, UNF_CRITICAL,
"[warn]Global event release not complete with remain nodes(0x%x)",
global_event_queue.list_number);
}
vfree(global_event_queue.global_event_add);
}
unsigned int unf_schedule_global_event(
void *v_para,
unsigned int v_event_asy_flag,
int (*pfn_unf_event_task)(void *v_argin, void *v_argout))
{
struct list_head *list_node = NULL;
struct unf_cm_event_report *event_node = NULL;
unsigned long flag = 0;
unsigned int ret = UNF_RETURN_ERROR;
UNF_CHECK_VALID(0x783, UNF_TRUE, pfn_unf_event_task,
return UNF_RETURN_ERROR);
if ((v_event_asy_flag != UNF_GLOBAL_EVENT_ASYN) &&
(v_event_asy_flag != UNF_GLOBAL_EVENT_SYN)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[warn]Event async flag(0x%x) abnormity",
v_event_asy_flag);
return UNF_RETURN_ERROR;
}
spin_lock_irqsave(&global_event_queue.global_eventlist_lock, flag);
if (list_empty(&global_event_queue.global_eventlist)) {
spin_unlock_irqrestore(
&global_event_queue.global_eventlist_lock, flag);
return UNF_RETURN_ERROR;
}
list_node = (&global_event_queue.global_eventlist)->next;
list_del_init(list_node);
global_event_queue.list_number--;
event_node = list_entry(list_node, struct unf_cm_event_report,
list_entry);
spin_unlock_irqrestore(&global_event_queue.global_eventlist_lock,
flag);
/* Initial global event */
unf_init_event_node(event_node);
init_completion(&event_node->event_comp);
event_node->event_asy_flag = v_event_asy_flag;
event_node->pfn_unf_event_task = pfn_unf_event_task;
event_node->para_in = (void *)v_para;
event_node->para_out = NULL;
unf_post_event(NULL, event_node);
if (v_event_asy_flag == UNF_GLOBAL_EVENT_SYN) {
/* must wait for complete */
wait_for_completion(&event_node->event_comp);
ret = event_node->result;
unf_release_global_event(event_node);
} else {
ret = RETURN_OK;
}
return ret;
}
struct unf_cm_event_report *unf_get_one_event_node(void *v_lport)
{
struct unf_lport_s *lport = (struct unf_lport_s *)v_lport;
UNF_CHECK_VALID(0x785, UNF_TRUE, v_lport, return NULL);
UNF_CHECK_VALID(0x786, UNF_TRUE,
lport->event_mgr.pfn_unf_get_free_event,
return NULL);
return lport->event_mgr.pfn_unf_get_free_event((void *)lport);
}
void unf_post_one_event_node(void *v_lport,
struct unf_cm_event_report *v_event)
{
struct unf_lport_s *lport = (struct unf_lport_s *)v_lport;
UNF_CHECK_VALID(0x787, UNF_TRUE, v_lport, return);
UNF_CHECK_VALID(0x788, UNF_TRUE, v_event, return);
UNF_CHECK_VALID(0x789, UNF_TRUE, lport->event_mgr.pfn_unf_post_event,
return);
UNF_CHECK_VALID(0x790, UNF_TRUE, v_event, return);
lport->event_mgr.pfn_unf_post_event((void *)lport, v_event);
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __UNF_EVENT_H__
#define __UNF_EVENT_H__
#include "hifc_knl_adp.h"
enum unf_poll_flag {
UNF_POLL_CHIPERROR_FLAG = 0, /* CHIP ERROR POLL */
UNF_POLL_ERROR_CODE, /* CODE ERROR POLL */
UNF_POLL_SFP_FLAG, /* SFP POLL */
UNF_POLL_BUTT
};
#define UNF_MAX_EVENT_NODE 256
enum unf_event_type {
UNF_EVENT_TYPE_ALARM = 0, /* Alarm */
UNF_EVENT_TYPE_REQUIRE, /* Require */
UNF_EVENT_TYPE_RECOVERY, /* Recovery */
UNF_EVENT_TYPE_BUTT
};
struct unf_cm_event_report {
/* event type */
unsigned int event;
/* ASY flag */
unsigned int event_asy_flag;
/* Delay times, must be an async event */
unsigned int delay_times;
struct list_head list_entry;
void *lport;
/* parameter */
void *para_in;
void *para_out;
unsigned int result;
/* event handler task */
int (*pfn_unf_event_task)(void *v_argin, void *v_argout);
/* recovery strategy */
int (*pfn_unf_event_recovery_strategy)(void *);
/* alarm strategy */
int (*pfn_unf_event_alarm_strategy)(void *);
struct completion event_comp;
};
struct unf_event_mgr {
spinlock_t port_event_lock;
unsigned int free_event_count;
struct list_head list_free_event;
struct completion *emg_completion;
void *pmem_add;
struct unf_cm_event_report *(*pfn_unf_get_free_event)(void *v_lport);
void (*pfn_unf_release_event)(void *v_lport, void *v_event_node);
void (*pfn_unf_post_event)(void *v_lport, void *v_event_node);
};
struct unf_global_event_queue {
void *global_event_add;
unsigned int list_number;
struct list_head global_eventlist;
spinlock_t global_eventlist_lock;
};
struct unf_event_list {
struct list_head list_head;
spinlock_t fc_eventlist_lock;
unsigned int list_num; /* list node number */
};
void unf_handle_event(struct unf_cm_event_report *v_event_node);
unsigned int unf_init_global_event_msg(void);
void unf_destroy_global_event_msg(void);
unsigned int unf_schedule_global_event(
void *v_para,
unsigned int v_event_asy_flag,
int (*pfn_unf_event_task)(void *v_argin, void *v_argout));
struct unf_cm_event_report *unf_get_one_event_node(void *v_lport);
void unf_post_one_event_node(void *v_lport,
struct unf_cm_event_report *v_event);
unsigned int unf_event_center_destroy(void *v_lport);
unsigned int unf_init_event_center(void *v_lport);
extern struct task_struct *event_thread;
extern struct unf_global_event_queue global_event_queue;
extern struct unf_event_list fc_event_list;
#endif
// SPDX-License-Identifier: GPL-2.0
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#include "unf_log.h"
#include "unf_common.h"
#include "unf_exchg.h"
#include "unf_rport.h"
#include "unf_service.h"
#include "unf_io.h"
#define UNF_DEL_XCHG_TIMER_SAFE(v_xchg) \
do { \
if (cancel_delayed_work(&((v_xchg)->timeout_work))) { \
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_IO_ATT, \
UNF_MAJOR, \
"Exchange(0x%p) is free, but timer is pending.", \
v_xchg); \
} else { \
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_IO_ATT, \
UNF_CRITICAL, \
"Exchange(0x%p) is free, but timer is running.", \
v_xchg); \
} \
} while (0)
#define UNF_XCHG_IS_ELS_REPLY(v_xchg) \
((((v_xchg)->cmnd_code & 0x0ffff) == ELS_ACC) || \
(((v_xchg)->cmnd_code & 0x0ffff) == ELS_RJT))
static struct unf_ioflow_id_s io_stage[] = {
{ "XCHG_ALLOC" },
{ "TGT_RECEIVE_ABTS" },
{ "TGT_ABTS_DONE" },
{ "TGT_IO_SRR" },
{ "SFS_RESPONSE" },
{ "SFS_TIMEOUT" },
{ "INI_SEND_CMND" },
{ "INI_RESPONSE_DONE" },
{ "INI_EH_ABORT" },
{ "INI_EH_DEVICE_RESET" },
{ "INI_EH_BLS_DONE" },
{ "INI_IO_TIMEOUT" },
{ "INI_REQ_TIMEOUT" },
{ "XCHG_CANCEL_TIMER" },
{ "XCHG_FREE_XCHG" },
{ "SEND_ELS" },
{ "IO_XCHG_WAIT" },
};
void unf_wakeup_scsi_task_cmnd(struct unf_lport_s *v_lport)
{
struct list_head *node = NULL;
struct list_head *next_node = NULL;
struct unf_xchg_s *xchg = NULL;
unsigned long hot_pool_lock_flags = 0;
unsigned long xchg_flag = 0;
struct unf_xchg_mgr_s *xchg_mgr = NULL;
unsigned int i = 0;
UNF_CHECK_VALID(0x850, UNF_TRUE, v_lport, return);
for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
xchg_mgr = unf_get_xchg_mgr_by_lport(v_lport, i);
if (!xchg_mgr) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO,
UNF_LOG_EVENT, UNF_MINOR,
"Can't find LPort(0x%x) MgrIdx %u exchange manager.",
v_lport->port_id, i);
continue;
}
spin_lock_irqsave(&xchg_mgr->hot_pool->xchg_hot_pool_lock,
hot_pool_lock_flags);
list_for_each_safe(node, next_node,
&xchg_mgr->hot_pool->ini_busylist) {
xchg = list_entry(node, struct unf_xchg_s,
list_xchg_entry);
spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flag);
if (INI_IO_STATE_UPTASK & xchg->io_state &&
(atomic_read(&xchg->ref_cnt) > 0)) {
UNF_SET_SCSI_CMND_RESULT(xchg, UNF_IO_SUCCESS);
up(&xchg->task_sema);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO,
UNF_LOG_EVENT, UNF_MINOR,
"Wake up task command exchange(0x%p), Hot Pool Tag(0x%x).",
xchg, xchg->hot_pool_tag);
}
spin_unlock_irqrestore(&xchg->xchg_state_lock,
xchg_flag);
}
spin_unlock_irqrestore(&xchg_mgr->hot_pool->xchg_hot_pool_lock,
hot_pool_lock_flags);
}
}
void unf_cm_xchg_mgr_abort_io_by_id(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport,
unsigned int v_sid, unsigned int v_did,
unsigned int v_extra_io_state)
{
/*
* for target session: set ABORT
* 1. R_Port remove
* 2. Send PLOGI_ACC callback
* 3. RCVD PLOGI
* 4. RCVD LOGO
*/
UNF_CHECK_VALID(0x852, UNF_TRUE, v_lport, return);
if (v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_io_xchg_abort) {
/* The SID/DID of the Xchg is reversed in different phases,
 * so the abort is issued for both (SID, DID) and (DID, SID)
 */
v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_io_xchg_abort(
v_lport,
v_rport,
v_sid, v_did,
v_extra_io_state);
v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_io_xchg_abort(
v_lport, v_rport,
v_did, v_sid,
v_extra_io_state);
}
}
void unf_cm_xchg_mgr_abort_sfs_by_id(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport,
unsigned int v_sid, unsigned int v_did)
{
UNF_CHECK_VALID(0x990, UNF_TRUE, v_lport, return);
if (v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_sfs_xchg_abort) {
/* The SID/DID of the Xchg is reversed in different phases,
 * so the abort is issued for both (SID, DID) and (DID, SID)
 */
v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_sfs_xchg_abort(v_lport,
v_rport,
v_sid,
v_did);
v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_sfs_xchg_abort(v_lport,
v_rport,
v_did,
v_sid);
}
}
void unf_cm_xchg_abort_by_lun(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport,
unsigned long long v_lun_id,
void *v_tm_xchg,
int v_abort_all_lun_flag)
{
/*
* LUN Reset: set UP_ABORT tag, with:
* INI_Busy_list, IO_Wait_list,
* IO_Delay_list, IO_Delay_transfer_list
*/
void (*unf_xchg_abort_by_lun)(void*, void*, unsigned long long,
void*, int) = NULL;
UNF_CHECK_VALID(0x853, UNF_TRUE, v_lport, return);
unf_xchg_abort_by_lun =
v_lport->xchg_mgr_temp.pfn_unf_xchg_abort_by_lun;
if (unf_xchg_abort_by_lun) {
unf_xchg_abort_by_lun((void *)v_lport, (void *)v_rport,
v_lun_id, v_tm_xchg,
v_abort_all_lun_flag);
}
}
void unf_cm_xchg_abort_by_session(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport)
{
void (*pfn_unf_xchg_abort_by_session)(void*, void*) = NULL;
UNF_CHECK_VALID(0x853, UNF_TRUE, v_lport, return);
pfn_unf_xchg_abort_by_session =
v_lport->xchg_mgr_temp.pfn_unf_xchg_abort_by_session;
if (pfn_unf_xchg_abort_by_session) {
pfn_unf_xchg_abort_by_session((void *)v_lport,
(void *)v_rport);
}
}
void *unf_cm_get_free_xchg(void *v_lport, unsigned int v_xchg_type)
{
struct unf_lport_s *lport = NULL;
struct unf_cm_xchg_mgr_template_s *xch_mgr_temp = NULL;
UNF_CHECK_VALID(0x855, UNF_TRUE, unlikely(v_lport), return NULL);
lport = (struct unf_lport_s *)v_lport;
xch_mgr_temp = &lport->xchg_mgr_temp;
/* Find the corresponding Lport Xchg management template. */
UNF_CHECK_VALID(0x856, UNF_TRUE,
unlikely(xch_mgr_temp->pfn_unf_xchg_get_free_and_init),
return NULL);
return xch_mgr_temp->pfn_unf_xchg_get_free_and_init(lport, v_xchg_type,
INVALID_VALUE16);
}
void unf_cm_free_xchg(void *v_lport, void *v_xchg)
{
struct unf_lport_s *lport = NULL;
struct unf_cm_xchg_mgr_template_s *xch_mgr_temp = NULL;
UNF_CHECK_VALID(0x857, UNF_TRUE, unlikely(v_lport), return);
UNF_CHECK_VALID(0x858, UNF_TRUE, unlikely(v_xchg), return);
lport = (struct unf_lport_s *)v_lport;
xch_mgr_temp = &lport->xchg_mgr_temp;
UNF_CHECK_VALID(0x859, UNF_TRUE,
unlikely(xch_mgr_temp->pfn_unf_xchg_release),
return);
/*
* unf_cm_free_xchg --->>> unf_free_xchg
* --->>> unf_xchg_ref_dec --->>> unf_free_fcp_xchg
* --->>> unf_done_ini_xchg
*/
xch_mgr_temp->pfn_unf_xchg_release(v_lport, v_xchg);
}
void *unf_cm_lookup_xchg_by_tag(void *v_lport, unsigned short v_hot_pool_tag)
{
struct unf_lport_s *lport = NULL;
struct unf_cm_xchg_mgr_template_s *xch_mgr_temp = NULL;
UNF_CHECK_VALID(0x860, UNF_TRUE, unlikely(v_lport), return NULL);
/* Find the corresponding Lport Xchg management template */
lport = (struct unf_lport_s *)v_lport;
xch_mgr_temp = &lport->xchg_mgr_temp;
UNF_CHECK_VALID(0x861, UNF_TRUE,
unlikely(xch_mgr_temp->pfn_unf_look_up_xchg_by_tag),
return NULL);
return xch_mgr_temp->pfn_unf_look_up_xchg_by_tag(v_lport,
v_hot_pool_tag);
}
void *unf_cm_lookup_xchg_by_id(void *v_lport, unsigned short v_ox_id,
unsigned int v_oid)
{
struct unf_lport_s *lport = NULL;
struct unf_cm_xchg_mgr_template_s *xch_mgr_temp = NULL;
UNF_CHECK_VALID(0x862, UNF_TRUE, unlikely(v_lport), return NULL);
lport = (struct unf_lport_s *)v_lport;
xch_mgr_temp = &lport->xchg_mgr_temp;
/* Find the corresponding Lport Xchg management template */
UNF_CHECK_VALID(0x863, UNF_TRUE,
unlikely(xch_mgr_temp->pfn_unf_look_up_xchg_by_id),
return NULL);
return xch_mgr_temp->pfn_unf_look_up_xchg_by_id(v_lport, v_ox_id,
v_oid);
}
struct unf_xchg_s *unf_cm_lookup_xchg_by_cmnd_sn(
void *v_lport,
unsigned long long v_command_sn,
unsigned int v_world_id)
{
struct unf_lport_s *lport = NULL;
struct unf_cm_xchg_mgr_template_s *xch_mgr_temp = NULL;
struct unf_xchg_s *xchg = NULL;
UNF_CHECK_VALID(0x864, UNF_TRUE, unlikely(v_lport), return NULL);
lport = (struct unf_lport_s *)v_lport;
xch_mgr_temp = &lport->xchg_mgr_temp;
UNF_CHECK_VALID(
0x865, UNF_TRUE,
unlikely(xch_mgr_temp->pfn_unf_look_up_xchg_by_cmnd_sn),
return NULL);
xchg =
(struct unf_xchg_s *)xch_mgr_temp->pfn_unf_look_up_xchg_by_cmnd_sn(
lport, v_command_sn,
v_world_id);
return xchg;
}
static void unf_free_all_rsp_pages(struct unf_xchg_mgr_s *v_xchg_mgr)
{
unsigned int buff_index;
UNF_CHECK_VALID(0x868, UNF_TRUE, v_xchg_mgr, return);
if (v_xchg_mgr->rsp_buf_list.buflist) {
for (buff_index = 0; buff_index <
v_xchg_mgr->rsp_buf_list.buf_num;
buff_index++) {
if (v_xchg_mgr->rsp_buf_list.buflist[buff_index].vaddr) {
dma_free_coherent(
&v_xchg_mgr->hot_pool->lport->low_level_func.dev->dev,
v_xchg_mgr->rsp_buf_list.buf_size,
v_xchg_mgr->rsp_buf_list.buflist[buff_index].vaddr,
v_xchg_mgr->rsp_buf_list.buflist[buff_index].paddr);
v_xchg_mgr->rsp_buf_list.buflist[buff_index].vaddr = NULL;
}
}
kfree(v_xchg_mgr->rsp_buf_list.buflist);
v_xchg_mgr->rsp_buf_list.buflist = NULL;
}
}
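/* Carve the pre-allocated SFS union memory into SFS exchanges and
 * allocate DMA-coherent RSP IU pages for FCP exchanges, chaining both
 * kinds onto the free pool lists
 */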
static unsigned int unf_init_xchg(struct unf_lport_s *v_lport,
struct unf_xchg_mgr_s *v_xchg_mgr,
unsigned int v_xchg_sum,
unsigned int v_sfs_sum)
{
struct unf_xchg_s *xchg_mem = NULL;
union unf_sfs_u *sfs_mm_start = NULL;
dma_addr_t sfs_dma_addr;
struct unf_xchg_s *xchg = NULL;
struct unf_xchg_free_pool_s *free_pool = NULL;
unsigned int rsp_iu_nums_per_page = 0;
unsigned int rsp_iu_size = 0;
unsigned long flags = 0;
unsigned int xchg_sum = 0;
unsigned int i = 0;
unsigned int rsp_iu_loop = 0;
unsigned int buf_num;
unsigned int buf_size;
unsigned int curbuf_idx = 0;
void *page_addr;
dma_addr_t phy_addr;
UNF_CHECK_VALID(0x871, UNF_TRUE, v_sfs_sum <= v_xchg_sum,
return UNF_RETURN_ERROR);
free_pool = &v_xchg_mgr->free_pool;
xchg_sum = v_xchg_sum;
xchg_mem = v_xchg_mgr->fcp_mm_start;
xchg = xchg_mem;
sfs_mm_start = (union unf_sfs_u *)v_xchg_mgr->sfs_mm_start;
sfs_dma_addr = v_xchg_mgr->sfs_phy_addr;
/* 1. Allocate the SFS UNION memory to each SFS XCHG
* and mount the SFS XCHG to the corresponding FREE linked list
*/
free_pool->total_sfs_xchg = 0;
free_pool->sfs_xchg_sum = v_sfs_sum;
for (i = 0; i < v_sfs_sum; i++) {
INIT_LIST_HEAD(&xchg->list_xchg_entry);
INIT_LIST_HEAD(&xchg->list_esgls);
spin_lock_init(&xchg->xchg_state_lock);
sema_init(&xchg->task_sema, 0);
sema_init(&xchg->echo_info.echo_sync_sema, 0);
spin_lock_irqsave(&free_pool->xchg_free_pool_lock, flags);
xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr = sfs_mm_start;
xchg->fcp_sfs_union.sfs_entry.sfs_buff_phy_addr = sfs_dma_addr;
xchg->fcp_sfs_union.sfs_entry.sfs_buff_len =
sizeof(*sfs_mm_start);
list_add_tail(&xchg->list_xchg_entry,
&free_pool->list_sfs_xchg_list);
free_pool->total_sfs_xchg++;
spin_unlock_irqrestore(&free_pool->xchg_free_pool_lock, flags);
sfs_mm_start++;
sfs_dma_addr = sfs_dma_addr + sizeof(union unf_sfs_u);
xchg++;
}
/*
* 2. Allocate RSP IU memory for each IO XCHG and mount IO
* XCHG to the corresponding FREE linked list
* The memory size of each RSP IU is rsp_iu_size.
*/
rsp_iu_size = (UNF_FCPRSP_CTL_LEN + UNF_MAX_RSP_INFO_LEN +
UNF_SCSI_SENSE_DATA_LEN);
buf_size = BUF_LIST_PAGE_SIZE;
if ((xchg_sum - v_sfs_sum) * rsp_iu_size < BUF_LIST_PAGE_SIZE)
buf_size = (xchg_sum - v_sfs_sum) * rsp_iu_size;
rsp_iu_nums_per_page = buf_size / rsp_iu_size;
buf_num = (xchg_sum - v_sfs_sum) % rsp_iu_nums_per_page ?
(xchg_sum - v_sfs_sum) / rsp_iu_nums_per_page + 1 :
(xchg_sum - v_sfs_sum) / rsp_iu_nums_per_page;
v_xchg_mgr->rsp_buf_list.buflist =
(struct buff_list_s *)kmalloc(
buf_num * sizeof(struct buff_list_s),
GFP_KERNEL);
v_xchg_mgr->rsp_buf_list.buf_num = buf_num;
v_xchg_mgr->rsp_buf_list.buf_size = buf_size;
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Port(0x%x) buff num 0x%x buff size 0x%x",
v_lport->port_id, buf_num,
v_xchg_mgr->rsp_buf_list.buf_size);
if (!v_xchg_mgr->rsp_buf_list.buflist) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[err]Allocate BigSfs pool buf list failed out of memory");
goto free_buff;
}
memset(v_xchg_mgr->rsp_buf_list.buflist, 0,
buf_num * sizeof(struct buff_list_s));
free_pool->total_fcp_xchg = 0;
for (i = 0, curbuf_idx = 0; curbuf_idx < buf_num; curbuf_idx++) {
page_addr = dma_alloc_coherent(
&v_lport->low_level_func.dev->dev,
v_xchg_mgr->rsp_buf_list.buf_size,
&phy_addr, GFP_KERNEL);
if (!page_addr)
goto free_buff;
memset(page_addr, 0, v_xchg_mgr->rsp_buf_list.buf_size);
v_xchg_mgr->rsp_buf_list.buflist[curbuf_idx].vaddr = page_addr;
v_xchg_mgr->rsp_buf_list.buflist[curbuf_idx].paddr = phy_addr;
for (rsp_iu_loop = 0;
(rsp_iu_loop < rsp_iu_nums_per_page &&
i < xchg_sum - v_sfs_sum); rsp_iu_loop++) {
INIT_LIST_HEAD(&xchg->list_xchg_entry);
INIT_LIST_HEAD(&xchg->list_esgls);
spin_lock_init(&xchg->xchg_state_lock);
sema_init(&xchg->task_sema, 0);
sema_init(&xchg->echo_info.echo_sync_sema, 0);
/* alloc dma buffer for fcp_rsp_iu */
spin_lock_irqsave(&free_pool->xchg_free_pool_lock,
flags);
xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu =
(struct unf_fcprsp_iu_s *)page_addr;
xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu_phy_addr =
phy_addr;
list_add_tail(&xchg->list_xchg_entry,
&free_pool->list_free_xchg_list);
free_pool->total_fcp_xchg++;
spin_unlock_irqrestore(&free_pool->xchg_free_pool_lock,
flags);
page_addr += rsp_iu_size;
phy_addr += rsp_iu_size;
i++;
xchg++;
}
}
free_pool->fcp_xchg_sum = free_pool->total_fcp_xchg;
return RETURN_OK;
free_buff:
unf_free_all_rsp_pages(v_xchg_mgr);
return UNF_RETURN_ERROR;
}
static unsigned int unf_get_xchg_config_sum(struct unf_lport_s *v_lport,
unsigned int *v_xchg_sum)
{
struct unf_lport_cfg_item_s *lport_cfg_items = NULL;
lport_cfg_items = &v_lport->low_level_func.lport_cfg_items;
/* It has been checked at the bottom layer.
* Don't need to check it again.
*/
*v_xchg_sum = lport_cfg_items->max_sfs_xchg + lport_cfg_items->max_io;
if ((*v_xchg_sum / UNF_EXCHG_MGR_NUM) == 0 ||
lport_cfg_items->max_sfs_xchg / UNF_EXCHG_MGR_NUM == 0) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"[err]Port(0x%x) Xchgsum(%u) or SfsXchg(%u) is less than ExchangeMgrNum(%u).",
v_lport->port_id, *v_xchg_sum,
lport_cfg_items->max_sfs_xchg,
UNF_EXCHG_MGR_NUM);
return UNF_RETURN_ERROR;
}
if (*v_xchg_sum > (INVALID_VALUE16 - 1)) {
/* If the format of ox_id/rx_id is exceeded,
* this function is not supported
*/
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_ERR,
"Port(0x%x) Exchange num(0x%x) is Too Big.",
v_lport->port_id, *v_xchg_sum);
return UNF_RETURN_ERROR;
}
return RETURN_OK;
}
static void unf_xchg_cancel_timer(void *v_xchg)
{
struct unf_xchg_s *xchg = NULL;
int need_dec_xchg_ref = UNF_FALSE;
unsigned long flag = 0;
UNF_CHECK_VALID(0x874, UNF_TRUE, v_xchg, return);
xchg = (struct unf_xchg_s *)v_xchg;
spin_lock_irqsave(&xchg->xchg_state_lock, flag);
if (cancel_delayed_work(&xchg->timeout_work))
need_dec_xchg_ref = UNF_TRUE;
spin_unlock_irqrestore(&xchg->xchg_state_lock, flag);
if (need_dec_xchg_ref == UNF_TRUE)
unf_xchg_ref_dec(v_xchg, XCHG_CANCEL_TIMER);
}
void unf_show_all_xchg(struct unf_lport_s *v_lport,
struct unf_xchg_mgr_s *v_xchg_mgr)
{
struct unf_lport_s *lport = NULL;
struct unf_xchg_mgr_s *xchg_mgr = NULL;
struct unf_xchg_s *xchg = NULL;
struct list_head *xchg_node = NULL;
struct list_head *next_xchg_node = NULL;
unsigned long flags = 0;
UNF_CHECK_VALID(0x879, UNF_TRUE, v_lport, return);
UNF_CHECK_VALID(0x880, UNF_TRUE, v_xchg_mgr, return);
UNF_REFERNCE_VAR(lport);
UNF_REFERNCE_VAR(xchg);
xchg_mgr = v_xchg_mgr;
lport = v_lport;
/* hot Xchg */
spin_lock_irqsave(&xchg_mgr->hot_pool->xchg_hot_pool_lock, flags);
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_WARN,
"INI busy :");
list_for_each_safe(xchg_node, next_xchg_node,
&xchg_mgr->hot_pool->ini_busylist) {
xchg = list_entry(xchg_node, struct unf_xchg_s,
list_xchg_entry);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR,
"0x%p---0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----%llu.",
xchg,
(unsigned int)xchg->hot_pool_tag,
(unsigned int)xchg->xchg_type,
(unsigned int)xchg->ox_id,
(unsigned int)xchg->rx_id,
(unsigned int)xchg->sid,
(unsigned int)xchg->did,
atomic_read(&xchg->ref_cnt),
(unsigned int)xchg->io_state,
xchg->alloc_jif);
}
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL,
UNF_WARN, "SFS :");
list_for_each_safe(xchg_node, next_xchg_node,
&xchg_mgr->hot_pool->sfs_busylist) {
xchg = list_entry(xchg_node, struct unf_xchg_s,
list_xchg_entry);
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_WARN,
"0x%p---0x%x---0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----%llu.",
xchg,
xchg->cmnd_code,
(unsigned int)xchg->hot_pool_tag,
(unsigned int)xchg->xchg_type,
(unsigned int)xchg->ox_id,
(unsigned int)xchg->rx_id,
(unsigned int)xchg->sid,
(unsigned int)xchg->did,
atomic_read(&xchg->ref_cnt),
(unsigned int)xchg->io_state,
xchg->alloc_jif);
}
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_WARN,
"Destroy list.");
list_for_each_safe(xchg_node, next_xchg_node,
&xchg_mgr->hot_pool->list_destroy_xchg) {
xchg = list_entry(xchg_node, struct unf_xchg_s,
list_xchg_entry);
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_WARN,
"0x%p---0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----%llu.",
xchg,
(unsigned int)xchg->hot_pool_tag,
(unsigned int)xchg->xchg_type,
(unsigned int)xchg->ox_id,
(unsigned int)xchg->rx_id,
(unsigned int)xchg->sid,
(unsigned int)xchg->did,
atomic_read(&xchg->ref_cnt),
(unsigned int)xchg->io_state,
xchg->alloc_jif);
}
spin_unlock_irqrestore(&xchg_mgr->hot_pool->xchg_hot_pool_lock, flags);
UNF_REFERNCE_VAR(xchg);
UNF_REFERNCE_VAR(lport);
}
static void unf_delay_work_del_syn(struct unf_xchg_s *v_xchg)
{
struct unf_xchg_s *xchg = NULL;
UNF_CHECK_VALID(0x884, UNF_TRUE, v_xchg, return);
xchg = v_xchg;
/* synchronous release timer */
if (!cancel_delayed_work_sync(&xchg->timeout_work)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"Exchange(0x%p), State(0x%x) can't delete work timer, timer is running or no timer.",
xchg, xchg->io_state);
} else {
/* The reference count cannot be directly subtracted.
* This prevents the XCHG from being moved to the
* Free linked list when the card is unloaded.
*/
unf_cm_free_xchg(xchg->lport, xchg);
}
}
static void unf_free_lport_sfs_xchg(struct unf_xchg_mgr_s *v_xchg_mgr,
int v_done_ini_flag)
{
struct list_head *list = NULL;
struct unf_xchg_s *xchg = NULL;
unsigned long hot_pool_lock_flags = 0;
UNF_REFERNCE_VAR(v_done_ini_flag);
UNF_CHECK_VALID(0x887, UNF_TRUE, v_xchg_mgr, return);
UNF_CHECK_VALID(0x888, UNF_TRUE, v_xchg_mgr->hot_pool, return);
spin_lock_irqsave(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock,
hot_pool_lock_flags);
while (!list_empty(&v_xchg_mgr->hot_pool->sfs_busylist)) {
list = (&v_xchg_mgr->hot_pool->sfs_busylist)->next;
list_del_init(list);
/* Prevent the xchg of the sfs from being accessed repeatedly.
* The xchg is first mounted to the destroy linked list.
*/
list_add_tail(list, &v_xchg_mgr->hot_pool->list_destroy_xchg);
xchg = list_entry(list, struct unf_xchg_s, list_xchg_entry);
spin_unlock_irqrestore(
&v_xchg_mgr->hot_pool->xchg_hot_pool_lock,
hot_pool_lock_flags);
unf_delay_work_del_syn(xchg);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"Free SFS Exchange(0x%p), State(0x%x), Reference count(%d), Start time(%llu).",
xchg, xchg->io_state, atomic_read(&xchg->ref_cnt),
xchg->alloc_jif);
unf_cm_free_xchg(xchg->lport, xchg);
spin_lock_irqsave(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock,
hot_pool_lock_flags);
}
spin_unlock_irqrestore(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock,
hot_pool_lock_flags);
}
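/* Drain the destroy list: synchronously cancel each exchange's timer,
 * then release the exchange through unf_cm_free_xchg()
 */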
static void unf_free_lport_destroy_xchg(struct unf_xchg_mgr_s *v_xchg_mgr)
{
#define UNF_WAIT_DESTROY_EMPTY_STEP_MS 1000
#define UNF_WAIT_IO_STATE_TGT_FRONT_MS (10 * 1000)
struct unf_xchg_s *xchg = NULL;
struct list_head *next_xchg_node = NULL;
unsigned long hot_pool_lock_flags = 0;
unsigned long xchg_flag = 0;
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_xchg_mgr, return);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_xchg_mgr->hot_pool,
return);
/* Here the timers of exchanges on the destroy linked list are deleted;
 * only the timer release needs to be checked at the end of the
 * target-side processing.
 */
spin_lock_irqsave(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock,
hot_pool_lock_flags);
while (!list_empty(&v_xchg_mgr->hot_pool->list_destroy_xchg)) {
next_xchg_node =
(&v_xchg_mgr->hot_pool->list_destroy_xchg)->next;
xchg = list_entry(next_xchg_node, struct unf_xchg_s,
list_xchg_entry);
spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flag);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"Free Exchange(0x%p), Type(0x%x), State(0x%x), Reference count(%d), Start time(%llu)",
xchg, xchg->xchg_type, xchg->io_state,
atomic_read(&xchg->ref_cnt),
xchg->alloc_jif);
spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flag);
spin_unlock_irqrestore(
&v_xchg_mgr->hot_pool->xchg_hot_pool_lock,
hot_pool_lock_flags);
/* This interface can be invoked to ensure that
* the timer is successfully canceled
* or wait until the timer execution is complete
*/
unf_delay_work_del_syn(xchg);
/*
 * If the timer was cancelled successfully, free the Xchg.
 * If the timer has already fired, the Xchg may have been released,
 * in which case freeing it here simply fails.
 */
unf_cm_free_xchg(xchg->lport, xchg);
spin_lock_irqsave(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock,
hot_pool_lock_flags);
}
spin_unlock_irqrestore(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock,
hot_pool_lock_flags);
}
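/*
 * Function Name : unf_free_lport_xchg
 * Function Description: Free the SFS and INI exchanges of one exchange
 *                       manager and wait (with timeout) until all exchanges
 *                       have been returned to the free pool
 * Input Parameters : struct unf_lport_s *v_lport
 *                    struct unf_xchg_mgr_s *v_xchg_mgr
 * Output Parameters : N/A
 * Return Type : static unsigned int
 */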
static unsigned int unf_free_lport_xchg(struct unf_lport_s *v_lport,
struct unf_xchg_mgr_s *v_xchg_mgr)
{
#define UNF_OS_WAITIO_TIMEOUT (10 * 1000)
unsigned long free_pool_lock_flags = 0;
int wait = UNF_FALSE;
unsigned int total_xchg = 0;
unsigned int total_xchg_sum = 0;
unsigned int ret = RETURN_OK;
unsigned long long timeout = 0;
struct completion xchg_mgr_completion =
COMPLETION_INITIALIZER(xchg_mgr_completion);
UNF_CHECK_VALID(0x881, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
UNF_CHECK_VALID(0x882, UNF_TRUE, v_xchg_mgr, return UNF_RETURN_ERROR);
UNF_CHECK_VALID(0x883, UNF_TRUE, v_xchg_mgr->hot_pool,
return UNF_RETURN_ERROR);
UNF_REFERNCE_VAR(v_lport);
unf_free_lport_sfs_xchg(v_xchg_mgr, UNF_FALSE);
/* free INI Mode exchanges belong to L_Port */
unf_free_lport_ini_xchg(v_xchg_mgr, UNF_FALSE);
spin_lock_irqsave(&v_xchg_mgr->free_pool.xchg_free_pool_lock,
free_pool_lock_flags);
total_xchg = v_xchg_mgr->free_pool.total_fcp_xchg +
v_xchg_mgr->free_pool.total_sfs_xchg;
total_xchg_sum = v_xchg_mgr->free_pool.fcp_xchg_sum +
v_xchg_mgr->free_pool.sfs_xchg_sum;
if (total_xchg != total_xchg_sum) {
v_xchg_mgr->free_pool.xchg_mgr_completion =
&xchg_mgr_completion;
wait = UNF_TRUE;
}
spin_unlock_irqrestore(&v_xchg_mgr->free_pool.xchg_free_pool_lock,
free_pool_lock_flags);
if (wait == UNF_TRUE) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Port(0x%x) begin to wait for exchange manager completion(%ld) (0x%x:0x%x)",
v_lport->port_id, jiffies, total_xchg,
total_xchg_sum);
unf_show_all_xchg(v_lport, v_xchg_mgr);
timeout = wait_for_completion_timeout(
v_xchg_mgr->free_pool.xchg_mgr_completion,
msecs_to_jiffies(UNF_OS_WAITIO_TIMEOUT));
if (timeout == 0)
unf_free_lport_destroy_xchg(v_xchg_mgr);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Port(0x%x) wait for exchange manager completion end",
v_lport->port_id);
spin_lock_irqsave(&v_xchg_mgr->free_pool.xchg_free_pool_lock,
free_pool_lock_flags);
v_xchg_mgr->free_pool.xchg_mgr_completion = NULL;
spin_unlock_irqrestore(
&v_xchg_mgr->free_pool.xchg_free_pool_lock,
free_pool_lock_flags);
}
return ret;
}
void unf_free_lport_all_xchg(struct unf_lport_s *v_lport)
{
struct unf_xchg_mgr_s *xchg_mgr;
unsigned int i;
UNF_CHECK_VALID(0x881, UNF_TRUE, v_lport, return);
UNF_REFERNCE_VAR(v_lport);
for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
xchg_mgr = unf_get_xchg_mgr_by_lport(v_lport, i);
if (unlikely(!xchg_mgr)) {
UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]Port(0x%x) hot pool is NULL",
v_lport->port_id);
continue;
}
unf_free_lport_sfs_xchg(xchg_mgr, UNF_FALSE);
/* free INI Mode exchanges belong to L_Port */
unf_free_lport_ini_xchg(xchg_mgr, UNF_FALSE);
unf_free_lport_destroy_xchg(xchg_mgr);
}
}
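/*
 * Function Name : unf_free_lport_ini_xchg
 * Function Description: Move every INI exchange from the hot pool busy list
 *                       to the destroy list, fail the upper-level command and
 *                       free the exchange (used for L_Port destroy and AC
 *                       power down)
 * Input Parameters : struct unf_xchg_mgr_s *v_xchg_mgr
 *                    int v_done_ini_flag
 * Output Parameters : N/A
 * Return Type : void
 */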
void unf_free_lport_ini_xchg(struct unf_xchg_mgr_s *v_xchg_mgr,
int v_done_ini_flag)
{
/*
* 1. L_Port destroy
* 2. AC power down
*/
struct list_head *list = NULL;
struct unf_xchg_s *xchg = NULL;
unsigned long hot_pool_lock_flags = 0;
unsigned int up_status = 0;
UNF_REFERNCE_VAR(v_done_ini_flag);
UNF_CHECK_VALID(0x889, UNF_TRUE, v_xchg_mgr, return);
UNF_CHECK_VALID(0x890, UNF_TRUE, v_xchg_mgr->hot_pool, return);
spin_lock_irqsave(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock,
hot_pool_lock_flags);
while (!list_empty(&v_xchg_mgr->hot_pool->ini_busylist)) {
/* for each INI busy_list (exchange) node */
list = (&v_xchg_mgr->hot_pool->ini_busylist)->next;
/* Put the exchange node on destroy_list to prevent it from being
 * completed repeatedly
 */
list_del_init(list);
list_add_tail(list, &v_xchg_mgr->hot_pool->list_destroy_xchg);
xchg = list_entry(list, struct unf_xchg_s, list_xchg_entry);
if (atomic_read(&xchg->ref_cnt) <= 0)
continue;
spin_unlock_irqrestore(
&v_xchg_mgr->hot_pool->xchg_hot_pool_lock,
hot_pool_lock_flags);
unf_delay_work_del_syn(xchg);
/* When the INI command is completed here, it must be failed to
 * prevent the data inconsistency that returning OK would cause.
 */
up_status = unf_get_uplevel_cmnd_errcode(
xchg->scsi_cmnd_info.err_code_table,
xchg->scsi_cmnd_info.err_code_table_cout,
UNF_IO_PORT_LOGOUT);
if (xchg->io_state & INI_IO_STATE_UPABORT) {
/*
* About L_Port destroy or AC power down:
* UP_ABORT ---to--->>> ABORT_Port_Removing
*/
up_status = UNF_IO_ABORT_PORT_REMOVING;
}
xchg->scsi_cmnd_info.result = up_status;
up(&xchg->task_sema);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Free INI exchange(0x%p) state(0x%x) reference count(%d) start time(%llu)",
xchg, xchg->io_state, atomic_read(&xchg->ref_cnt),
xchg->alloc_jif);
unf_cm_free_xchg(xchg->lport, xchg);
/* go to next INI busy_list (exchange) node */
spin_lock_irqsave(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock,
hot_pool_lock_flags);
}
spin_unlock_irqrestore(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock,
hot_pool_lock_flags);
}
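/*
 * Function Name : unf_free_all_big_sfs
 * Function Description: Return all busy big SFS buffers to the free pool and
 *                       release the backing buffer list memory
 * Input Parameters : struct unf_xchg_mgr_s *v_xchg_mgr
 * Output Parameters : N/A
 * Return Type : static void
 */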
static void unf_free_all_big_sfs(struct unf_xchg_mgr_s *v_xchg_mgr)
{
struct unf_xchg_mgr_s *xchg_mgr = v_xchg_mgr;
struct unf_big_sfs_s *big_sfs = NULL;
struct list_head *node = NULL;
struct list_head *next_node = NULL;
unsigned long flag = 0;
unsigned int buff_index;
UNF_CHECK_VALID(0x891, UNF_TRUE, xchg_mgr, return);
/* Release the free resources in the busy state */
spin_lock_irqsave(&xchg_mgr->st_big_sfs_pool.big_sfs_pool_lock, flag);
list_for_each_safe(node, next_node,
&xchg_mgr->st_big_sfs_pool.list_busy_pool) {
list_del(node);
list_add_tail(node, &xchg_mgr->st_big_sfs_pool.list_free_pool);
}
list_for_each_safe(node, next_node,
&xchg_mgr->st_big_sfs_pool.list_free_pool) {
list_del(node);
big_sfs = list_entry(node, struct unf_big_sfs_s,
entry_big_sfs);
if (big_sfs->vaddr)
big_sfs->vaddr = NULL;
}
spin_unlock_irqrestore(&xchg_mgr->st_big_sfs_pool.big_sfs_pool_lock,
flag);
if (xchg_mgr->big_sfs_buf_list.buflist) {
for (buff_index = 0;
buff_index < xchg_mgr->big_sfs_buf_list.buf_num;
buff_index++) {
if (xchg_mgr->big_sfs_buf_list.buflist[buff_index].vaddr) {
kfree(xchg_mgr->big_sfs_buf_list.buflist[buff_index].vaddr);
xchg_mgr->big_sfs_buf_list.buflist[buff_index].vaddr = NULL;
}
}
kfree(xchg_mgr->big_sfs_buf_list.buflist);
xchg_mgr->big_sfs_buf_list.buflist = NULL;
}
}
static void unf_free_big_sfs_pool(struct unf_xchg_mgr_s *v_xchg_mgr)
{
UNF_CHECK_VALID(0x892, UNF_TRUE, v_xchg_mgr, return);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO,
"Free Big SFS Pool, Count(0x%x).",
v_xchg_mgr->st_big_sfs_pool.free_count);
unf_free_all_big_sfs(v_xchg_mgr);
v_xchg_mgr->st_big_sfs_pool.free_count = 0;
if (v_xchg_mgr->st_big_sfs_pool.big_sfs_pool) {
vfree(v_xchg_mgr->st_big_sfs_pool.big_sfs_pool);
v_xchg_mgr->st_big_sfs_pool.big_sfs_pool = NULL;
}
}
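/*
 * Function Name : unf_free_xchg_mgr_mem
 * Function Description: Release all memory owned by one exchange manager:
 *                       response pages, big SFS pool, SFS DMA area, exchange
 *                       array, hot pool and the manager structure itself
 * Input Parameters : struct unf_lport_s *v_lport
 *                    struct unf_xchg_mgr_s *v_xchg_mgr
 * Output Parameters : N/A
 * Return Type : static void
 */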
static void unf_free_xchg_mgr_mem(struct unf_lport_s *v_lport,
struct unf_xchg_mgr_s *v_xchg_mgr)
{
struct unf_xchg_mgr_s *xchg_mgr = NULL;
struct unf_xchg_s *xchg = NULL;
unsigned int i = 0;
unsigned int xchg_sum = 0;
struct unf_xchg_free_pool_s *free_pool = NULL;
UNF_CHECK_VALID(0x893, UNF_TRUE, v_xchg_mgr, return);
xchg_mgr = v_xchg_mgr;
/* Release the reserved Rsp IU Page */
unf_free_all_rsp_pages(xchg_mgr);
unf_free_big_sfs_pool(xchg_mgr);
/* The SFS memory is released first. Because it was obtained from the
 * page allocator, its start address is compared with 0 rather than NULL.
 */
if (xchg_mgr->sfs_mm_start != 0) {
dma_free_coherent(&v_lport->low_level_func.dev->dev,
xchg_mgr->sfs_mem_size,
xchg_mgr->sfs_mm_start,
xchg_mgr->sfs_phy_addr);
xchg_mgr->sfs_mm_start = 0;
}
/* Release Xchg first */
if (xchg_mgr->fcp_mm_start) {
unf_get_xchg_config_sum(v_lport, &xchg_sum);
xchg_sum = xchg_sum / UNF_EXCHG_MGR_NUM;
xchg = xchg_mgr->fcp_mm_start;
for (i = 0; i < xchg_sum; i++) {
if (!xchg)
break;
xchg++;
}
vfree(xchg_mgr->fcp_mm_start);
xchg_mgr->fcp_mm_start = NULL;
}
/* release the hot pool */
if (xchg_mgr->hot_pool) {
vfree(xchg_mgr->hot_pool);
xchg_mgr->hot_pool = NULL;
}
free_pool = &xchg_mgr->free_pool;
vfree(xchg_mgr);
UNF_REFERNCE_VAR(xchg_mgr);
UNF_REFERNCE_VAR(free_pool);
}
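/*
 * Function Name : unf_free_xchg_mgr
 * Function Description: Free all exchanges of the manager, remove it from the
 *                       L_Port list and either release its memory or park it
 *                       on the dirty list when exchanges are still in use
 * Input Parameters : struct unf_lport_s *v_lport
 *                    struct unf_xchg_mgr_s *v_xchg_mgr
 * Output Parameters : N/A
 * Return Type : static void
 */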
static void unf_free_xchg_mgr(struct unf_lport_s *v_lport,
struct unf_xchg_mgr_s *v_xchg_mgr)
{
unsigned long flags = 0;
unsigned int ret = UNF_RETURN_ERROR;
UNF_CHECK_VALID(0x894, UNF_TRUE, v_lport, return);
UNF_CHECK_VALID(0x895, UNF_TRUE, v_xchg_mgr, return);
/* 1. At first, free exchanges for this Exch_Mgr */
ret = unf_free_lport_xchg(v_lport, v_xchg_mgr);
/* 2. Delete this Exch_Mgr entry */
spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags);
list_del_init(&v_xchg_mgr->xchg_mgr_entry);
spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags);
/* 3. free Exch_Mgr memory if necessary */
if (ret == RETURN_OK) {
/* free memory directly */
unf_free_xchg_mgr_mem(v_lport, v_xchg_mgr);
} else {
/* Add it to Dirty list */
spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags);
list_add_tail(&v_xchg_mgr->xchg_mgr_entry,
&v_lport->list_dirty_xchg_mgr_head);
spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags);
/* Mark dirty flag */
unf_cmmark_dirty_mem(v_lport,
UNF_LPORT_DIRTY_FLAG_XCHGMGR_DIRTY);
}
}
void unf_free_all_xchg_mgr(struct unf_lport_s *v_lport)
{
struct unf_xchg_mgr_s *xchg_mgr = NULL;
unsigned long flags = 0;
unsigned int i = 0;
UNF_CHECK_VALID(0x896, UNF_TRUE, v_lport, return);
/* for each L_Port->Exch_Mgr_List */
spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags);
while (!list_empty(&v_lport->list_xchg_mgr_head)) {
spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags);
xchg_mgr = unf_get_xchg_mgr_by_lport(v_lport, i);
unf_free_xchg_mgr(v_lport, xchg_mgr);
if (i < UNF_EXCHG_MGR_NUM)
v_lport->p_xchg_mgr[i] = NULL;
i++;
/* go to next */
spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags);
}
spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags);
v_lport->destroy_step = UNF_LPORT_DESTROY_STEP_4_DESTROY_EXCH_MGR;
}
static unsigned int unf_init_xchg_mgr(struct unf_xchg_mgr_s *v_xchg_mgr)
{
struct unf_xchg_mgr_s *xchg_mgr = NULL;
UNF_CHECK_VALID(0x897, UNF_TRUE, v_xchg_mgr, return UNF_RETURN_ERROR);
xchg_mgr = v_xchg_mgr;
memset(xchg_mgr, 0, sizeof(struct unf_xchg_mgr_s));
INIT_LIST_HEAD(&xchg_mgr->xchg_mgr_entry);
xchg_mgr->mgr_type = UNF_XCHG_MGR_FC;
xchg_mgr->min_xid = UNF_XCHG_MIN_XID;
xchg_mgr->max_xid = UNF_XCHG_MAX_XID;
xchg_mgr->fcp_mm_start = NULL;
xchg_mgr->mem_size = sizeof(struct unf_xchg_mgr_s);
return RETURN_OK;
}
static unsigned int unf_init_xchg_mgr_free_pool(
struct unf_xchg_mgr_s *v_xchg_mgr)
{
struct unf_xchg_free_pool_s *free_pool = NULL;
struct unf_xchg_mgr_s *xchg_mgr = NULL;
UNF_CHECK_VALID(0x898, UNF_TRUE, v_xchg_mgr, return UNF_RETURN_ERROR);
xchg_mgr = v_xchg_mgr;
free_pool = &xchg_mgr->free_pool;
INIT_LIST_HEAD(&free_pool->list_free_xchg_list);
INIT_LIST_HEAD(&free_pool->list_sfs_xchg_list);
spin_lock_init(&free_pool->xchg_free_pool_lock);
free_pool->fcp_xchg_sum = 0;
free_pool->xchg_mgr_completion = NULL;
return RETURN_OK;
}
static unsigned int unf_init_xchg_hot_pool(
struct unf_lport_s *v_lport,
struct unf_xchg_hot_pool_s *v_hot_pool,
unsigned int v_xchg_sum)
{
struct unf_xchg_hot_pool_s *hot_pool = NULL;
UNF_CHECK_VALID(0x899, UNF_TRUE, v_hot_pool, return UNF_RETURN_ERROR);
hot_pool = v_hot_pool;
INIT_LIST_HEAD(&hot_pool->sfs_busylist);
INIT_LIST_HEAD(&hot_pool->ini_busylist);
spin_lock_init(&hot_pool->xchg_hot_pool_lock);
INIT_LIST_HEAD(&hot_pool->list_destroy_xchg);
hot_pool->total_xchges = 0;
hot_pool->total_res_cnt = 0;
hot_pool->wait_state = UNF_FALSE;
hot_pool->lport = v_lport;
/* Slab Pool Index */
hot_pool->slab_next_index = 0;
UNF_TOU16_CHECK(hot_pool->slab_total_sum, v_xchg_sum,
return UNF_RETURN_ERROR);
return RETURN_OK;
}
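/*
 * Function Name : unf_alloc_and_init_big_sfs_pool
 * Function Description: Allocate and initialize the big SFS pool used to
 *                       carry GID_PT/GID_FT, RSCN and ECHO payloads
 * Input Parameters : struct unf_lport_s *v_lport
 *                    struct unf_xchg_mgr_s *v_xchg_mgr
 * Output Parameters : N/A
 * Return Type : static unsigned int
 */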
static unsigned int unf_alloc_and_init_big_sfs_pool(
struct unf_lport_s *v_lport,
struct unf_xchg_mgr_s *v_xchg_mgr)
{
unsigned int i = 0;
unsigned int size = 0;
unsigned int align_size = 0;
unsigned int npiv_cnt = 0;
struct unf_big_sfs_pool_s *big_sfs_pool = NULL;
struct unf_big_sfs_s *big_sfs_buf = NULL;
unsigned int buf_total_size;
unsigned int buf_num;
unsigned int buf_cnt_perhugebuf;
unsigned int alloc_idx;
unsigned int curbuf_idx = 0;
unsigned int curbuf_offset = 0;
UNF_CHECK_VALID(0x900, UNF_TRUE, v_xchg_mgr, return UNF_RETURN_ERROR);
UNF_CHECK_VALID(0x901, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
big_sfs_pool = &v_xchg_mgr->st_big_sfs_pool;
INIT_LIST_HEAD(&big_sfs_pool->list_free_pool);
INIT_LIST_HEAD(&big_sfs_pool->list_busy_pool);
spin_lock_init(&big_sfs_pool->big_sfs_pool_lock);
npiv_cnt = v_lport->low_level_func.support_max_npiv_num;
/*
 * The multiplier 6 covers GID_PT/GID_FT, RSCN and ECHO, plus the case
 * where another command arrives while one is still being answered.
 * A further 20 resources are reserved for RSCN: during testing,
 * multiple RSCNs could arrive at once, exhausting the pool and making
 * discovery fail.
 */
big_sfs_pool->free_count = (npiv_cnt + 1) * 6 + 20;
big_sfs_buf = (struct unf_big_sfs_s *)vmalloc(
big_sfs_pool->free_count
* sizeof(struct unf_big_sfs_s));
if (!big_sfs_buf) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
"Allocate Big SFS buf fail.");
return UNF_RETURN_ERROR;
}
memset(big_sfs_buf, 0, big_sfs_pool->free_count *
sizeof(struct unf_big_sfs_s));
v_xchg_mgr->mem_size +=
(unsigned int)
(big_sfs_pool->free_count * sizeof(struct unf_big_sfs_s));
big_sfs_pool->big_sfs_pool = (void *)big_sfs_buf;
/*
 * Use the larger of sizeof(struct unf_gif_acc_pld_s) and
 * sizeof(struct unf_rscn_pld_s) to avoid the icp error; the value is
 * therefore assigned directly instead of being compared.
 */
size = sizeof(struct unf_gif_acc_pld_s);
align_size = ALIGN(size, PAGE_SIZE);
buf_total_size = align_size * big_sfs_pool->free_count;
v_xchg_mgr->big_sfs_buf_list.buf_size =
buf_total_size > BUF_LIST_PAGE_SIZE ?
BUF_LIST_PAGE_SIZE : buf_total_size;
buf_cnt_perhugebuf =
v_xchg_mgr->big_sfs_buf_list.buf_size / align_size;
buf_num =
big_sfs_pool->free_count % buf_cnt_perhugebuf ?
big_sfs_pool->free_count / buf_cnt_perhugebuf + 1 :
big_sfs_pool->free_count / buf_cnt_perhugebuf;
v_xchg_mgr->big_sfs_buf_list.buflist =
(struct buff_list_s *)kmalloc(
buf_num * sizeof(struct buff_list_s),
GFP_KERNEL);
v_xchg_mgr->big_sfs_buf_list.buf_num = buf_num;
if (!v_xchg_mgr->big_sfs_buf_list.buflist) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[err]Allocate BigSfs pool buf list failed out of memory");
goto free_buff;
}
memset(v_xchg_mgr->big_sfs_buf_list.buflist, 0, buf_num *
sizeof(struct buff_list_s));
for (alloc_idx = 0; alloc_idx < buf_num; alloc_idx++) {
v_xchg_mgr->big_sfs_buf_list.buflist[alloc_idx].vaddr =
kmalloc(v_xchg_mgr->big_sfs_buf_list.buf_size,
GFP_ATOMIC);
if (!v_xchg_mgr->big_sfs_buf_list.buflist[alloc_idx].vaddr)
goto free_buff;
memset(v_xchg_mgr->big_sfs_buf_list.buflist[alloc_idx].vaddr,
0, v_xchg_mgr->big_sfs_buf_list.buf_size);
}
for (i = 0; i < big_sfs_pool->free_count; i++) {
if ((i != 0) && !(i % buf_cnt_perhugebuf))
curbuf_idx++;
curbuf_offset = align_size * (i % buf_cnt_perhugebuf);
big_sfs_buf->vaddr =
v_xchg_mgr->big_sfs_buf_list.buflist[curbuf_idx].vaddr +
curbuf_offset;
big_sfs_buf->size = size;
v_xchg_mgr->mem_size += size;
list_add_tail(&big_sfs_buf->entry_big_sfs,
&big_sfs_pool->list_free_pool);
big_sfs_buf++;
}
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[EVENT]Allocate BigSfs pool size:%d,uiAlignSize:%d,buf_num:%d,buf_size:%d",
size, align_size, v_xchg_mgr->big_sfs_buf_list.buf_num,
v_xchg_mgr->big_sfs_buf_list.buf_size);
return RETURN_OK;
free_buff:
unf_free_all_big_sfs(v_xchg_mgr);
vfree(big_sfs_buf);
big_sfs_pool->big_sfs_pool = NULL;
return UNF_RETURN_ERROR;
}
/*
* Function Name : unf_free_one_big_sfs
* Function Description: Put the big sfs memory in xchg back to bigsfspool
* Input Parameters : struct unf_xchg_s * v_xchg
* Output Parameters : N/A
* Return Type : static void
*/
static void unf_free_one_big_sfs(struct unf_xchg_s *v_xchg)
{
unsigned long flag = 0;
struct unf_xchg_mgr_s *xchg_mgr = NULL;
UNF_CHECK_VALID(0x902, UNF_TRUE, v_xchg, return);
xchg_mgr = v_xchg->xchg_mgr;
UNF_CHECK_VALID(0x903, UNF_TRUE, xchg_mgr, return);
if (!v_xchg->big_sfs_buf)
return;
if ((v_xchg->cmnd_code != NS_GID_PT) &&
(v_xchg->cmnd_code != NS_GID_FT) &&
(v_xchg->cmnd_code != ELS_ECHO) &&
(UNF_SET_ELS_ACC_TYPE(ELS_ECHO) != v_xchg->cmnd_code) &&
(v_xchg->cmnd_code != ELS_RSCN) &&
(UNF_SET_ELS_ACC_TYPE(ELS_RSCN) != v_xchg->cmnd_code)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT,
UNF_MAJOR,
"Exchange(0x%p), Command(0x%x) big SFS buf is not NULL.",
v_xchg, v_xchg->cmnd_code);
}
spin_lock_irqsave(&xchg_mgr->st_big_sfs_pool.big_sfs_pool_lock, flag);
list_del(&v_xchg->big_sfs_buf->entry_big_sfs);
list_add_tail(&v_xchg->big_sfs_buf->entry_big_sfs,
&xchg_mgr->st_big_sfs_pool.list_free_pool);
xchg_mgr->st_big_sfs_pool.free_count++;
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO,
"Free one big SFS buf(0x%p), Count(0x%x), Exchange(0x%p), Command(0x%x).",
v_xchg->big_sfs_buf->vaddr,
xchg_mgr->st_big_sfs_pool.free_count,
v_xchg, v_xchg->cmnd_code);
spin_unlock_irqrestore(&xchg_mgr->st_big_sfs_pool.big_sfs_pool_lock,
flag);
}
static void unf_free_exchg_mgr_info(struct unf_lport_s *v_lport)
{
unsigned int i;
struct list_head *node = NULL;
struct list_head *next_node = NULL;
unsigned long flags = 0;
struct unf_xchg_mgr_s *xchg_mgr = NULL;
spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags);
list_for_each_safe(node, next_node, &v_lport->list_xchg_mgr_head) {
list_del(node);
xchg_mgr = list_entry(node, struct unf_xchg_mgr_s,
xchg_mgr_entry);
}
spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags);
for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
xchg_mgr = v_lport->p_xchg_mgr[i];
if (xchg_mgr) {
unf_free_big_sfs_pool(xchg_mgr);
unf_free_all_rsp_pages(xchg_mgr);
if (xchg_mgr->sfs_mm_start) {
dma_free_coherent(
&v_lport->low_level_func.dev->dev,
xchg_mgr->sfs_mem_size,
xchg_mgr->sfs_mm_start,
xchg_mgr->sfs_phy_addr);
xchg_mgr->sfs_mm_start = 0;
}
if (xchg_mgr->fcp_mm_start) {
vfree(xchg_mgr->fcp_mm_start);
xchg_mgr->fcp_mm_start = NULL;
}
if (xchg_mgr->hot_pool) {
vfree(xchg_mgr->hot_pool);
xchg_mgr->hot_pool = NULL;
}
vfree(xchg_mgr);
v_lport->p_xchg_mgr[i] = NULL;
}
}
}
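/*
 * Function Name : unf_alloc_and_init_xchg_mgr
 * Function Description: Allocate and initialize every exchange manager of the
 *                       L_Port: free pool, hot pool, exchange slab, SFS DMA
 *                       area and big SFS pool
 * Input Parameters : struct unf_lport_s *v_lport
 * Output Parameters : N/A
 * Return Type : static unsigned int
 */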
static unsigned int unf_alloc_and_init_xchg_mgr(struct unf_lport_s *v_lport)
{
struct unf_xchg_mgr_s *xchg_mgr = NULL;
struct unf_xchg_hot_pool_s *hot_pool = NULL;
struct unf_xchg_s *xchg_mem = NULL;
void *sfs_mm_start = 0;
dma_addr_t sfs_phy_addr = 0;
unsigned int xchg_sum = 0;
unsigned int sfs_xchg_sum = 0;
unsigned long flags = 0;
unsigned int order = 0;
unsigned int ret = UNF_RETURN_ERROR;
unsigned int slab_num = 0;
unsigned int i = 0;
UNF_REFERNCE_VAR(order);
/* SFS_EXCH + I/O_EXCH */
ret = unf_get_xchg_config_sum(v_lport, &xchg_sum);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"Port(0x%x) can't get Exchange.",
v_lport->port_id);
return UNF_RETURN_ERROR;
}
/* SFS Exchange Sum */
sfs_xchg_sum = v_lport->low_level_func.lport_cfg_items.max_sfs_xchg /
UNF_EXCHG_MGR_NUM;
xchg_sum = xchg_sum / UNF_EXCHG_MGR_NUM;
slab_num = v_lport->low_level_func.support_max_xid_range /
UNF_EXCHG_MGR_NUM;
for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
/* Alloc Exchange Manager */
xchg_mgr = (struct unf_xchg_mgr_s *)
vmalloc(sizeof(struct unf_xchg_mgr_s));
if (!xchg_mgr) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR,
UNF_LOG_REG_ATT, UNF_ERR,
"Port(0x%x) allocate Exchange Manager Memory Fail.",
v_lport->port_id);
goto exit;
}
/* Init Exchange Manager */
ret = unf_init_xchg_mgr(xchg_mgr);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO,
UNF_LOG_REG_ATT, UNF_MAJOR,
"Port(0x%x) initialization Exchange Manager unsuccessful.",
v_lport->port_id);
goto free_xchg_mgr;
}
/* Initialize the Exchange Free Pool resource */
ret = unf_init_xchg_mgr_free_pool(xchg_mgr);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO,
UNF_LOG_REG_ATT, UNF_MAJOR,
"Port(0x%x) initialization Exchange Manager Free Pool unsuccessful.",
v_lport->port_id);
goto free_xchg_mgr;
}
/* Allocate memory for Hot Pool and Xchg slab */
hot_pool = vmalloc(sizeof(struct unf_xchg_hot_pool_s) +
sizeof(struct unf_xchg_s *) * slab_num);
if (!hot_pool) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR,
UNF_LOG_REG_ATT, UNF_ERR,
"Port(0x%x) allocate Hot Pool Memory Fail.",
v_lport->port_id);
goto free_xchg_mgr;
}
memset(hot_pool, 0,
sizeof(struct unf_xchg_hot_pool_s) +
sizeof(struct unf_xchg_s *) * slab_num);
xchg_mgr->mem_size +=
(unsigned int)(sizeof(struct unf_xchg_hot_pool_s) +
sizeof(struct unf_xchg_s *) * slab_num);
/* Initialize the Exchange Hot Pool resource */
ret = unf_init_xchg_hot_pool(v_lport, hot_pool, slab_num);
if (ret != RETURN_OK)
goto free_hot_pool;
hot_pool->base += (unsigned short)(i * slab_num);
/* Allocate the memory of all Xchg (IO/SFS) */
xchg_mem = vmalloc(sizeof(struct unf_xchg_s) * xchg_sum);
if (!xchg_mem) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR,
UNF_LOG_REG_ATT, UNF_ERR,
"Port(0x%x) allocate Exchange Memory Fail.",
v_lport->port_id);
goto free_hot_pool;
}
memset(xchg_mem, 0, sizeof(struct unf_xchg_s) * xchg_sum);
xchg_mgr->mem_size +=
(unsigned int)(sizeof(struct unf_xchg_s) * xchg_sum);
xchg_mgr->hot_pool = hot_pool;
xchg_mgr->fcp_mm_start = xchg_mem;
/* Allocate the memory used by the SFS Xchg
* to carry the ELS/BLS/GS command and response
*/
xchg_mgr->sfs_mem_size =
(unsigned int)(sizeof(union unf_sfs_u) * sfs_xchg_sum);
/* Allocate the DMA space used to send SFS frames.
 * With a DMA32 mask (addresses below 4 GB) no 4 GB boundary
 * crossing can occur.
 */
order = (unsigned int)get_order(xchg_mgr->sfs_mem_size);
sfs_mm_start = dma_alloc_coherent(
&v_lport->low_level_func.dev->dev,
xchg_mgr->sfs_mem_size,
&sfs_phy_addr, GFP_KERNEL);
if (!sfs_mm_start) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR,
UNF_LOG_REG_ATT, UNF_ERR,
"Port(0x%x) Get Free Pagers Fail, Order(%u).",
v_lport->port_id, order);
goto free_xchg_mem;
}
memset(sfs_mm_start, 0, sizeof(union unf_sfs_u) * sfs_xchg_sum);
xchg_mgr->mem_size += xchg_mgr->sfs_mem_size;
xchg_mgr->sfs_mm_start = sfs_mm_start;
xchg_mgr->sfs_phy_addr = sfs_phy_addr;
/* The Xchg is initialized and mounted to the Free Pool */
ret = unf_init_xchg(v_lport, xchg_mgr, xchg_sum, sfs_xchg_sum);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO,
UNF_LOG_REG_ATT, UNF_MAJOR,
"Port(0x%x) initialization Exchange unsuccessful, Exchange Number(%u), SFS Exchange number(%u).",
v_lport->port_id, xchg_sum, sfs_xchg_sum);
dma_free_coherent(&v_lport->low_level_func.dev->dev,
xchg_mgr->sfs_mem_size,
xchg_mgr->sfs_mm_start,
xchg_mgr->sfs_phy_addr);
xchg_mgr->sfs_mm_start = 0;
goto free_xchg_mem;
}
/* Apply for the memory used by GID_PT, GID_FT, and RSCN */
ret = unf_alloc_and_init_big_sfs_pool(v_lport, xchg_mgr);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR,
UNF_LOG_REG_ATT, UNF_ERR,
"Port(0x%x) allocate big SFS fail",
v_lport->port_id);
unf_free_all_rsp_pages(xchg_mgr);
dma_free_coherent(&v_lport->low_level_func.dev->dev,
xchg_mgr->sfs_mem_size,
xchg_mgr->sfs_mm_start,
xchg_mgr->sfs_phy_addr);
xchg_mgr->sfs_mm_start = 0;
goto free_xchg_mem;
}
spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags);
v_lport->p_xchg_mgr[i] = (void *)xchg_mgr;
list_add_tail(&xchg_mgr->xchg_mgr_entry,
&v_lport->list_xchg_mgr_head);
spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Port(0x%x) ExchangeMgr:(0x%p),Base:(0x%x).",
v_lport->port_id, v_lport->p_xchg_mgr[i],
hot_pool->base);
}
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO,
"Port(0x%x) allocate Exchange Manager size(0x%x).",
v_lport->port_id, xchg_mgr->mem_size);
return RETURN_OK;
free_xchg_mem:
vfree(xchg_mem);
free_hot_pool:
vfree(hot_pool);
free_xchg_mgr:
vfree(xchg_mgr);
exit:
unf_free_exchg_mgr_info(v_lport);
return UNF_RETURN_ERROR;
}
void unf_xchg_mgr_destroy(struct unf_lport_s *v_lport)
{
UNF_CHECK_VALID(0x905, UNF_TRUE, v_lport, return);
unf_free_all_xchg_mgr(v_lport);
}
unsigned int unf_alloc_xchg_resource(struct unf_lport_s *v_lport)
{
UNF_CHECK_VALID(0x906, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
INIT_LIST_HEAD(&v_lport->list_dirty_xchg_mgr_head);
INIT_LIST_HEAD(&v_lport->list_xchg_mgr_head);
spin_lock_init(&v_lport->xchg_mgr_lock);
/* LPort Xchg Management Unit Allocation */
if (unf_alloc_and_init_xchg_mgr(v_lport) != RETURN_OK)
return UNF_RETURN_ERROR;
return RETURN_OK;
}
void unf_destroy_dirty_xchg(struct unf_lport_s *v_lport, int v_show_only)
{
unsigned int dirty_xchg = 0;
struct unf_xchg_mgr_s *exch_mgr = NULL;
unsigned long flags = 0;
struct list_head *node = NULL;
struct list_head *next_node = NULL;
UNF_CHECK_VALID(0x908, UNF_TRUE, v_lport, return);
if (v_lport->dirty_flag & UNF_LPORT_DIRTY_FLAG_XCHGMGR_DIRTY) {
spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags);
list_for_each_safe(node, next_node,
&v_lport->list_dirty_xchg_mgr_head) {
exch_mgr = list_entry(node, struct unf_xchg_mgr_s,
xchg_mgr_entry);
spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags);
if (exch_mgr) {
dirty_xchg =
(exch_mgr->free_pool.total_fcp_xchg +
exch_mgr->free_pool.total_sfs_xchg);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO,
UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Port(0x%x) has %u dirty exchange(s)",
v_lport->port_id, dirty_xchg);
unf_show_all_xchg(v_lport, exch_mgr);
if (v_show_only == UNF_FALSE) {
/* Delete Dirty Exchange Mgr entry */
spin_lock_irqsave(
&v_lport->xchg_mgr_lock,
flags);
list_del_init(
&exch_mgr->xchg_mgr_entry);
spin_unlock_irqrestore(
&v_lport->xchg_mgr_lock,
flags);
/* Free Dirty Exchange Mgr memory */
unf_free_xchg_mgr_mem(v_lport,
exch_mgr);
}
}
spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags);
}
spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags);
}
UNF_REFERNCE_VAR(dirty_xchg);
}
struct unf_xchg_mgr_s *unf_get_xchg_mgr_by_lport(struct unf_lport_s *v_lport,
unsigned int v_idx)
{
struct unf_xchg_mgr_s *xchg_mgr = NULL;
unsigned long flags = 0;
UNF_CHECK_VALID(0x909, UNF_TRUE, v_lport, return NULL);
UNF_CHECK_VALID(0x910, UNF_TRUE, v_idx < UNF_EXCHG_MGR_NUM,
return NULL);
spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags);
xchg_mgr = v_lport->p_xchg_mgr[v_idx];
spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags);
return xchg_mgr;
}
struct unf_xchg_hot_pool_s *unf_get_hot_pool_by_lport(
struct unf_lport_s *v_lport,
unsigned int v_mgr_idx)
{
struct unf_xchg_mgr_s *xchg_mgr = NULL;
struct unf_lport_s *lport = NULL;
UNF_CHECK_VALID(0x910, UNF_TRUE, (v_lport), return NULL);
lport = (struct unf_lport_s *)(v_lport->root_lport);
UNF_CHECK_VALID(0x910, UNF_TRUE, (lport), return NULL);
/* Get Xchg Manager */
xchg_mgr = unf_get_xchg_mgr_by_lport(lport, v_mgr_idx);
if (!xchg_mgr) {
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"Port(0x%x) Exchange Manager is NULL.",
lport->port_id);
return NULL;
}
/* Get Xchg Manager Hot Pool */
return xchg_mgr->hot_pool;
}
static inline void unf_hot_pool_slab_set(
struct unf_xchg_hot_pool_s *v_hot_pool,
unsigned short v_slab_index,
struct unf_xchg_s *v_xchg)
{
UNF_CHECK_VALID(0x911, UNF_TRUE, v_hot_pool, return);
v_hot_pool->xchg_slab[v_slab_index] = v_xchg;
}
static inline struct unf_xchg_s *unf_get_xchg_by_xchg_tag(
struct unf_xchg_hot_pool_s *v_hot_pool,
unsigned short v_slab_index)
{
UNF_CHECK_VALID(0x912, UNF_TRUE, v_hot_pool, return NULL);
return v_hot_pool->xchg_slab[v_slab_index];
}
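/*
 * Function Name : unf_lookup_xchg_by_tag
 * Function Description: Look up an exchange by its hot pool tag; for NPIV the
 *                       exchange managers of the root L_Port are used
 * Input Parameters : void *v_lport
 *                    unsigned short v_hot_pool_tag
 * Output Parameters : N/A
 * Return Type : static void *
 */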
static void *unf_lookup_xchg_by_tag(void *v_lport,
unsigned short v_hot_pool_tag)
{
struct unf_lport_s *lport = NULL;
struct unf_xchg_hot_pool_s *hot_pool = NULL;
struct unf_xchg_s *xchg = NULL;
unsigned long flags = 0;
unsigned int exchg_mgr_idx = 0;
struct unf_xchg_mgr_s *xchg_mgr = NULL;
UNF_CHECK_VALID(0x913, UNF_TRUE, v_lport, return NULL);
/* In the NPIV case, v_lport is the Vport pointer;
 * the ExchMgr of the root L_Port is shared.
 */
lport = ((struct unf_lport_s *)v_lport)->root_lport;
UNF_CHECK_VALID(0x914, UNF_TRUE, lport, return NULL);
exchg_mgr_idx = (v_hot_pool_tag * UNF_EXCHG_MGR_NUM) /
lport->low_level_func.support_max_xid_range;
if (unlikely(exchg_mgr_idx >= UNF_EXCHG_MGR_NUM)) {
UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]Port(0x%x) Get ExchgMgr %u err",
lport->port_id, exchg_mgr_idx);
return NULL;
}
xchg_mgr = lport->p_xchg_mgr[exchg_mgr_idx];
if (unlikely(!xchg_mgr)) {
UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]Port(0x%x) ExchgMgr %u is null",
lport->port_id, exchg_mgr_idx);
return NULL;
}
hot_pool = xchg_mgr->hot_pool;
if (unlikely(!hot_pool)) {
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"Port(0x%x) Hot Pool is NULL.", lport->port_id);
return NULL;
}
if (unlikely(v_hot_pool_tag >=
(hot_pool->slab_total_sum + hot_pool->base))) {
UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]LPort(0x%x) can't Input Tag(0x%x), Max(0x%x).",
lport->port_id, v_hot_pool_tag,
(hot_pool->slab_total_sum + hot_pool->base));
return NULL;
}
spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags);
xchg = unf_get_xchg_by_xchg_tag(hot_pool,
v_hot_pool_tag - hot_pool->base);
spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags);
return (void *)xchg;
}
static void *unf_find_xchg_by_oxid(void *v_lport, unsigned short v_oxid,
unsigned int v_oid)
{
struct unf_xchg_hot_pool_s *hot_pool = NULL;
struct unf_xchg_s *xchg = NULL;
struct list_head *node = NULL;
struct list_head *next_node = NULL;
struct unf_lport_s *lport = NULL;
unsigned long flags = 0;
unsigned long xchg_flags = 0;
unsigned int i = 0;
UNF_CHECK_VALID(0x915, UNF_TRUE, (v_lport), return NULL);
/* In the case of NPIV, the v_lport is the Vport pointer,
* and the share uses the ExchMgr of the RootLport
*/
lport = ((struct unf_lport_s *)v_lport)->root_lport;
UNF_CHECK_VALID(0x916, UNF_TRUE, (lport), return NULL);
for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
hot_pool = unf_get_hot_pool_by_lport(lport, i);
if (unlikely(!hot_pool)) {
UNF_TRACE(UNF_EVTLOG_IO_INFO,
UNF_LOG_IO_ATT, UNF_MAJOR,
"Port(0x%x) MgrIdex %u Hot Pool is NULL.",
lport->port_id, i);
continue;
}
spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags);
/* 1. Traverse sfs_busy list */
list_for_each_safe(node, next_node, &hot_pool->sfs_busylist) {
xchg = list_entry(node, struct unf_xchg_s,
list_xchg_entry);
spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flags);
if (UNF_CHECK_OXID_MATCHED(v_oxid, v_oid, xchg)) {
atomic_inc(&xchg->ref_cnt);
spin_unlock_irqrestore(&xchg->xchg_state_lock,
xchg_flags);
spin_unlock_irqrestore(
&hot_pool->xchg_hot_pool_lock, flags);
return xchg;
}
spin_unlock_irqrestore(&xchg->xchg_state_lock,
xchg_flags);
}
/* 2. Traverse INI_Busy List */
list_for_each_safe(node, next_node, &hot_pool->ini_busylist) {
xchg = list_entry(node, struct unf_xchg_s,
list_xchg_entry);
spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flags);
if (UNF_CHECK_OXID_MATCHED(v_oxid, v_oid, xchg)) {
atomic_inc(&xchg->ref_cnt);
spin_unlock_irqrestore(&xchg->xchg_state_lock,
xchg_flags);
spin_unlock_irqrestore(
&hot_pool->xchg_hot_pool_lock, flags);
return xchg;
}
spin_unlock_irqrestore(&xchg->xchg_state_lock,
xchg_flags);
}
spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags);
}
return NULL;
}
static inline int unf_check_xchg_matched(struct unf_xchg_s *xchg,
unsigned long long v_command_sn,
unsigned int v_world_id)
{
int matched = 0;
matched = (v_command_sn == xchg->cmnd_sn);
if (matched && (atomic_read(&xchg->ref_cnt) > 0))
return UNF_TRUE;
else
return UNF_FALSE;
}
static void *unf_lookup_xchg_by_cmnd_sn(void *v_lport,
unsigned long long v_command_sn,
unsigned int v_world_id)
{
struct unf_lport_s *lport = NULL;
struct unf_xchg_hot_pool_s *hot_pool = NULL;
struct list_head *node = NULL;
struct list_head *next_node = NULL;
struct unf_xchg_s *xchg = NULL;
unsigned long flags = 0;
unsigned int i;
UNF_CHECK_VALID(0x919, UNF_TRUE, v_lport, return NULL);
/* In the NPIV case, v_lport is a Vport pointer. Idle resources are
 * shared through the root L_Port's ExchMgr, but busy resources are
 * attached to each Vport, so the Vport itself must be used here.
 */
lport = (struct unf_lport_s *)v_lport;
UNF_CHECK_VALID(0x920, UNF_TRUE, lport, return NULL);
for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
hot_pool = unf_get_hot_pool_by_lport(lport, i);
if (unlikely(!hot_pool)) {
UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]Port(0x%x) hot pool is NULL",
lport->port_id);
continue;
}
/* from busy_list */
spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags);
list_for_each_safe(node, next_node, &hot_pool->ini_busylist) {
xchg = list_entry(node, struct unf_xchg_s,
list_xchg_entry);
if (unf_check_xchg_matched(xchg, v_command_sn,
v_world_id)) {
spin_unlock_irqrestore(
&hot_pool->xchg_hot_pool_lock, flags);
return xchg;
}
}
/* vport: from destroy_list */
if (lport != lport->root_lport) {
list_for_each_safe(node, next_node,
&hot_pool->list_destroy_xchg) {
xchg = list_entry(node, struct unf_xchg_s,
list_xchg_entry);
if (unf_check_xchg_matched(xchg, v_command_sn,
v_world_id)) {
spin_unlock_irqrestore(
&hot_pool->xchg_hot_pool_lock,
flags);
UNF_TRACE(UNF_EVTLOG_IO_INFO,
UNF_LOG_IO_ATT, UNF_MAJOR,
"[info]Port(0x%x) lookup exchange from destroy list",
lport->port_id);
return xchg;
}
}
}
spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags);
}
return NULL;
}
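/*
 * Function Name : unf_alloc_hot_pool_slab
 * Function Description: Find a free slot in the hot pool slab, bind the
 *                       exchange to it and assign the hot pool tag
 * Input Parameters : struct unf_xchg_hot_pool_s *v_hot_pool
 *                    struct unf_xchg_s *v_xchg
 *                    unsigned short v_rx_id
 * Output Parameters : N/A
 * Return Type : static inline unsigned int
 */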
static inline unsigned int unf_alloc_hot_pool_slab(
struct unf_xchg_hot_pool_s *v_hot_pool,
struct unf_xchg_s *v_xchg,
unsigned short v_rx_id)
{
unsigned short slab_index = 0;
UNF_CHECK_VALID(0x921, UNF_TRUE, v_hot_pool, return UNF_RETURN_ERROR);
UNF_CHECK_VALID(0x922, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR);
/* Check whether the hot-pool tag lies within the specified SIRT range.
 * If it does, set up the management relationship; otherwise handle it
 * as a normal I/O. If the SIRT bitmap is used but the tag is already
 * occupied, the I/O is discarded.
 */
v_hot_pool->slab_next_index =
(unsigned short)v_hot_pool->slab_next_index;
slab_index = v_hot_pool->slab_next_index;
while (unf_get_xchg_by_xchg_tag(v_hot_pool, slab_index)) {
slab_index++;
slab_index = slab_index % v_hot_pool->slab_total_sum;
/* Rewind occurs */
if (slab_index == v_hot_pool->slab_next_index) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO,
UNF_LOG_EQUIP_ATT, UNF_MAJOR,
"There is No Slab At Hot Pool(0x%p) for xchg(0x%p).",
v_hot_pool, v_xchg);
return UNF_RETURN_ERROR;
}
}
unf_hot_pool_slab_set(v_hot_pool, slab_index, v_xchg);
v_xchg->hot_pool_tag = slab_index + v_hot_pool->base;
slab_index++;
v_hot_pool->slab_next_index =
slab_index % v_hot_pool->slab_total_sum;
return RETURN_OK;
}
struct unf_esgl_page_s *unf_get_one_free_esgl_page(struct unf_lport_s *v_lport,
struct unf_xchg_s *v_xchg)
{
struct unf_lport_s *lport = NULL;
struct unf_esgl_s *esgl = NULL;
struct unf_xchg_s *xchg = NULL;
struct list_head *list_head = NULL;
unsigned long flag = 0;
UNF_CHECK_VALID(0x923, UNF_TRUE, v_lport, return NULL);
UNF_CHECK_VALID(0x924, UNF_TRUE, v_xchg, return NULL);
lport = v_lport;
xchg = v_xchg;
/* Obtain a new Esgl from the EsglPool and
* add it to the list_esgls of the Xchg
*/
spin_lock_irqsave(&lport->esgl_pool.esgl_pool_lock, flag);
if (!list_empty(&lport->esgl_pool.list_esgl_pool)) {
list_head = (&lport->esgl_pool.list_esgl_pool)->next;
list_del(list_head);
lport->esgl_pool.esgl_pool_count--;
list_add_tail(list_head, &xchg->list_esgls);
esgl = list_entry(list_head, struct unf_esgl_s, entry_esgl);
atomic_inc(&xchg->esgl_cnt);
spin_unlock_irqrestore(&lport->esgl_pool.esgl_pool_lock, flag);
} else {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) esgl pool is empty",
lport->nport_id);
spin_unlock_irqrestore(&lport->esgl_pool.esgl_pool_lock, flag);
return NULL;
}
return &esgl->page;
}
void unf_release_esgls(struct unf_xchg_s *v_xchg)
{
struct unf_lport_s *lport = NULL;
struct list_head *list = NULL;
struct list_head *list_tmp = NULL;
unsigned long flag = 0;
UNF_CHECK_VALID(0x925, UNF_TRUE, v_xchg, return);
UNF_CHECK_VALID(0x926, UNF_TRUE, v_xchg->lport, return);
if (atomic_read(&v_xchg->esgl_cnt) <= 0)
return;
/* In the NPIV case, the Vport pointer is saved in the exchange,
 * and the EsglPool of the root L_Port is shared.
 */
lport = (v_xchg->lport)->root_lport;
UNF_CHECK_VALID(0x927, UNF_TRUE, (lport), return);
spin_lock_irqsave(&lport->esgl_pool.esgl_pool_lock, flag);
if (!list_empty(&v_xchg->list_esgls)) {
list_for_each_safe(list, list_tmp, &v_xchg->list_esgls) {
list_del(list);
list_add_tail(list, &lport->esgl_pool.list_esgl_pool);
lport->esgl_pool.esgl_pool_count++;
atomic_dec(&v_xchg->esgl_cnt);
}
}
spin_unlock_irqrestore(&lport->esgl_pool.esgl_pool_lock, flag);
}
static void unf_init_xchg_attribute(struct unf_xchg_s *v_xchg)
{
unsigned long flags = 0;
UNF_CHECK_VALID(0x973, UNF_TRUE, (v_xchg), return);
spin_lock_irqsave(&v_xchg->xchg_state_lock, flags);
v_xchg->xchg_mgr = NULL;
v_xchg->free_pool = NULL;
v_xchg->hot_pool = NULL;
v_xchg->lport = NULL;
v_xchg->rport = NULL;
v_xchg->disc_rport = NULL;
v_xchg->io_state = UNF_IO_STATE_NEW;
v_xchg->io_send_stage = TGT_IO_SEND_STAGE_NONE;
v_xchg->io_send_result = TGT_IO_SEND_RESULT_INVALID;
v_xchg->io_send_abort = UNF_FALSE;
v_xchg->io_abort_result = UNF_FALSE;
v_xchg->abts_state = 0;
v_xchg->ox_id = INVALID_VALUE16;
v_xchg->abort_oxid = INVALID_VALUE16;
v_xchg->rx_id = INVALID_VALUE16;
v_xchg->sid = INVALID_VALUE32;
v_xchg->did = INVALID_VALUE32;
v_xchg->oid = INVALID_VALUE32;
v_xchg->disc_port_id = INVALID_VALUE32;
v_xchg->seq_id = INVALID_VALUE8;
v_xchg->cmnd_code = INVALID_VALUE32;
v_xchg->cmnd_sn = INVALID_VALUE64;
v_xchg->data_len = 0;
v_xchg->resid_len = 0;
v_xchg->data_direction = DMA_NONE;
v_xchg->hot_pool_tag = INVALID_VALUE16;
v_xchg->big_sfs_buf = NULL;
v_xchg->may_consume_res_cnt = 0;
v_xchg->fact_consume_res_cnt = 0;
v_xchg->io_front_jif = INVALID_VALUE64;
v_xchg->ob_callback_sts = UNF_IO_SUCCESS;
v_xchg->start_jif = 0;
v_xchg->rport_bind_jifs = INVALID_VALUE64;
v_xchg->scsi_id = INVALID_VALUE32;
v_xchg->world_id = INVALID_VALUE32;
memset(&v_xchg->seq, 0, sizeof(struct unf_seq_s));
memset(&v_xchg->fcp_cmnd, 0, sizeof(struct unf_fcp_cmnd_s));
memset(&v_xchg->scsi_cmnd_info, 0, sizeof(struct unf_scsi_cmd_info_s));
memset(&v_xchg->abts_rsps, 0, sizeof(struct unf_abts_rsps_s));
memset(&v_xchg->dif_info, 0, sizeof(struct dif_info_s));
memset(v_xchg->private, 0,
(PKG_MAX_PRIVATE_DATA_SIZE * sizeof(unsigned int)));
v_xchg->echo_info.echo_result = UNF_ELS_ECHO_RESULT_OK;
v_xchg->echo_info.response_time = 0;
if (v_xchg->xchg_type == UNF_XCHG_TYPE_INI) {
if (v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu)
memset(v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu,
0, sizeof(struct unf_fcprsp_iu_s));
} else if (v_xchg->xchg_type == UNF_XCHG_TYPE_SFS) {
if (v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) {
memset(v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr,
0, sizeof(union unf_sfs_u));
v_xchg->fcp_sfs_union.sfs_entry.cur_offset = 0;
}
} else {
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"Exchange Type(0x%x) SFS Union uninited.",
v_xchg->xchg_type);
}
v_xchg->xchg_type = UNF_XCHG_TYPE_INVALID;
v_xchg->pfn_ob_callback = NULL;
v_xchg->pfn_callback = NULL;
v_xchg->pfn_free_xchg = NULL;
atomic_set(&v_xchg->ref_cnt, 0);
atomic_set(&v_xchg->esgl_cnt, 0);
atomic_set(&v_xchg->delay_flag, 0);
if (delayed_work_pending(&v_xchg->timeout_work))
UNF_DEL_XCHG_TIMER_SAFE(v_xchg);
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags);
}
static void unf_add_back_to_fcp_list(
struct unf_xchg_free_pool_s *v_free_pool,
struct unf_xchg_s *v_xchg)
{
unsigned long flags = 0;
UNF_CHECK_VALID(0x928, UNF_TRUE, v_free_pool, return);
UNF_CHECK_VALID(0x929, UNF_TRUE, v_xchg, return);
unf_init_xchg_attribute(v_xchg);
/* The released I/O resources are added to
* the queue tail to facilitate fault locating
*/
spin_lock_irqsave(&v_free_pool->xchg_free_pool_lock, flags);
list_add_tail(&v_xchg->list_xchg_entry,
&v_free_pool->list_free_xchg_list);
v_free_pool->total_fcp_xchg++;
spin_unlock_irqrestore(&v_free_pool->xchg_free_pool_lock, flags);
}
static void unf_check_xchg_mgr_status(struct unf_xchg_mgr_s *v_xchg_mgr)
{
unsigned long flags = 0;
unsigned int total_xchg = 0;
unsigned int total_xchg_sum = 0;
UNF_CHECK_VALID(0x930, UNF_TRUE, v_xchg_mgr, return);
spin_lock_irqsave(&v_xchg_mgr->free_pool.xchg_free_pool_lock, flags);
total_xchg = v_xchg_mgr->free_pool.total_fcp_xchg +
v_xchg_mgr->free_pool.total_sfs_xchg;
total_xchg_sum = v_xchg_mgr->free_pool.fcp_xchg_sum +
v_xchg_mgr->free_pool.sfs_xchg_sum;
if ((v_xchg_mgr->free_pool.xchg_mgr_completion) &&
(total_xchg == total_xchg_sum)) {
complete(v_xchg_mgr->free_pool.xchg_mgr_completion);
}
spin_unlock_irqrestore(&v_xchg_mgr->free_pool.xchg_free_pool_lock,
flags);
}
static void unf_free_fcp_xchg(struct unf_xchg_s *v_xchg)
{
struct unf_xchg_free_pool_s *free_pool = NULL;
struct unf_xchg_mgr_s *xchg_mgr = NULL;
struct unf_lport_s *lport = NULL;
struct unf_rport_s *rport = NULL;
UNF_CHECK_VALID(0x932, UNF_TRUE, v_xchg, return);
/* Releasing a Specified INI I/O and Invoking the scsi_done Process */
unf_done_ini_xchg(v_xchg);
free_pool = v_xchg->free_pool;
xchg_mgr = v_xchg->xchg_mgr;
lport = v_xchg->lport;
rport = v_xchg->rport;
atomic_dec(&rport->pending_io_cnt);
/* Release the Esgls in the Xchg structure and
* return it to the EsglPool of the Lport
*/
unf_release_esgls(v_xchg);
/* Mount I/O resources to the FCP Free linked list */
unf_add_back_to_fcp_list(free_pool, v_xchg);
/* When the port is being removed, check the exchange manager status so
 * that the waiter is completed once all exchanges have been returned.
 */
if (unlikely(lport->b_port_removing == UNF_TRUE))
unf_check_xchg_mgr_status(xchg_mgr);
}
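/*
 * Function Name : unf_fc_abort_timeout_cmnd
 * Function Description: Abort a timed-out INI command: send ABTS, wait for
 *                       the abort marker and complete the SCSI command
 *                       according to the result
 * Input Parameters : struct unf_lport_s *v_lport
 *                    struct unf_xchg_s *v_xchg
 * Output Parameters : N/A
 * Return Type : static void
 */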
static void unf_fc_abort_timeout_cmnd(struct unf_lport_s *v_lport,
struct unf_xchg_s *v_xchg)
{
struct unf_lport_s *lport = v_lport;
struct unf_xchg_s *xchg = v_xchg;
struct unf_scsi_cmd_s scsi_cmnd = { 0 };
unsigned long flag = 0;
unsigned int timeout_value = 2000;
unsigned int return_value = 0;
struct unf_rport_scsi_id_image_s *scsi_image_table = NULL;
UNF_CHECK_VALID(0x936, UNF_TRUE, v_lport, return);
UNF_CHECK_VALID(0x937, UNF_TRUE, v_xchg, return);
spin_lock_irqsave(&v_xchg->xchg_state_lock, flag);
if (v_xchg->io_state & INI_IO_STATE_UPABORT) {
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag);
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"LPort(0x%x) xchange(0x%p) OX_ID(0x%x), RX_ID(0x%x) Cmdsn(0x%lx) has been aborted.",
lport->port_id, v_xchg, v_xchg->ox_id,
v_xchg->rx_id, (unsigned long)v_xchg->cmnd_sn);
return;
}
v_xchg->io_state |= INI_IO_STATE_UPABORT;
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag);
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_NORMAL, UNF_KEVENT,
"LPort(0x%x) exchg(0x%p) OX_ID(0x%x) RX_ID(0x%x) Cmdsn(0x%lx) timeout abort it",
lport->port_id, v_xchg, v_xchg->ox_id,
v_xchg->rx_id, (unsigned long)v_xchg->cmnd_sn);
lport->xchg_mgr_temp.pfn_unf_xchg_add_timer(
(void *)v_xchg,
(unsigned long)UNF_WAIT_ABTS_RSP_TIMEOUT,
UNF_TIMER_TYPE_INI_ABTS);
sema_init(&v_xchg->task_sema, 0);
scsi_cmnd.scsi_id = xchg->scsi_cmnd_info.scsi_id;
scsi_cmnd.upper_cmnd = xchg->scsi_cmnd_info.scsi_cmnd;
scsi_cmnd.pfn_done = xchg->scsi_cmnd_info.pfn_done;
scsi_image_table = &lport->rport_scsi_table;
if (unf_send_abts(lport, v_xchg) != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"LPort(0x%x) send ABTS, Send ABTS unsuccessful. Exchange OX_ID(0x%x), RX_ID(0x%x).",
lport->port_id, v_xchg->ox_id,
v_xchg->rx_id);
lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)v_xchg);
spin_lock_irqsave(&v_xchg->xchg_state_lock, flag);
v_xchg->io_state &= ~INI_IO_STATE_UPABORT;
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag);
/* The message fails to be sent.
* It is released internally and does not
* need to be released externally.
*/
return;
}
if (down_timeout(&v_xchg->task_sema,
(long long)msecs_to_jiffies(timeout_value))) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) recv abts marker timeout,Exch(0x%p) OX_ID(0x%x) RX_ID(0x%x)",
lport->port_id, v_xchg,
v_xchg->ox_id, v_xchg->rx_id);
lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)v_xchg);
/* Cancel the INI_IO_STATE_UPABORT flag
 * and let TMF handle the I/O
 */
spin_lock_irqsave(&v_xchg->xchg_state_lock, flag);
v_xchg->io_state &= ~INI_IO_STATE_UPABORT;
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag);
return;
}
spin_lock_irqsave(&v_xchg->xchg_state_lock, flag);
if ((v_xchg->ucode_abts_state == UNF_IO_SUCCESS) ||
(v_xchg->scsi_cmnd_info.result == UNF_IO_ABORT_PORT_REMOVING)) {
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag);
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_MAJOR,
"[info]Port(0x%x) Send ABTS succeed and recv marker Exch(0x%p) OX_ID(0x%x) RX_ID(0x%x) marker status(0x%x)",
lport->port_id, v_xchg,
v_xchg->ox_id, v_xchg->rx_id,
v_xchg->ucode_abts_state);
return_value = DID_BUS_BUSY;
UNF_IO_RESULT_CNT(scsi_image_table, scsi_cmnd.scsi_id,
return_value);
unf_complete_cmnd(&scsi_cmnd, DID_BUS_BUSY << 16);
return;
}
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag);
lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)v_xchg);
spin_lock_irqsave(&v_xchg->xchg_state_lock, flag);
v_xchg->io_state &= ~INI_IO_STATE_UPABORT;
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag);
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[warn]Port(0x%x) send ABTS failed. Exch(0x%p) hot_tag(0x%x) ret(0x%x) v_xchg->io_state (0x%x)",
lport->port_id, v_xchg, v_xchg->hot_pool_tag,
v_xchg->scsi_cmnd_info.result, v_xchg->io_state);
}
static void unf_fc_ini_send_abts_timeout(struct unf_lport_s *lport,
struct unf_rport_s *rport,
struct unf_xchg_s *xchg)
{
if (xchg->rport_bind_jifs == rport->rport_alloc_jifs &&
xchg->rport_bind_jifs != INVALID_VALUE64) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) RPort(0x%x) Exch(0x%p) first time to send abts timeout, retry again OX_ID(0x%x) RX_ID(0x%x) state(0x%x)",
lport->port_id, rport->nport_id,
xchg, xchg->ox_id, xchg->rx_id, xchg->io_state);
lport->xchg_mgr_temp.pfn_unf_xchg_add_timer(
(void *)xchg,
(unsigned long)UNF_WAIT_ABTS_RSP_TIMEOUT,
UNF_TIMER_TYPE_INI_ABTS);
if (unf_send_abts(lport, xchg) != RETURN_OK) {
lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer(
(void *)xchg);
unf_abts_timeout_recovery_default(rport, xchg);
unf_cm_free_xchg(lport, xchg);
}
} else {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) RPort(0x%x) Exch(0x%p) rport is invalid, exchg rport jiff(0x%llx 0x%llx), free exchange OX_ID(0x%x) RX_ID(0x%x) state(0x%x)",
lport->port_id, rport->nport_id, xchg,
xchg->rport_bind_jifs, rport->rport_alloc_jifs,
xchg->ox_id, xchg->rx_id, xchg->io_state);
unf_cm_free_xchg(lport, xchg);
}
}
static void unf_fc_ini_io_rec_wait_timeout(struct unf_lport_s *lport,
struct unf_rport_s *rport,
struct unf_xchg_s *xchg)
{
unsigned long io_time_out = 0;
if (xchg->rport_bind_jifs == rport->rport_alloc_jifs) {
unf_send_rec(lport, rport, xchg);
if (xchg->scsi_cmnd_info.abort_timeout > 0) {
io_time_out =
(xchg->scsi_cmnd_info.abort_timeout >
UNF_REC_TOV) ?
(xchg->scsi_cmnd_info.abort_timeout -
UNF_REC_TOV) : 0;
if (io_time_out > 0) {
lport->xchg_mgr_temp.pfn_unf_xchg_add_timer(
(void *)xchg,
io_time_out,
UNF_TIMER_TYPE_REQ_IO);
} else {
unf_fc_abort_timeout_cmnd(lport, xchg);
}
}
} else {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) RPort(0x%x) Exch(0x%p) Rec timeout exchange OX_ID(0x%x) RX_ID(0x%x) state(0x%x), bindjifs(0x%llx)no eqal Rport alloc jifs(0x%llx)",
lport->port_id, rport->nport_id,
xchg, xchg->ox_id, xchg->rx_id,
xchg->io_state, xchg->rport_bind_jifs,
rport->rport_alloc_jifs);
}
}
static void unf_fc_ini_io_xchg_timeout(struct work_struct *v_work)
{
struct unf_xchg_s *xchg = NULL;
struct unf_lport_s *lport = NULL;
struct unf_rport_s *rport = NULL;
unsigned long flags = 0;
unsigned int ret = UNF_RETURN_ERROR;
unsigned int port_valid_flag = 0;
UNF_REFERNCE_VAR(ret);
xchg = container_of(v_work, struct unf_xchg_s, timeout_work.work);
UNF_CHECK_VALID(0x939, UNF_TRUE, xchg, return);
ret = unf_xchg_ref_inc(xchg, INI_IO_TIMEOUT);
UNF_CHECK_VALID(0x940, UNF_TRUE, ret == RETURN_OK, return);
lport = xchg->lport;
rport = xchg->rport;
port_valid_flag = !lport || !rport;
if (port_valid_flag) {
unf_xchg_ref_dec(xchg, INI_IO_TIMEOUT);
unf_xchg_ref_dec(xchg, INI_IO_TIMEOUT);
return;
}
spin_lock_irqsave(&xchg->xchg_state_lock, flags);
/* 1. for Send RRQ failed Timer timeout */
if (INI_IO_STATE_RRQSEND_ERR & xchg->io_state) {
spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[info]LPort(0x%x) RPort(0x%x) Exch(0x%p) had wait enough time for RRQ send failed OX_ID(0x%x) RX_ID(0x%x) state(0x%x)",
lport->port_id, rport->nport_id,
xchg, xchg->ox_id, xchg->rx_id, xchg->io_state);
unf_cm_free_xchg(lport, xchg);
}
/* Second ABTS timeout and enter LOGO process */
else if ((INI_IO_STATE_ABORT_TIMEOUT & xchg->io_state) &&
(!(ABTS_RESPONSE_RECEIVED & xchg->abts_state))) {
spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) RPort(0x%x) Exch(0x%p) had wait enough time for second abts send OX_ID(0x%x) RX_ID(0x%x) state(0x%x)",
lport->port_id, rport->nport_id,
xchg, xchg->ox_id, xchg->rx_id,
xchg->io_state);
unf_abts_timeout_recovery_default(rport, xchg);
unf_cm_free_xchg(lport, xchg);
}
/* First time to send ABTS, timeout and retry to send ABTS again */
else if ((xchg->io_state & INI_IO_STATE_UPABORT) &&
(!(xchg->abts_state & ABTS_RESPONSE_RECEIVED))) {
xchg->io_state |= INI_IO_STATE_ABORT_TIMEOUT;
spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
unf_fc_ini_send_abts_timeout(lport, rport, xchg);
}
/* 3. IO_DONE */
else if ((xchg->io_state & INI_IO_STATE_DONE) &&
(xchg->abts_state & ABTS_RESPONSE_RECEIVED)) {
/*
* for IO_DONE:
* 1. INI ABTS first timer time out
* 2. INI RCVD ABTS Response
* 3. Normal case for I/O Done
*/
/* Send ABTS & RCVD RSP & no timeout */
spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
/* Send RRQ */
if (unf_send_rrq(lport, rport, xchg) == RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT,
UNF_MAJOR,
"[info]LPort(0x%x) send RRQ succeed to RPort(0x%x) Exch(0x%p) OX_ID(0x%x) RX_ID(0x%x) state(0x%x)",
lport->port_id, rport->nport_id, xchg,
xchg->ox_id, xchg->rx_id, xchg->io_state);
} else {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT,
UNF_WARN,
"[warn]LPort(0x%x) can't send RRQ to RPort(0x%x) Exch(0x%p) OX_ID(0x%x) RX_ID(0x%x) state(0x%x)",
lport->port_id, rport->nport_id, xchg,
xchg->ox_id, xchg->rx_id, xchg->io_state);
spin_lock_irqsave(&xchg->xchg_state_lock, flags);
xchg->io_state |= INI_IO_STATE_RRQSEND_ERR;
spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
lport->xchg_mgr_temp.pfn_unf_xchg_add_timer(
(void *)xchg,
(unsigned long)UNF_WRITE_RRQ_SENDERR_INTERVAL,
UNF_TIMER_TYPE_INI_IO);
}
} else if (xchg->io_state & INI_IO_STATE_REC_TIMEOUT_WAIT) {
xchg->io_state &= ~INI_IO_STATE_REC_TIMEOUT_WAIT;
spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
unf_fc_ini_io_rec_wait_timeout(lport, rport, xchg);
} else {
/* 4. I/O Timer Timeout */
/* vmware */
spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
unf_fc_abort_timeout_cmnd(lport, xchg);
}
unf_xchg_ref_dec(xchg, INI_IO_TIMEOUT);
unf_xchg_ref_dec(xchg, INI_IO_TIMEOUT);
UNF_REFERNCE_VAR(ret);
}
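/*
 * Function Name : unf_alloc_io_xchg
 * Function Description: Take an exchange from the free pool, register it in
 *                       the hot pool slab and initialize it for a new INI I/O
 * Input Parameters : struct unf_lport_s *v_lport
 *                    struct unf_xchg_mgr_s *v_xchg_mgr
 *                    unsigned int v_xchg_type
 *                    unsigned short v_rx_id
 * Output Parameters : N/A
 * Return Type : static inline struct unf_xchg_s *
 */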
static inline struct unf_xchg_s *unf_alloc_io_xchg(
struct unf_lport_s *v_lport,
struct unf_xchg_mgr_s *v_xchg_mgr,
unsigned int v_xchg_type,
unsigned short v_rx_id)
{
struct unf_xchg_s *xchg = NULL;
struct list_head *list_node = NULL;
struct unf_xchg_free_pool_s *free_pool = NULL;
struct unf_xchg_hot_pool_s *hot_pool = NULL;
unsigned long flags = 0;
static atomic64_t s_exhg_id;
void (*unf_fc_io_xchg_timeout)(struct work_struct *v_work) = NULL;
UNF_CHECK_VALID(0x941, UNF_TRUE, v_xchg_mgr, return NULL);
UNF_CHECK_VALID(0x942, UNF_TRUE, v_lport, return NULL);
free_pool = &v_xchg_mgr->free_pool;
hot_pool = v_xchg_mgr->hot_pool;
UNF_CHECK_VALID(0x943, UNF_TRUE, free_pool, return NULL);
UNF_CHECK_VALID(0x944, UNF_TRUE, hot_pool, return NULL);
/* 1. Free Pool */
spin_lock_irqsave(&free_pool->xchg_free_pool_lock, flags);
if (unlikely(list_empty(&free_pool->list_free_xchg_list))) {
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_INFO,
"Port(0x%x) have no Exchange anymore.",
v_lport->port_id);
spin_unlock_irqrestore(&free_pool->xchg_free_pool_lock, flags);
return NULL;
}
/* Select an idle node from free pool */
list_node = (&free_pool->list_free_xchg_list)->next;
list_del(list_node);
free_pool->total_fcp_xchg--;
spin_unlock_irqrestore(&free_pool->xchg_free_pool_lock, flags);
xchg = list_entry(list_node, struct unf_xchg_s, list_xchg_entry);
/*
 * Hot Pool:
 * When the xchg is mounted to the hot pool, its mount and release modes
 * must be specified; the xchg is then placed on the INI busy list.
 */
flags = 0;
spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags);
if (unf_alloc_hot_pool_slab(hot_pool, xchg, v_rx_id) != RETURN_OK) {
spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags);
unf_add_back_to_fcp_list(free_pool, xchg);
if (unlikely(v_lport->b_port_removing == UNF_TRUE))
unf_check_xchg_mgr_status(v_xchg_mgr);
return NULL;
}
list_add_tail(&xchg->list_xchg_entry, &hot_pool->ini_busylist);
unf_fc_io_xchg_timeout = unf_fc_ini_io_xchg_timeout;
spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags);
/* 3. Exchange State */
spin_lock_irqsave(&xchg->xchg_state_lock, flags);
xchg->start_jif = atomic64_inc_return(&s_exhg_id);
xchg->xchg_mgr = v_xchg_mgr;
xchg->free_pool = free_pool;
xchg->hot_pool = hot_pool;
xchg->lport = v_lport;
xchg->xchg_type = v_xchg_type;
xchg->pfn_free_xchg = unf_free_fcp_xchg;
xchg->io_state = UNF_IO_STATE_NEW;
xchg->io_send_stage = TGT_IO_SEND_STAGE_NONE;
xchg->io_send_result = TGT_IO_SEND_RESULT_INVALID;
xchg->io_send_abort = UNF_FALSE;
xchg->io_abort_result = UNF_FALSE;
xchg->ox_id = INVALID_VALUE16;
xchg->abort_oxid = INVALID_VALUE16;
xchg->rx_id = INVALID_VALUE16;
xchg->sid = INVALID_VALUE32;
xchg->did = INVALID_VALUE32;
xchg->oid = INVALID_VALUE32;
xchg->seq_id = INVALID_VALUE8;
xchg->cmnd_code = INVALID_VALUE32;
xchg->data_len = 0;
xchg->resid_len = 0;
xchg->data_direction = DMA_NONE;
xchg->may_consume_res_cnt = 0;
xchg->fact_consume_res_cnt = 0;
xchg->io_front_jif = 0;
xchg->tmf_state = 0;
xchg->ucode_abts_state = INVALID_VALUE32;
xchg->abts_state = 0;
xchg->rport_bind_jifs = INVALID_VALUE64;
xchg->scsi_id = INVALID_VALUE32;
xchg->world_id = INVALID_VALUE32;
memset(&xchg->dif_control, 0, sizeof(struct unf_dif_control_info_s));
memset(&xchg->req_sgl_info, 0, sizeof(struct unf_req_sgl_info_s));
memset(&xchg->dif_sgl_info, 0, sizeof(struct unf_req_sgl_info_s));
memset(&xchg->abts_rsps, 0, sizeof(struct unf_abts_rsps_s));
xchg->scsi_cmnd_info.result = 0;
xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] =
(unsigned int)atomic64_inc_return(&v_lport->exchg_index);
if (xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] == 0)
xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] =
(unsigned int)atomic64_inc_return(&v_lport->exchg_index);
atomic_set(&xchg->ref_cnt, 0);
atomic_set(&xchg->delay_flag, 0);
if (delayed_work_pending(&xchg->timeout_work))
UNF_DEL_XCHG_TIMER_SAFE(xchg);
INIT_DELAYED_WORK(&xchg->timeout_work, unf_fc_io_xchg_timeout);
spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
return xchg;
}
static void unf_add_back_to_sfs_list(
struct unf_xchg_free_pool_s *v_free_pool,
struct unf_xchg_s *v_xchg)
{
unsigned long flags = 0;
UNF_CHECK_VALID(0x945, UNF_TRUE, v_free_pool, return);
UNF_CHECK_VALID(0x946, UNF_TRUE, v_xchg, return);
unf_init_xchg_attribute(v_xchg);
spin_lock_irqsave(&v_free_pool->xchg_free_pool_lock, flags);
list_add_tail(&v_xchg->list_xchg_entry,
&v_free_pool->list_sfs_xchg_list);
v_free_pool->total_sfs_xchg++;
spin_unlock_irqrestore(&v_free_pool->xchg_free_pool_lock, flags);
}
static void unf_free_sfs_xchg(struct unf_xchg_s *v_xchg)
{
struct unf_xchg_free_pool_s *free_pool = NULL;
struct unf_xchg_mgr_s *xchg_mgr = NULL;
struct unf_lport_s *lport = NULL;
UNF_CHECK_VALID(0x947, UNF_TRUE, v_xchg, return);
free_pool = v_xchg->free_pool;
lport = v_xchg->lport;
xchg_mgr = v_xchg->xchg_mgr;
/* The memory is applied for when the GID_PT/GID_FT is sent.
* If no response is received, the GID_PT/GID_FT
* needs to be forcibly released.
*/
unf_free_one_big_sfs(v_xchg);
unf_add_back_to_sfs_list(free_pool, v_xchg);
if (unlikely(lport->b_port_removing == UNF_TRUE))
unf_check_xchg_mgr_status(xchg_mgr);
}
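/* Arm the exchange timeout delayed work. The timeout is adjusted by the
 * RRQ/ELS redundant time according to the timer type, and an extra
 * reference is held on the exchange while the delayed work is queued.
 */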
static void unf_fc_xchg_add_timer(void *v_xchg,
unsigned long v_time_ms,
enum unf_timer_type_e v_en_time_type)
{
unsigned long flag = 0;
struct unf_xchg_s *xchg = NULL;
unsigned long time_ms = v_time_ms;
struct unf_lport_s *lport;
UNF_CHECK_VALID(0x948, UNF_TRUE, v_xchg, return);
xchg = (struct unf_xchg_s *)v_xchg;
lport = xchg->lport;
UNF_CHECK_VALID(0x948, UNF_TRUE, lport, return);
/* update timeout */
switch (v_en_time_type) {
case UNF_TIMER_TYPE_INI_RRQ:
time_ms = time_ms - UNF_INI_RRQ_REDUNDANT_TIME;
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT,
UNF_INFO, "INI RRQ Timer set.");
break;
case UNF_TIMER_TYPE_SFS:
time_ms = time_ms + UNF_INI_ELS_REDUNDANT_TIME;
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT,
UNF_INFO, "INI ELS Timer set.");
break;
default:
break;
}
/* The xchg of the timer must be valid.
* If the reference count of xchg is 0,
* the timer must not be added
*/
if (atomic_read(&xchg->ref_cnt) <= 0) {
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_KEVENT,
"[warn]Abnormal Exchange(0x%p), Reference count(0x%x), Can't add timer.",
xchg, atomic_read(&xchg->ref_cnt));
return;
}
/* Delay Work: Hold for timer */
spin_lock_irqsave(&xchg->xchg_state_lock, flag);
if (queue_delayed_work(lport->xchg_wq,
&xchg->timeout_work,
(unsigned long)
msecs_to_jiffies((unsigned int)time_ms))) {
/* hold for timer */
atomic_inc(&xchg->ref_cnt);
}
spin_unlock_irqrestore(&xchg->xchg_state_lock, flag);
}
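/* Delayed-work handler for SFS exchange timeout: aborted exchanges
 * (except RRQ/LOGO) are skipped; otherwise ELS replies trigger
 * L_Port/R_Port error recovery and other exchanges invoke their
 * ob_callback.
 */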
static void unf_sfs_xchg_timeout(struct work_struct *v_work)
{
struct unf_xchg_s *xchg = NULL;
unsigned int ret = UNF_RETURN_ERROR;
struct unf_lport_s *lport = NULL;
struct unf_rport_s *rport = NULL;
unsigned long flags = 0;
UNF_CHECK_VALID(0x949, UNF_TRUE, v_work, return);
xchg = container_of(v_work, struct unf_xchg_s, timeout_work.work);
UNF_CHECK_VALID(0x950, UNF_TRUE, xchg, return);
ret = unf_xchg_ref_inc(xchg, SFS_TIMEOUT);
UNF_REFERNCE_VAR(ret);
UNF_CHECK_VALID(0x951, UNF_TRUE, ret == RETURN_OK, return);
spin_lock_irqsave(&xchg->xchg_state_lock, flags);
lport = xchg->lport;
rport = xchg->rport;
spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
unf_xchg_ref_dec(xchg, SFS_TIMEOUT);
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]SFS Exch(%p) Cmnd(0x%x) IO Exch(0x%p) Sid_Did(0x%x:0x%x) HotTag(0x%x) State(0x%x) Timeout.",
xchg, xchg->cmnd_code, xchg->io_xchg, xchg->sid,
xchg->did, xchg->hot_pool_tag, xchg->io_state);
spin_lock_irqsave(&xchg->xchg_state_lock, flags);
if ((xchg->io_state & TGT_IO_STATE_ABORT) &&
(xchg->cmnd_code != ELS_RRQ) &&
(xchg->cmnd_code != ELS_LOGO)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO,
"SFS Exch(0x%p) Cmnd(0x%x) Hot Pool Tag(0x%x) timeout, but aborted, no need to handle.",
xchg, xchg->cmnd_code, xchg->hot_pool_tag);
spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
unf_xchg_ref_dec(xchg, SFS_TIMEOUT);
unf_xchg_ref_dec(xchg, SFS_TIMEOUT);
return;
}
spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
/* The sfs times out. If the sfs is ELS reply,
* go to unf_rport_error_recovery/unf_lport_error_recovery.
* Otherwise, go to the corresponding obCallback.
*/
if (UNF_XCHG_IS_ELS_REPLY(xchg) && (rport)) {
if (rport->nport_id >= UNF_FC_FID_DOM_MGR)
unf_lport_error_recovery(lport);
else
unf_rport_error_recovery(rport);
} else if (xchg->pfn_ob_callback) {
xchg->pfn_ob_callback(xchg);
} else {
/* Do nothing */
}
unf_xchg_ref_dec(xchg, SFS_TIMEOUT);
unf_xchg_ref_dec(xchg, SFS_TIMEOUT);
}
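/* Allocate an SFS exchange: take an idle node from the free pool, mount
 * it into the hot pool slab and SFS busy list, then initialize its basic
 * attributes and timeout work.
 */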
static struct unf_xchg_s *unf_alloc_sfs_xchg(struct unf_lport_s *v_lport,
struct unf_xchg_mgr_s *v_xchg_mgr,
unsigned int v_xchg_type,
unsigned short v_rx_id)
{
struct unf_xchg_s *xchg = NULL;
struct list_head *list_node = NULL;
struct unf_xchg_free_pool_s *free_pool = NULL;
struct unf_xchg_hot_pool_s *hot_pool = NULL;
unsigned long flags = 0;
UNF_CHECK_VALID(0x952, UNF_TRUE, v_lport, return NULL);
UNF_CHECK_VALID(0x953, UNF_TRUE, v_xchg_mgr, return NULL);
free_pool = &v_xchg_mgr->free_pool;
hot_pool = v_xchg_mgr->hot_pool;
UNF_CHECK_VALID(0x954, UNF_TRUE, free_pool, return NULL);
UNF_CHECK_VALID(0x955, UNF_TRUE, hot_pool, return NULL);
/* Select an idle node from free pool */
spin_lock_irqsave(&free_pool->xchg_free_pool_lock, flags);
if (list_empty(&free_pool->list_sfs_xchg_list)) {
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"Port(0x%x) have no Exchange anymore.",
v_lport->port_id);
spin_unlock_irqrestore(&free_pool->xchg_free_pool_lock, flags);
return NULL;
}
list_node = (&free_pool->list_sfs_xchg_list)->next;
list_del(list_node);
free_pool->total_sfs_xchg--;
spin_unlock_irqrestore(&free_pool->xchg_free_pool_lock, flags);
xchg = list_entry(list_node, struct unf_xchg_s, list_xchg_entry);
/*
 * The xchg is mounted to the hot pool.
 * The mount mode and release mode of the xchg must be specified,
 * and the xchg is stored in the SFS busy list.
 */
flags = 0;
spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags);
if (unf_alloc_hot_pool_slab(hot_pool, xchg, v_rx_id) != RETURN_OK) {
spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags);
unf_add_back_to_sfs_list(free_pool, xchg);
if (unlikely(v_lport->b_port_removing == UNF_TRUE))
unf_check_xchg_mgr_status(v_xchg_mgr);
return NULL;
}
list_add_tail(&xchg->list_xchg_entry, &hot_pool->sfs_busylist);
hot_pool->total_xchges++;
spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags);
spin_lock_irqsave(&xchg->xchg_state_lock, flags);
xchg->free_pool = free_pool;
xchg->hot_pool = hot_pool;
xchg->lport = v_lport;
xchg->xchg_mgr = v_xchg_mgr;
xchg->pfn_free_xchg = unf_free_sfs_xchg;
xchg->xchg_type = v_xchg_type;
xchg->io_state = UNF_IO_STATE_NEW;
xchg->scsi_cmnd_info.result = 0;
xchg->ob_callback_sts = UNF_IO_SUCCESS;
xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] =
(unsigned int)atomic64_inc_return(&v_lport->exchg_index);
if (xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] == 0)
xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] =
(unsigned int)
atomic64_inc_return(&v_lport->exchg_index);
if (delayed_work_pending(&xchg->timeout_work))
UNF_DEL_XCHG_TIMER_SAFE(xchg);
INIT_DELAYED_WORK(&xchg->timeout_work, unf_sfs_xchg_timeout);
spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
return xchg;
}
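/* Exchange allocation entry: select an exchange manager on the root
 * L_Port (fixed manager 0 or round-robin), allocate an INI or SFS
 * exchange from it, and retry with the next manager on failure.
 */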
static void *unf_get_new_xchg(void *v_lport, unsigned int v_xchg_type,
unsigned short v_rx_id)
{
struct unf_lport_s *lport = NULL;
struct unf_xchg_mgr_s *xchg_mgr = NULL;
struct unf_xchg_s *xchg = NULL;
unsigned int xchg_type = 0;
unsigned short xchg_mgr_type;
unsigned int rtry_cnt = 0;
unsigned int last_exchg_mgr_idx;
xchg_mgr_type = (v_xchg_type >> 16);
xchg_type = v_xchg_type & 0xFFFF;
UNF_CHECK_VALID(0x956, UNF_TRUE, v_lport, return NULL);
/* In the case of NPIV, v_lport is the VPort pointer and shares
 * the ExchMgr of the root LPort.
 */
lport = ((struct unf_lport_s *)v_lport)->root_lport;
UNF_CHECK_VALID(0x957, UNF_TRUE, (lport), return NULL);
if (unlikely((atomic_read(&lport->port_no_operater_flag) ==
UNF_LPORT_NOP) ||
(atomic_read(&((struct unf_lport_s *)v_lport)->port_no_operater_flag) ==
UNF_LPORT_NOP)))
return NULL;
last_exchg_mgr_idx =
(unsigned int)atomic64_inc_return(&lport->last_exchg_mgr_idx);
try_next_mgr:
rtry_cnt++;
if (unlikely(rtry_cnt > UNF_EXCHG_MGR_NUM))
return NULL;
/* In fixed mode, only XchgMgr 0 is used */
if (unlikely(xchg_mgr_type == UNF_XCHG_MGR_TYPE_FIXED))
xchg_mgr = (struct unf_xchg_mgr_s *)lport->p_xchg_mgr[0];
else
xchg_mgr =
(struct unf_xchg_mgr_s *)
lport->p_xchg_mgr[last_exchg_mgr_idx % UNF_EXCHG_MGR_NUM];
if (unlikely(!xchg_mgr)) {
UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]Port(0x%x) get exchangemgr %u is null.",
lport->port_id,
last_exchg_mgr_idx % UNF_EXCHG_MGR_NUM);
return NULL;
}
last_exchg_mgr_idx++;
/* Allocate entries based on the Exchange type */
switch (xchg_type) {
case UNF_XCHG_TYPE_SFS:
xchg = unf_alloc_sfs_xchg(v_lport, xchg_mgr, xchg_type,
INVALID_VALUE16);
break;
case UNF_XCHG_TYPE_INI:
xchg = unf_alloc_io_xchg(v_lport, xchg_mgr, xchg_type,
INVALID_VALUE16);
break;
default:
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"Port(0x%x) unwonted, Exchange type(0x%x).",
lport->port_id, xchg_type);
break;
}
if (likely(xchg)) {
xchg->ox_id = INVALID_VALUE16;
xchg->abort_oxid = INVALID_VALUE16;
xchg->rx_id = INVALID_VALUE16;
xchg->debug_hook = UNF_FALSE;
xchg->alloc_jif = jiffies;
atomic_set(&xchg->ref_cnt, 1);
atomic_set(&xchg->esgl_cnt, 0);
} else {
goto try_next_mgr;
}
return xchg;
}
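/* Drop the caller's reference; the exchange is recycled to its free pool
 * once the reference count reaches zero.
 */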
static void unf_free_xchg(void *v_lport, void *v_xchg)
{
struct unf_xchg_s *xchg = NULL;
UNF_REFERNCE_VAR(v_lport);
UNF_CHECK_VALID(0x958, UNF_TRUE, (v_xchg), return);
xchg = (struct unf_xchg_s *)v_xchg;
unf_xchg_ref_dec(xchg, XCHG_FREE_XCHG);
}
void unf_release_xchg_mgr_temp(struct unf_lport_s *v_lport)
{
UNF_CHECK_VALID(0x960, UNF_TRUE, v_lport, return);
if (v_lport->dirty_flag & UNF_LPORT_DIRTY_FLAG_XCHGMGR_DIRTY) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"Port(0x%x) has dirty exchange, Don't release exchange manager template.",
v_lport->port_id);
return;
}
memset(&v_lport->xchg_mgr_temp, 0,
sizeof(struct unf_cm_xchg_mgr_template_s));
v_lport->destroy_step = UNF_LPORT_DESTROY_STEP_7_DESTROY_XCHG_MGR_TMP;
}
static void unf_xchg_abort_all_sfs_xchg(struct unf_lport_s *v_lport,
int v_clean)
{
struct unf_xchg_hot_pool_s *hot_pool = NULL;
struct list_head *xchg_node = NULL;
struct list_head *next_xchg_node = NULL;
struct unf_xchg_s *xchg = NULL;
unsigned long pool_lock_flags = 0;
unsigned long xchg_lock_flags = 0;
unsigned int i = 0;
UNF_CHECK_VALID(0x961, UNF_TRUE, v_lport, return);
for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
hot_pool = unf_get_hot_pool_by_lport(v_lport, i);
if (unlikely(!hot_pool)) {
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT,
UNF_MAJOR,
"Port(0x%x) Hot Pool is NULL.",
v_lport->port_id);
continue;
}
if (v_clean == UNF_FALSE) {
spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock,
pool_lock_flags);
/* Clearing the SFS_Busy_list Exchange Resource */
list_for_each_safe(xchg_node, next_xchg_node,
&hot_pool->sfs_busylist) {
xchg = list_entry(xchg_node, struct unf_xchg_s,
list_xchg_entry);
spin_lock_irqsave(&xchg->xchg_state_lock,
xchg_lock_flags);
if (atomic_read(&xchg->ref_cnt) > 0)
xchg->io_state |= TGT_IO_STATE_ABORT;
spin_unlock_irqrestore(&xchg->xchg_state_lock,
xchg_lock_flags);
}
spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock,
pool_lock_flags);
} else {
continue;
}
}
}
static void unf_xchg_abort_ini_io_xchg(struct unf_lport_s *v_lport,
int v_clean)
{
/* Clean L_Port/V_Port Link Down I/O: Abort */
struct unf_xchg_hot_pool_s *hot_pool = NULL;
struct list_head *xchg_node = NULL;
struct list_head *next_xchg_node = NULL;
struct unf_xchg_s *xchg = NULL;
unsigned long pool_lock_flags = 0;
unsigned long xchg_lock_flags = 0;
unsigned int io_state = 0;
unsigned int i = 0;
UNF_CHECK_VALID(0x962, UNF_TRUE, (v_lport), return);
for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
hot_pool = unf_get_hot_pool_by_lport(v_lport, i);
if (unlikely(!hot_pool)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT,
UNF_WARN,
"[warn]Port(0x%x) hot pool is NULL",
v_lport->port_id);
continue;
}
if (v_clean == UNF_FALSE) {
spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock,
pool_lock_flags);
/* 1. Abort INI_Busy_List IO */
list_for_each_safe(xchg_node, next_xchg_node,
&hot_pool->ini_busylist) {
xchg = list_entry(xchg_node, struct unf_xchg_s,
list_xchg_entry);
spin_lock_irqsave(&xchg->xchg_state_lock,
xchg_lock_flags);
if (atomic_read(&xchg->ref_cnt) > 0)
xchg->io_state |=
INI_IO_STATE_DRABORT | io_state;
spin_unlock_irqrestore(&xchg->xchg_state_lock,
xchg_lock_flags);
}
spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock,
pool_lock_flags);
} else {
/* Do nothing, just return */
continue;
}
}
}
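/* Dispatch an abort-all request to the SFS or INI handler according to
 * the exchange type.
 */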
static void unf_xchg_abort_all_xchg(void *v_lport,
unsigned int v_xchg_type,
int v_clean)
{
struct unf_lport_s *lport = NULL;
UNF_CHECK_VALID(0x964, UNF_TRUE, v_lport, return);
lport = (struct unf_lport_s *)v_lport;
switch (v_xchg_type) {
case UNF_XCHG_TYPE_SFS:
unf_xchg_abort_all_sfs_xchg(lport, v_clean);
break;
/* Clean L_Port/V_Port Link Down I/O: Abort */
case UNF_XCHG_TYPE_INI:
unf_xchg_abort_ini_io_xchg(lport, v_clean);
break;
default:
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) unknown exch type(0x%x)",
lport->port_id, v_xchg_type);
break;
}
}
static void unf_xchg_abort_ini_send_tm_cmd(void *v_lport,
void *v_rport,
unsigned long long v_lun_id)
{
/*
* LUN Reset: set UP_ABORT tag, with:
* INI_Busy_list, IO_Wait_list,
* IO_Delay_list, IO_Delay_transfer_list
*/
struct unf_lport_s *lport = NULL;
struct unf_rport_s *rport = NULL;
struct unf_xchg_hot_pool_s *hot_pool = NULL;
struct list_head *node = NULL;
struct list_head *next_node = NULL;
struct unf_xchg_s *xchg = NULL;
unsigned long flags = 0;
unsigned long xchg_flag = 0;
unsigned int i = 0;
unsigned long long raw_lunid = 0;
UNF_CHECK_VALID(0x981, UNF_TRUE, v_lport, return);
UNF_CHECK_VALID(0x981, UNF_TRUE, v_rport, return);
lport = ((struct unf_lport_s *)v_lport)->root_lport;
UNF_CHECK_VALID(0x982, UNF_TRUE, (lport), return);
rport = (struct unf_rport_s *)v_rport;
for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
hot_pool = unf_get_hot_pool_by_lport(lport, i);
if (unlikely(!hot_pool)) {
UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]Port(0x%x) hot pool is NULL",
lport->port_id);
continue;
}
spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags);
/* 1. for each exchange from busy list */
list_for_each_safe(node, next_node,
&hot_pool->ini_busylist) {
xchg = list_entry(node, struct unf_xchg_s,
list_xchg_entry);
raw_lunid = *(unsigned long long *)
(xchg->fcp_cmnd.lun) >> 16 &
0x000000000000ffff;
if ((v_lun_id == raw_lunid) &&
(rport == xchg->rport)) {
spin_lock_irqsave(&xchg->xchg_state_lock,
xchg_flag);
xchg->io_state |= INI_IO_STATE_TMF_ABORT;
spin_unlock_irqrestore(&xchg->xchg_state_lock,
xchg_flag);
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT,
UNF_MAJOR,
"[info]Exchange(%p) state(0x%x) S_ID(0x%x) D_ID(0x%x) tag(0x%x) abort by TMF CMD",
xchg, xchg->io_state, lport->nport_id,
rport->nport_id, xchg->hot_pool_tag);
}
}
spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags);
}
}
static void unf_xchg_abort_by_lun(void *v_lport,
void *v_rport,
unsigned long long v_lun_id,
void *v_tm_xchg,
int v_abort_all_lun_flag)
{
/* ABORT: set UP_ABORT tag for target LUN I/O */
struct unf_xchg_s *tm_xchg = (struct unf_xchg_s *)v_tm_xchg;
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"[event]Port(0x%x) LUN_ID(0x%llx) TM_EXCH(0x%p) flag(%d)",
((struct unf_lport_s *)v_lport)->port_id,
v_lun_id, v_tm_xchg, v_abort_all_lun_flag);
/* for INI Mode */
if (!tm_xchg) {
/*
* LUN Reset: set UP_ABORT tag, with:
* INI_Busy_list, IO_Wait_list,
* IO_Delay_list, IO_Delay_transfer_list
*/
unf_xchg_abort_ini_send_tm_cmd(v_lport, v_rport, v_lun_id);
return;
}
}
static void unf_xchg_abort_ini_tmf_target_reset(void *v_lport, void *v_rport)
{
/*
* LUN Reset: set UP_ABORT tag, with:
* INI_Busy_list, IO_Wait_list,
* IO_Delay_list, IO_Delay_transfer_list
*/
struct unf_lport_s *lport = NULL;
struct unf_rport_s *rport = NULL;
struct unf_xchg_hot_pool_s *hot_pool = NULL;
struct list_head *node = NULL;
struct list_head *next_node = NULL;
struct unf_xchg_s *xchg = NULL;
unsigned long flags = 0;
unsigned long xchg_flag = 0;
unsigned int i = 0;
UNF_CHECK_VALID(0x981, UNF_TRUE, v_lport, return);
UNF_CHECK_VALID(0x981, UNF_TRUE, v_rport, return);
lport = ((struct unf_lport_s *)v_lport)->root_lport;
UNF_CHECK_VALID(0x982, UNF_TRUE, (lport), return);
rport = (struct unf_rport_s *)v_rport;
for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
hot_pool = unf_get_hot_pool_by_lport(lport, i);
if (unlikely(!hot_pool)) {
UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]Port(0x%x) hot pool is NULL",
lport->port_id);
continue;
}
spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags);
/* 1. for each exchange from busy_list */
list_for_each_safe(node, next_node,
&hot_pool->ini_busylist) {
xchg = list_entry(node, struct unf_xchg_s,
list_xchg_entry);
if (rport == xchg->rport) {
spin_lock_irqsave(&xchg->xchg_state_lock,
xchg_flag);
xchg->io_state |= INI_IO_STATE_TMF_ABORT;
spin_unlock_irqrestore(&xchg->xchg_state_lock,
xchg_flag);
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT,
UNF_MAJOR,
"[info]Exchange(%p) state(0x%x) S_ID(0x%x) D_ID(0x%x) tag(0x%x) abort by TMF CMD",
xchg, xchg->io_state,
lport->nport_id,
rport->nport_id, xchg->hot_pool_tag);
}
}
spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags);
}
}
static void unf_xchg_abort_by_session(void *v_lport, void *v_rport)
{
/*
* LUN Reset: set UP_ABORT tag, with:
* INI_Busy_list, IO_Wait_list,
* IO_Delay_list, IO_Delay_transfer_list
*/
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"[event]Port(0x%x) Rport(0x%x) start session reset with TMF",
((struct unf_lport_s *)v_lport)->port_id,
((struct unf_rport_s *)v_rport)->nport_id);
unf_xchg_abort_ini_tmf_target_reset(v_lport, v_rport);
}
static void unf_ini_busy_io_xchg_abort(void *v_hot_pool, void *v_rport,
unsigned int v_sid, unsigned int v_did,
unsigned int v_extra_io_state)
{
/*
* for target session: Set (DRV) ABORT
* 1. R_Port remove
* 2. Send PLOGI_ACC callback
* 3. RCVD PLOGI
* 4. RCVD LOGO
*/
struct unf_xchg_hot_pool_s *hot_pool = NULL;
struct unf_xchg_s *xchg = NULL;
struct list_head *xchg_node = NULL;
struct list_head *next_xchg_node = NULL;
struct unf_rport_s *rport = NULL;
unsigned long xchg_lock_flags = 0;
rport = (struct unf_rport_s *)v_rport;
hot_pool = (struct unf_xchg_hot_pool_s *)v_hot_pool;
/* ABORT INI IO: INI_BUSY_LIST */
list_for_each_safe(xchg_node, next_xchg_node,
&hot_pool->ini_busylist) {
xchg = list_entry(xchg_node, struct unf_xchg_s,
list_xchg_entry);
spin_lock_irqsave(&xchg->xchg_state_lock, xchg_lock_flags);
if ((v_did == xchg->did) && (v_sid == xchg->sid) &&
(rport == xchg->rport) &&
(atomic_read(&xchg->ref_cnt) > 0)) {
xchg->scsi_cmnd_info.result =
UNF_SCSI_HOST(DID_IMM_RETRY);
xchg->io_state |= INI_IO_STATE_DRABORT;
xchg->io_state |= v_extra_io_state;
UNF_TRACE(UNF_EVTLOG_IO_INFO,
UNF_LOG_IO_ATT, UNF_MAJOR,
"[info]Abort INI:0x%p, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, %llu.",
xchg,
(unsigned int)xchg->hot_pool_tag,
(unsigned int)xchg->xchg_type,
(unsigned int)xchg->ox_id,
(unsigned int)xchg->rx_id,
(unsigned int)xchg->sid,
(unsigned int)xchg->did,
(unsigned int)xchg->io_state,
atomic_read(&xchg->ref_cnt),
xchg->alloc_jif);
}
spin_unlock_irqrestore(&xchg->xchg_state_lock,
xchg_lock_flags);
}
}
static void unf_xchg_mgr_io_xchg_abort(void *v_lport, void *v_rport,
unsigned int v_sid, unsigned int v_did,
unsigned int v_extra_io_state)
{
/*
* for target session: set ABORT
* 1. R_Port remove
* 2. Send PLOGI_ACC callback
* 3. RCVD PLOGI
* 4. RCVD LOGO
*/
struct unf_xchg_hot_pool_s *hot_pool = NULL;
struct unf_lport_s *lport = NULL;
unsigned long pool_lock_flags = 0;
unsigned int i = 0;
UNF_CHECK_VALID(0x983, UNF_TRUE, v_lport, return);
lport = ((struct unf_lport_s *)v_lport)->root_lport;
UNF_CHECK_VALID(0x984, UNF_TRUE, lport, return);
for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
hot_pool = unf_get_hot_pool_by_lport(lport, i);
if (unlikely(!hot_pool)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN,
UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) hot pool is NULL",
lport->port_id);
continue;
}
spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock,
pool_lock_flags);
/* 1. Clear INI (session) IO: INI Mode */
unf_ini_busy_io_xchg_abort(hot_pool, v_rport, v_sid,
v_did, v_extra_io_state);
spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock,
pool_lock_flags);
}
}
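/* Mark SFS exchanges that match the given S_ID/D_ID and R_Port with
 * TGT_IO_STATE_ABORT across all hot pools.
 */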
static void unf_xchg_mgr_sfs_xchg_abort(void *v_lport, void *v_rport,
unsigned int v_sid, unsigned int v_did)
{
struct unf_xchg_hot_pool_s *hot_pool = NULL;
struct list_head *xchg_node = NULL;
struct list_head *next_xchg_node = NULL;
struct unf_xchg_s *xchg = NULL;
struct unf_lport_s *lport = NULL;
struct unf_rport_s *rport = NULL;
unsigned long pool_lock_flags = 0;
unsigned long xchg_lock_flags = 0;
unsigned int i = 0;
UNF_CHECK_VALID(0x991, UNF_TRUE, (v_lport), return);
lport = ((struct unf_lport_s *)v_lport)->root_lport;
UNF_CHECK_VALID(0x992, UNF_TRUE, (lport), return);
for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
hot_pool = unf_get_hot_pool_by_lport(lport, i);
if (!hot_pool) {
UNF_TRACE(UNF_EVTLOG_IO_INFO,
UNF_LOG_IO_ATT, UNF_MAJOR,
"Port(0x%x) Hot Pool is NULL.",
lport->port_id);
continue;
}
rport = (struct unf_rport_s *)v_rport;
spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock,
pool_lock_flags);
/* Clear the SFS exchange of the corresponding connection */
list_for_each_safe(xchg_node, next_xchg_node,
&hot_pool->sfs_busylist) {
xchg = list_entry(xchg_node, struct unf_xchg_s,
list_xchg_entry);
spin_lock_irqsave(&xchg->xchg_state_lock,
xchg_lock_flags);
if ((v_did == xchg->did) && (v_sid == xchg->sid) &&
(rport == xchg->rport) &&
(atomic_read(&xchg->ref_cnt) > 0)) {
xchg->io_state |= TGT_IO_STATE_ABORT;
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT,
UNF_MAJOR,
"Abort SFS:0x%p---0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----%llu.",
xchg,
(unsigned int)xchg->hot_pool_tag,
(unsigned int)xchg->xchg_type,
(unsigned int)xchg->ox_id,
(unsigned int)xchg->rx_id,
(unsigned int)xchg->sid,
(unsigned int)xchg->did,
(unsigned int)xchg->io_state,
atomic_read(&xchg->ref_cnt),
xchg->alloc_jif);
}
spin_unlock_irqrestore(&xchg->xchg_state_lock,
xchg_lock_flags);
}
spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock,
pool_lock_flags);
}
}
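/* Fill in the L_Port exchange manager template with the allocate,
 * release, lookup, timer and abort callbacks implemented in this file.
 */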
unsigned int unf_init_xchg_mgr_temp(struct unf_lport_s *v_lport)
{
UNF_CHECK_VALID(0x959, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
v_lport->xchg_mgr_temp.pfn_unf_xchg_get_free_and_init =
unf_get_new_xchg;
v_lport->xchg_mgr_temp.pfn_unf_xchg_release = unf_free_xchg;
v_lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag =
unf_lookup_xchg_by_tag;
v_lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_id =
unf_find_xchg_by_oxid;
v_lport->xchg_mgr_temp.pfn_unf_xchg_add_timer =
unf_fc_xchg_add_timer;
v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer =
unf_xchg_cancel_timer;
v_lport->xchg_mgr_temp.pfn_unf_xchg_abort_all_io =
unf_xchg_abort_all_xchg;
v_lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_cmnd_sn =
unf_lookup_xchg_by_cmnd_sn;
v_lport->xchg_mgr_temp.pfn_unf_xchg_abort_by_lun =
unf_xchg_abort_by_lun;
v_lport->xchg_mgr_temp.pfn_unf_xchg_abort_by_session =
unf_xchg_abort_by_session;
v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_io_xchg_abort =
unf_xchg_mgr_io_xchg_abort;
v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_sfs_xchg_abort =
unf_xchg_mgr_sfs_xchg_abort;
return RETURN_OK;
}
void unf_set_hot_pool_wait_state(struct unf_lport_s *v_lport,
enum int_e v_wait_state)
{
struct unf_xchg_hot_pool_s *hot_pool = NULL;
unsigned long pool_lock_flags = 0;
unsigned int i = 0;
UNF_CHECK_VALID(0x965, UNF_TRUE, v_lport, return);
for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
hot_pool = unf_get_hot_pool_by_lport(v_lport, i);
if (unlikely(!hot_pool)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT,
UNF_WARN,
"[warn]Port(0x%x) hot pool is NULL",
v_lport->port_id);
continue;
}
spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock,
pool_lock_flags);
hot_pool->wait_state = v_wait_state;
spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock,
pool_lock_flags);
}
}
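/* Take a reference on an exchange after validating that its hot pool tag
 * is in range and that it is still the exchange registered under that
 * tag; fails if the reference count has already dropped to zero.
 */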
unsigned int unf_xchg_ref_inc(struct unf_xchg_s *v_xchg,
enum unf_ioflow_id_e v_io_stage)
{
struct unf_xchg_hot_pool_s *hot_pool = NULL;
unsigned long flags = 0;
unsigned int ret = UNF_RETURN_ERROR;
UNF_CHECK_VALID(0x967, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR);
if (unlikely(v_xchg->debug_hook == UNF_TRUE)) {
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"[info]Xchg(0x%p) State(0x%x) SID_DID(0x%x_0x%x) OX_ID_RX_ID(0x%x_0x%x) AllocJiff(%llu) Refcnt(%d) Stage(%s)",
v_xchg, v_xchg->io_state, v_xchg->sid,
v_xchg->did, v_xchg->ox_id, v_xchg->rx_id,
v_xchg->alloc_jif, atomic_read(&v_xchg->ref_cnt),
io_stage[v_io_stage].stage);
}
hot_pool = v_xchg->hot_pool;
UNF_CHECK_VALID(0x968, UNF_TRUE, hot_pool, return UNF_RETURN_ERROR);
UNF_REFERNCE_VAR(v_io_stage);
/* Exchange -> Hot Pool Tag check */
if (unlikely((v_xchg->hot_pool_tag >=
(hot_pool->slab_total_sum + hot_pool->base)) ||
(v_xchg->hot_pool_tag < hot_pool->base))) {
UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]Xchg(0x%p) S_ID(%xh) D_ID(0x%x) hot_pool_tag(0x%x) is bigger than slab total num(0x%x) base(0x%x)",
v_xchg, v_xchg->sid, v_xchg->did,
v_xchg->hot_pool_tag,
hot_pool->slab_total_sum + hot_pool->base,
hot_pool->base);
return UNF_RETURN_ERROR;
}
/* atomic read & inc */
spin_lock_irqsave(&v_xchg->xchg_state_lock, flags);
if (unlikely(atomic_read(&v_xchg->ref_cnt) <= 0)) {
ret = UNF_RETURN_ERROR;
} else {
if (unf_get_xchg_by_xchg_tag(hot_pool,
v_xchg->hot_pool_tag -
hot_pool->base) ==
v_xchg) {
atomic_inc(&v_xchg->ref_cnt);
ret = RETURN_OK;
} else {
ret = UNF_RETURN_ERROR;
}
}
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags);
return ret;
}
void unf_xchg_ref_dec(struct unf_xchg_s *v_xchg,
enum unf_ioflow_id_e v_io_stage)
{
/* Atomic dec ref_cnt & test, free exchange
* if necessary (ref_cnt==0)
*/
struct unf_xchg_hot_pool_s *hot_pool = NULL;
void (*pfn_free_xchg)(struct unf_xchg_s *) = NULL;
unsigned long flags = 0;
unsigned long xchg_lock_flags = 0;
UNF_CHECK_VALID(0x969, UNF_TRUE, (v_xchg), return);
if (v_xchg->debug_hook == UNF_TRUE) {
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"[info]Xchg(0x%p) State(0x%x) SID_DID(0x%x_0x%x) OXID_RXID(0x%x_0x%x) AllocJiff(%llu) Refcnt(%d) Statge %s",
v_xchg, v_xchg->io_state, v_xchg->sid,
v_xchg->did, v_xchg->ox_id, v_xchg->rx_id,
v_xchg->alloc_jif, atomic_read(&v_xchg->ref_cnt),
io_stage[v_io_stage].stage);
}
hot_pool = v_xchg->hot_pool;
UNF_CHECK_VALID(0x970, UNF_TRUE, hot_pool, return);
UNF_CHECK_VALID(0x970, UNF_TRUE,
v_xchg->hot_pool_tag >= hot_pool->base, return);
UNF_REFERNCE_VAR(v_io_stage);
/*
* 1. Atomic dec & test
* 2. Free exchange if necessary (ref_cnt == 0)
*/
spin_lock_irqsave(&v_xchg->xchg_state_lock, xchg_lock_flags);
if (atomic_dec_and_test(&v_xchg->ref_cnt)) {
pfn_free_xchg = v_xchg->pfn_free_xchg;
spin_unlock_irqrestore(&v_xchg->xchg_state_lock,
xchg_lock_flags);
spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags);
unf_hot_pool_slab_set(hot_pool,
v_xchg->hot_pool_tag - hot_pool->base,
NULL);
/* Delete exchange list entry */
list_del_init(&v_xchg->list_xchg_entry);
hot_pool->total_xchges--;
spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags);
/* unf_free_fcp_xchg ---> unf_done_ini_xchg */
if (pfn_free_xchg)
pfn_free_xchg(v_xchg);
} else {
spin_unlock_irqrestore(&v_xchg->xchg_state_lock,
xchg_lock_flags);
}
}
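/* Return UNF_TRUE only when every exchange manager's INI busy list is
 * empty, i.e. all outstanding INI I/O on the port has completed.
 */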
bool unf_busy_io_completed(struct unf_lport_s *v_lport)
{
struct unf_xchg_mgr_s *xchg_mgr = NULL;
unsigned long pool_lock_flags = 0;
unsigned int i;
UNF_CHECK_VALID(0x5841, UNF_TRUE, v_lport, return UNF_TRUE);
for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
xchg_mgr = unf_get_xchg_mgr_by_lport(v_lport, i);
if (unlikely(!xchg_mgr)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) Exchange Manager is NULL",
v_lport->port_id);
continue;
}
spin_lock_irqsave(&xchg_mgr->hot_pool->xchg_hot_pool_lock,
pool_lock_flags);
if (!list_empty(&xchg_mgr->hot_pool->ini_busylist)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT,
UNF_INFO, "[info]Port(0x%x) ini busylist is not empty.",
v_lport->port_id);
spin_unlock_irqrestore(
&xchg_mgr->hot_pool->xchg_hot_pool_lock,
pool_lock_flags);
return UNF_FALSE;
}
spin_unlock_irqrestore(
&xchg_mgr->hot_pool->xchg_hot_pool_lock,
pool_lock_flags);
}
return UNF_TRUE;
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __UNF_FCEXCH_H__
#define __UNF_FCEXCH_H__
#include "unf_scsi_common.h"
#include "unf_lport.h"
#define DRV_VERIFY_CRC_MASK (1 << 1)
#define DRV_VERIFY_APP_MASK (1 << 2)
#define DRV_VERIFY_LBA_MASK (1 << 3)
#define DRV_DIF_CRC_POS 0
#define DRV_DIF_CRC_LEN 2
#define DRV_DIF_APP_POS 2
#define DRV_DIF_APP_LEN 2
#define DRV_DIF_LBA_POS 4
#define DRV_DIF_LBA_LEN 4
enum unf_ioflow_id_e {
XCHG_ALLOC = 0,
TGT_RECEIVE_ABTS,
TGT_ABTS_DONE,
TGT_IO_SRR,
SFS_RESPONSE,
SFS_TIMEOUT,
INI_SEND_CMND,
INI_RESPONSE_DONE,
INI_EH_ABORT,
INI_EH_DEVICE_RESET,
INI_EH_BLS_DONE,
INI_IO_TIMEOUT,
INI_REQ_TIMEOUT,
XCHG_CANCEL_TIMER,
XCHG_FREE_XCHG,
SEND_ELS,
IO_XCHG_WAIT,
XCHG_BUTT
};
enum unf_xchg_type_e {
UNF_XCHG_TYPE_INI = 0, /* INI IO */
UNF_XCHG_TYPE_SFS = 1, /* SFS IO */
UNF_XCHG_TYPE_INVALID
};
enum unf_xchg_mgr_type_e {
UNF_XCHG_MGR_TYPE_RANDOM = 0,
UNF_XCHG_MGR_TYPE_FIXED = 1,
UNF_XCHG_MGR_TYPE_INVALID
};
enum tgt_io_xchg_send_stage_e {
TGT_IO_SEND_STAGE_NONE = 0,
TGT_IO_SEND_STAGE_DOING = 1, /* xfer/rsp into queue */
TGT_IO_SEND_STAGE_DONE = 2, /* xfer/rsp into queue complete */
TGT_IO_SEND_STAGE_ECHO = 3, /* driver handled TSTS */
TGT_IO_SEND_STAGE_INVALID
};
enum tgt_io_send_result_e {
TGT_IO_SEND_RESULT_OK = 0, /* xfer/rsp enqueue succeed */
TGT_IO_SEND_RESULT_FAIL = 1, /* xfer/rsp enqueue fail */
TGT_IO_SEND_RESULT_INVALID
};
struct unf_ioflow_id_s {
char *stage;
};
#define UNF_CHECK_OXID_MATCHED(v_oxid, v_oid, xchg) \
((v_oxid == xchg->ox_id) && (v_oid == xchg->oid) && \
(atomic_read(&xchg->ref_cnt) > 0))
#define UNF_CHECK_ALLOCTIME_VALID(lport, xchg_tag, exchg, pkg_alloc_time, \
xchg_alloc_time) \
do { \
if (unlikely((pkg_alloc_time != 0) && \
(pkg_alloc_time != xchg_alloc_time))) { \
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, \
UNF_ERR, \
"Lport(0x%x_0x%x_0x%x_0x%p) AllocTime is not equal,PKG AllocTime:0x%x,Exhg AllocTime:0x%x", \
lport->port_id, lport->nport_id, \
xchg_tag, exchg, \
pkg_alloc_time, xchg_alloc_time); \
return UNF_RETURN_ERROR; \
}; \
if (unlikely(pkg_alloc_time == 0)) { \
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, \
UNF_MAJOR, \
"Lport(0x%x_0x%x_0x%x_0x%p) pkgtime err,PKG AllocTime:0x%x,Exhg AllocTime:0x%x", \
lport->port_id, lport->nport_id, \
xchg_tag, exchg, \
pkg_alloc_time, xchg_alloc_time); \
}; \
} while (0)
#define UNF_GET_DIF_ERROR_LEVEL1(v_xchg, dif_control, check_err_code, \
tgt_err_code, default_err_code) \
do { \
if (DRV_VERIFY_CRC_MASK & \
v_xchg->dif_control.protect_opcode) { \
if (memcmp(&dif_control->actual_dif[DRV_DIF_CRC_POS], \
&dif_control->expected_dif[DRV_DIF_CRC_POS], \
DRV_DIF_CRC_LEN) != 0) { \
tgt_err_code = default_err_code; \
} \
} \
} while (0)
#define UNF_GET_DIF_ERROR_LEVEL2(v_xchg, dif_control, check_err_code, \
tgt_err_code, default_err_code) \
do { \
if ((check_err_code == tgt_err_code) && \
(DRV_VERIFY_LBA_MASK & v_xchg->dif_control.protect_opcode)) { \
if (memcmp(&dif_control->actual_dif[DRV_DIF_LBA_POS], \
&dif_control->expected_dif[DRV_DIF_LBA_POS], \
DRV_DIF_LBA_LEN) != 0) { \
tgt_err_code = default_err_code; \
} \
} \
} while (0)
#define UNF_GET_DIF_ERROR_LEVEL3(v_xchg, dif_control, check_err_code, \
tgt_err_code, default_err_code) \
UNF_GET_DIF_ERROR_LEVEL2(v_xchg, dif_control, check_err_code, \
tgt_err_code, default_err_code)
#define UNF_SET_SCSI_CMND_RESULT(v_xchg, v_result) \
((v_xchg)->scsi_cmnd_info.result = (v_result))
#define UNF_GET_GS_SFS_XCHG_TIMER(v_lport) (3 * \
(unsigned long)(v_lport)->ra_tov)
#define UNF_GET_BLS_SFS_XCHG_TIMER(v_lport) (2 * \
(unsigned long)(v_lport)->ra_tov)
#define UNF_GET_ELS_SFS_XCHG_TIMER(v_lport) (2 * \
(unsigned long)(v_lport)->ra_tov)
#define UNF_XCHG_MGR_FC 0
#define UNF_XCHG_MIN_XID 0x0000
#define UNF_XCHG_MAX_XID 0xffff
#define UNF_ELS_ECHO_RESULT_OK 0
#define UNF_ELS_ECHO_RESULT_FAIL 1
struct unf_xchg_s;
/* Xchg hot pool, busy IO lookup Xchg */
struct unf_xchg_hot_pool_s {
/* Xchg sum, in hot pool */
unsigned short total_xchges;
/* Total number of resources consumed, corresponding to the buffer */
unsigned int total_res_cnt;
enum int_e wait_state;
/* pool lock */
spinlock_t xchg_hot_pool_lock;
/* Xchg position lists */
struct list_head sfs_busylist;
struct list_head ini_busylist;
struct list_head list_destroy_xchg;
/* Next free hot pool slab index */
unsigned short slab_next_index;
unsigned short slab_total_sum;
unsigned short base;
struct unf_lport_s *lport;
struct unf_xchg_s *xchg_slab[0];
};
/* FREE POOL of Xchg*/
struct unf_xchg_free_pool_s {
spinlock_t xchg_free_pool_lock;
unsigned int fcp_xchg_sum;
/* IO used Xchg */
struct list_head list_free_xchg_list;
unsigned int total_fcp_xchg;
/* SFS used Xchg */
struct list_head list_sfs_xchg_list;
unsigned int total_sfs_xchg;
unsigned int sfs_xchg_sum;
struct completion *xchg_mgr_completion;
};
struct unf_big_sfs_s {
struct list_head entry_big_sfs;
void *vaddr;
unsigned int size;
};
struct unf_big_sfs_pool_s {
void *big_sfs_pool;
unsigned int free_count;
struct list_head list_free_pool;
struct list_head list_busy_pool;
spinlock_t big_sfs_pool_lock;
};
/* Xchg Manager for vport Xchg */
struct unf_xchg_mgr_s {
/* MG type */
unsigned int mgr_type;
/* MG entry */
struct list_head xchg_mgr_entry;
/* MG attributes */
unsigned short min_xid;
unsigned short max_xid;
unsigned int mem_size;
/* MG allocated resources */
void *fcp_mm_start;
unsigned int sfs_mem_size;
void *sfs_mm_start;
dma_addr_t sfs_phy_addr;
struct unf_xchg_free_pool_s free_pool;
struct unf_xchg_hot_pool_s *hot_pool;
struct unf_big_sfs_pool_s st_big_sfs_pool;
struct buf_describe_s big_sfs_buf_list;
struct buf_describe_s rsp_buf_list;
};
struct unf_seq_s {
/* Seq ID */
unsigned char seq_id;
/* Seq Cnt */
unsigned short seq_cnt;
/* Seq state and len,maybe used for fcoe */
unsigned short seq_stat;
unsigned int rec_data_len;
};
union unf_xchg_fcp_sfs_u {
struct unf_sfs_entry_s sfs_entry;
struct unf_fcp_rsp_iu_entry_s fcp_rsp_entry;
};
#define UNF_IO_STATE_NEW 0
#define TGT_IO_STATE_SEND_XFERRDY (1 << 2) /* succeed to send XFer rdy */
#define TGT_IO_STATE_RSP (1 << 5) /* chip send rsp */
#define TGT_IO_STATE_ABORT (1 << 7)
/* INI Upper-layer Task Management Commands */
#define INI_IO_STATE_UPTASK (1 << 15)
/* INI Upper-layer timeout Abort flag */
#define INI_IO_STATE_UPABORT (1 << 16)
#define INI_IO_STATE_DRABORT (1 << 17) /* INI driver Abort flag */
#define INI_IO_STATE_DONE (1 << 18) /* INI complete flag */
#define INI_IO_STATE_WAIT_RRQ (1 << 19) /* INI wait send rrq */
#define INI_IO_STATE_UPSEND_ERR (1 << 20) /* INI send fail flag */
/* INI only clear firmware resource flag */
#define INI_IO_STATE_ABORT_RESOURCE (1 << 21)
/* IOC abort: INI sends ABTS, waits on a 5s timeout semaphore, then sets 1 */
#define INI_IO_STATE_ABORT_TIMEOUT (1 << 22)
#define INI_IO_STATE_RRQSEND_ERR (1 << 23) /* INI send RRQ fail flag */
/* INI busy IO session logo status */
#define INI_IO_STATE_LOGO (1 << 24)
#define INI_IO_STATE_TMF_ABORT (1 << 25) /* INI TMF ABORT IO flag */
#define INI_IO_STATE_REC_TIMEOUT_WAIT (1 << 26) /* INI REC TIMEOUT WAIT */
#define INI_IO_STATE_REC_TIMEOUT (1 << 27) /* INI REC TIMEOUT */
#define TMF_RESPONSE_RECEIVED (1 << 0)
#define MARKER_STS_RECEIVED (1 << 1)
#define ABTS_RESPONSE_RECEIVED (1 << 2)
struct unf_scsi_cmd_info_s {
unsigned long time_out;
unsigned long abort_timeout;
void *scsi_cmnd;
void (*pfn_done)(struct unf_scsi_cmd_s *);
ini_get_sgl_entry_buf pfn_unf_get_sgl_entry_buf;
struct unf_ini_error_code_s *err_code_table; /* error code table */
char *sense_buf;
unsigned int err_code_table_cout; /* Size of the error code table */
unsigned int buf_len;
unsigned int entry_cnt;
unsigned int result; /* Stores command execution results */
unsigned int port_id;
/* Re-search for rport based on scsiid during retry. Otherwise,
* data inconsistency will occur
*/
unsigned int scsi_id;
void *sgl;
};
struct unf_req_sgl_info_s {
void *sgl;
void *sgl_start;
unsigned int req_index;
unsigned int entry_index;
};
struct unf_els_echo_info_s {
unsigned long long response_time;
struct semaphore echo_sync_sema;
unsigned int echo_result;
};
struct unf_xchg_s {
/* Manager resource related */
/* list delete from HotPool */
struct unf_xchg_hot_pool_s *hot_pool;
/* attach to FreePool */
struct unf_xchg_free_pool_s *free_pool;
struct unf_xchg_mgr_s *xchg_mgr;
struct unf_lport_s *lport; /* Local LPort/VLPort */
struct unf_rport_s *rport; /* Remote Port */
struct unf_rport_s *disc_rport; /* Discovered Remote Port */
struct list_head list_xchg_entry;
struct list_head list_abort_xchg_entry;
spinlock_t xchg_state_lock;
/* Xchg reference */
atomic_t ref_cnt;
atomic_t esgl_cnt;
int debug_hook;
/* Xchg attribution */
unsigned short hot_pool_tag; /* Hot pool tag */
/* Only used for abort; the ox_id for lun reset/logo/plogi/linkdown
 * is set to 0xffff
 */
unsigned short abort_oxid;
unsigned int xchg_type; /* LS,TGT CMND ,REQ,or SCSI Cmnd */
unsigned short ox_id;
unsigned short rx_id;
unsigned int sid;
unsigned int did;
unsigned int oid; /* ID of the exchange initiator */
unsigned int disc_port_id; /* Send GNN_ID/GFF_ID NPortId */
unsigned char seq_id;
unsigned char byte_orders; /* Byte order */
struct unf_seq_s seq;
unsigned int cmnd_code;
unsigned int world_id;
/* Dif control */
struct unf_dif_control_info_s dif_control;
struct dif_info_s dif_info;
/* IO status: abort, timeout */
unsigned int io_state; /* TGT_IO_STATE_E */
unsigned int tmf_state; /* TMF STATE */
unsigned int ucode_abts_state;
unsigned int abts_state;
/* IO Enqueuing */
enum tgt_io_xchg_send_stage_e io_send_stage; /* TGT_IO_SEND_STAGE_E */
/* IO Enqueuing result, success or failure */
enum tgt_io_send_result_e io_send_result; /* TGT_IO_SEND_RESULT_E */
/* Whether ABORT is delivered to the chip for IO */
unsigned char io_send_abort;
/* Result of delivering ABORT to the chip
* (success: UNF_TRUE; failure: UNF_FALSE)
*/
unsigned char io_abort_result;
/* For INI, indicates the length of the data
 * transferred over the PCI link
 */
unsigned int data_len;
/* Residual length: greater than 0 means underflow, less than 0 means overflow */
int resid_len;
/* +++++++++++++++++IO Special++++++++++++++++++++ */
/* point to tgt cmnd/req/scsi cmnd */
/* Fcp cmnd */
struct unf_fcp_cmnd_s fcp_cmnd;
struct unf_scsi_cmd_info_s scsi_cmnd_info;
struct unf_req_sgl_info_s req_sgl_info;
struct unf_req_sgl_info_s dif_sgl_info;
unsigned long long cmnd_sn;
/* timestamp */
unsigned long long start_jif;
unsigned long long alloc_jif;
unsigned long long io_front_jif;
/* I/O resources to be consumed, corresponding to the buffer */
unsigned int may_consume_res_cnt;
/* Number of resources consumed by I/Os. The value is not zero
* only when it is sent to the chip
*/
unsigned int fact_consume_res_cnt;
/* scsi req info */
unsigned int data_direction;
struct unf_big_sfs_s *big_sfs_buf;
/* scsi cmnd sense_buffer pointer */
union unf_xchg_fcp_sfs_u fcp_sfs_union;
/* One exchange may use several External Sgls */
struct list_head list_esgls;
struct unf_els_echo_info_s echo_info;
/* +++++++++++++++++Task Special++++++++++++++++++++ */
struct semaphore task_sema;
/* for RRQ ,IO Xchg add to SFS Xchg */
void *io_xchg;
/* Xchg delay work */
struct delayed_work timeout_work;
/* send result callback */
void (*pfn_ob_callback)(struct unf_xchg_s *);
/* Response IO callback */
void (*pfn_callback)(void *v_lport,
void *v_rport,
void *v_xchg);
/* Xchg release function */
void (*pfn_free_xchg)(struct unf_xchg_s *);
/* +++++++++++++++++low level Special++++++++++++++++++++ */
unsigned int private[PKG_MAX_PRIVATE_DATA_SIZE];
/* ABTS_RSP info */
struct unf_abts_rsps_s abts_rsps;
unsigned long long rport_bind_jifs;
/* sfs exchg ob callback status */
unsigned int ob_callback_sts;
unsigned int scsi_id;
atomic_t delay_flag;
void *upper_ct;
};
struct unf_esgl_page_s *unf_get_one_free_esgl_page(struct unf_lport_s *v_lport,
struct unf_xchg_s *v_xchg);
void unf_release_xchg_mgr_temp(struct unf_lport_s *v_lport);
unsigned int unf_init_xchg_mgr_temp(struct unf_lport_s *v_lport);
unsigned int unf_alloc_xchg_resource(struct unf_lport_s *v_lport);
void unf_free_all_xchg_mgr(struct unf_lport_s *v_lport);
void unf_xchg_mgr_destroy(struct unf_lport_s *v_lport);
unsigned int unf_xchg_ref_inc(struct unf_xchg_s *v_xchg,
enum unf_ioflow_id_e v_io_stage);
void unf_xchg_ref_dec(struct unf_xchg_s *v_xchg,
enum unf_ioflow_id_e v_io_stage);
struct unf_xchg_mgr_s *unf_get_xchg_mgr_by_lport(struct unf_lport_s *v_lport,
unsigned int);
struct unf_xchg_hot_pool_s *unf_get_hot_pool_by_lport(
struct unf_lport_s *v_lport, unsigned int);
void unf_free_lport_ini_xchg(struct unf_xchg_mgr_s *v_xchg_mgr,
int v_done_ini_flag);
struct unf_xchg_s *unf_cm_lookup_xchg_by_cmnd_sn(
void *v_lport,
unsigned long long v_command_sn,
unsigned int v_world_id);
void *unf_cm_lookup_xchg_by_id(void *v_lport, unsigned short v_oxid,
unsigned int v_oid);
void unf_cm_xchg_abort_by_lun(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport,
unsigned long long v_lun_id,
void *v_tm_xchg, int v_abort_all_lun_flag);
void unf_cm_xchg_abort_by_session(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport);
void unf_cm_xchg_mgr_abort_io_by_id(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport,
unsigned int v_sid,
unsigned int v_did,
unsigned int extra_io_stat);
void unf_cm_xchg_mgr_abort_sfs_by_id(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport,
unsigned int v_sid,
unsigned int v_did);
void unf_cm_free_xchg(void *v_lport, void *v_xchg);
void *unf_cm_get_free_xchg(void *v_lport, unsigned int v_xchg_type);
void *unf_cm_lookup_xchg_by_tag(void *v_lport, unsigned short v_hot_pool_tag);
void unf_release_esgls(struct unf_xchg_s *v_xchg);
void unf_show_all_xchg(struct unf_lport_s *v_lport,
struct unf_xchg_mgr_s *v_xchg_mgr);
void unf_destroy_dirty_xchg(struct unf_lport_s *v_lport, int v_show_only);
void unf_wakeup_scsi_task_cmnd(struct unf_lport_s *v_lport);
void unf_set_hot_pool_wait_state(struct unf_lport_s *v_lport,
enum int_e v_wait_state);
void unf_free_lport_all_xchg(struct unf_lport_s *v_lport);
bool unf_busy_io_completed(struct unf_lport_s *v_lport);
#endif
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __UNF_SERVICE_H__
#define __UNF_SERVICE_H__
extern unsigned int max_frame_size;
#define UNF_SET_ELS_ACC_TYPE(v_els_cmd) \
((unsigned int)(v_els_cmd) << 16 | ELS_ACC)
#define UNF_SET_ELS_RJT_TYPE(v_els_cmd) \
((unsigned int)(v_els_cmd) << 16 | ELS_RJT)
unsigned int unf_send_gid_ft(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport);
unsigned int unf_send_gid_pt(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport);
unsigned int unf_send_gpn_id(struct unf_lport_s *v_lport,
struct unf_rport_s *v_sns_port,
unsigned int v_nport_id);
unsigned int unf_send_gnn_id(struct unf_lport_s *v_lport,
struct unf_rport_s *v_sns_port,
unsigned int v_nport_id);
unsigned int unf_send_gff_id(struct unf_lport_s *v_lport,
struct unf_rport_s *v_sns_port,
unsigned int v_nport_id);
unsigned int unf_send_flogi(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport);
unsigned int unf_send_fdisc(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport);
unsigned int unf_send_plogi(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport);
unsigned int unf_send_prli(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport);
unsigned int unf_receive_els_pkg(void *v_lport,
struct unf_frame_pkg_s *v_fra_pkg);
unsigned int unf_send_rff_id(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport);
unsigned int unf_send_rft_id(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport);
unsigned int unf_send_logo(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport);
unsigned int unf_send_echo(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport,
unsigned int *v_time);
unsigned int unf_send_abts(struct unf_lport_s *v_lport,
struct unf_xchg_s *v_xchg);
unsigned int unf_send_scr(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport);
unsigned int unf_send_rrq(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport,
struct unf_xchg_s *v_xchg);
void unf_rport_immediate_linkdown(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport);
unsigned int unf_receive_bls_pkg(void *v_lport,
struct unf_frame_pkg_s *v_pkg);
struct unf_rport_s *unf_find_rport(struct unf_lport_s *v_lport,
unsigned int v_rport_nport_id,
unsigned long long v_port_name);
void unf_login_with_loop_node(struct unf_lport_s *v_lport, unsigned int alpa);
unsigned int unf_receive_gs_pkg(void *v_lport,
struct unf_frame_pkg_s *v_fra_pkg);
void unf_rcv_gnn_id_rsp_unknown(struct unf_lport_s *v_lport,
struct unf_rport_s *v_sns_port,
unsigned int v_nport_id);
void unf_rcv_gpn_id_rsp_unknown(struct unf_lport_s *v_lport,
unsigned int v_nport_id);
void unf_rcv_gff_id_rsp_unknown(struct unf_lport_s *v_lport,
unsigned int v_nport_id);
unsigned int unf_release_rport_res(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport);
unsigned int unf_low_level_bbscn(struct unf_lport_s *v_lport);
unsigned int unf_send_els_done(void *v_lport, struct unf_frame_pkg_s *v_pkg);
unsigned int unf_send_rec(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport,
struct unf_xchg_s *v_xchg);
typedef int (*unf_evt_task)(void *v_arg_in, void *v_arg_out);
#endif /* __UNF_SERVICE_H__ */