Commit e1ab4d02 authored by denglei, committed by Zheng Zengkai

scsi/hifc: add io module of hifc driver

driver inclusion
category: feature
bugzilla: 21

-----------------------------------------------------------------------

This module mainly processes I/O commands from the SCSI layer
and implements the exception handling mechanism:
1. Read/write I/O to the chip.
2. Handle I/O exceptions.
Signed-off-by: denglei <denglei25@huawei.com>
Reviewed-by: chenguangli <chenguangli2@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 825fba35
// SPDX-License-Identifier: GPL-2.0
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#include "hifc_module.h"
#include "hifc_hba.h"
#include "hifc_service.h"
#include "hifc_io.h"
/* Enabling the payload printing function requires access to the
 * exchange content, hence the extra include below
 */
#ifdef HIFC_PRINT_PAYLOADINFO_ENABLE
#include "unf_exchg.h"
#endif
/* Set this parameter based on E_D_TOV (2s) */
#define HIFC_IMMIDATA_ABORT_TIME 2000
#define hifc_fill_pkg_status(com_err_code, control, scsi_status) \
(((unsigned int)(com_err_code) << 16) |\
((unsigned int)(control) << 8) |\
(unsigned int)(scsi_status))
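/* Example (illustrative, not from the original driver):
 * hifc_fill_pkg_status(UNF_IO_FAILED, 0x02, 0x08) packs the common
 * error code into bits 31:16, the FCP control flags into bits 15:8
 * and the SCSI status into bits 7:0 of pkg->status.
 */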
unsigned int dif_protect_op_code = INVALID_VALUE32;
unsigned int dif_app_esc_check = HIFC_DIF_APP_REF_ESC_CHECK;
unsigned int dif_ref_esc_check = HIFC_DIF_APP_REF_ESC_CHECK;
unsigned int dif_sect_size;
unsigned int no_dif_sect_size;
unsigned int dix_flag;
unsigned int grd_ctrl;
unsigned int grd_agm_ctrl = HIFC_DIF_GUARD_VERIFY_ALGORITHM_CTL_T10_CRC16;
unsigned int cmp_app_tag_mask = 0xffff;
unsigned int ref_tag_mod = INVALID_VALUE32;
unsigned int rep_ref_tag;
unsigned short cmp_app_tag;
unsigned short rep_app_tag;
static void hifc_dif_err_count(struct hifc_hba_s *v_hba,
unsigned char v_dif_info)
{
unsigned char dif_info = v_dif_info;
HIFC_DIF_ERR_STAT(v_hba, HIFC_DIF_RECV_DIFERR_ALL);
if (dif_info & HIFC_DIF_ERROR_CODE_CRC)
HIFC_DIF_ERR_STAT(v_hba, HIFC_DIF_RECV_DIFERR_CRC);
if (dif_info & HIFC_DIF_ERROR_CODE_APP)
HIFC_DIF_ERR_STAT(v_hba, HIFC_DIF_RECV_DIFERR_APP);
if (dif_info & HIFC_DIF_ERROR_CODE_REF)
HIFC_DIF_ERR_STAT(v_hba, HIFC_DIF_RECV_DIFERR_REF);
}
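/* Example (hypothetical value): dif_info = 0x6 has both
 * HIFC_DIF_ERROR_CODE_CRC (0x2) and HIFC_DIF_ERROR_CODE_REF (0x4)
 * set, so the ALL, CRC and REF counters are incremented while the
 * APP counter is left untouched.
 */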
static void hifc_build_no_dif_control(struct unf_frame_pkg_s *v_pkg,
struct hifcoe_fc_dif_info_s *v_dif_info)
{
struct hifcoe_fc_dif_info_s *dif_info = v_dif_info;
/* dif enable or disable */
dif_info->wd0.difx_en = HIFC_DIF_DISABLE;
dif_info->wd1.vpid = v_pkg->qos_level;
dif_info->wd1.lun_qos_en = 0;
}
void hifc_dif_action_forward(struct hifcoe_fc_dif_info_s *v_dif_info_l1,
struct unf_dif_control_info_s *v_dif_ctrl_u1)
{
v_dif_info_l1->wd0.grd_ctrl |=
(v_dif_ctrl_u1->protect_opcode & UNF_VERIFY_CRC_MASK) ?
HIFC_DIF_GARD_REF_APP_CTRL_VERIFY :
HIFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY;
v_dif_info_l1->wd0.grd_ctrl |= (v_dif_ctrl_u1->protect_opcode &
UNF_REPLACE_CRC_MASK) ? HIFC_DIF_GARD_REF_APP_CTRL_REPLACE :
HIFC_DIF_GARD_REF_APP_CTRL_FORWARD;
v_dif_info_l1->wd0.ref_tag_ctrl |=
(v_dif_ctrl_u1->protect_opcode & UNF_VERIFY_LBA_MASK) ?
HIFC_DIF_GARD_REF_APP_CTRL_VERIFY :
HIFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY;
v_dif_info_l1->wd0.ref_tag_ctrl |=
(v_dif_ctrl_u1->protect_opcode & UNF_REPLACE_LBA_MASK) ?
HIFC_DIF_GARD_REF_APP_CTRL_REPLACE : HIFC_DIF_GARD_REF_APP_CTRL_FORWARD;
v_dif_info_l1->wd1.app_tag_ctrl |= (v_dif_ctrl_u1->protect_opcode &
UNF_VERIFY_APP_MASK) ? HIFC_DIF_GARD_REF_APP_CTRL_VERIFY :
HIFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY;
v_dif_info_l1->wd1.app_tag_ctrl |=
(v_dif_ctrl_u1->protect_opcode & UNF_REPLACE_APP_MASK) ?
HIFC_DIF_GARD_REF_APP_CTRL_REPLACE : HIFC_DIF_GARD_REF_APP_CTRL_FORWARD;
}
void hifc_dif_action_delete(struct hifcoe_fc_dif_info_s *v_dif_info_l1,
struct unf_dif_control_info_s *v_dif_ctrl_u1)
{
v_dif_info_l1->wd0.grd_ctrl |=
(v_dif_ctrl_u1->protect_opcode & UNF_VERIFY_CRC_MASK) ?
HIFC_DIF_GARD_REF_APP_CTRL_VERIFY :
HIFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY;
v_dif_info_l1->wd0.grd_ctrl |= HIFC_DIF_GARD_REF_APP_CTRL_DELETE;
v_dif_info_l1->wd0.ref_tag_ctrl |=
(v_dif_ctrl_u1->protect_opcode & UNF_VERIFY_LBA_MASK) ?
HIFC_DIF_GARD_REF_APP_CTRL_VERIFY :
HIFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY;
v_dif_info_l1->wd0.ref_tag_ctrl |= HIFC_DIF_GARD_REF_APP_CTRL_DELETE;
v_dif_info_l1->wd1.app_tag_ctrl |=
(v_dif_ctrl_u1->protect_opcode & UNF_VERIFY_APP_MASK) ?
HIFC_DIF_GARD_REF_APP_CTRL_VERIFY :
HIFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY;
v_dif_info_l1->wd1.app_tag_ctrl |= HIFC_DIF_GARD_REF_APP_CTRL_DELETE;
}
static void hifc_convert_dif_action(
struct unf_dif_control_info_s *v_dif_ctrl_u1,
struct hifcoe_fc_dif_info_s *v_dif_info_l1)
{
struct hifcoe_fc_dif_info_s *dif_info_l1 = NULL;
struct unf_dif_control_info_s *dif_ctrl_u1 = NULL;
dif_info_l1 = v_dif_info_l1;
dif_ctrl_u1 = v_dif_ctrl_u1;
switch (UNF_DIF_ACTION_MASK & dif_ctrl_u1->protect_opcode) {
case UNF_DIF_ACTION_VERIFY_AND_REPLACE:
case UNF_DIF_ACTION_VERIFY_AND_FORWARD:
hifc_dif_action_forward(dif_info_l1, dif_ctrl_u1);
break;
case UNF_DIF_ACTION_INSERT:
dif_info_l1->wd0.grd_ctrl |=
HIFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY;
dif_info_l1->wd0.grd_ctrl |=
HIFC_DIF_GARD_REF_APP_CTRL_INSERT;
dif_info_l1->wd0.ref_tag_ctrl |=
HIFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY;
dif_info_l1->wd0.ref_tag_ctrl |=
HIFC_DIF_GARD_REF_APP_CTRL_INSERT;
dif_info_l1->wd1.app_tag_ctrl |=
HIFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY;
dif_info_l1->wd1.app_tag_ctrl |=
HIFC_DIF_GARD_REF_APP_CTRL_INSERT;
break;
case UNF_DIF_ACTION_VERIFY_AND_DELETE:
hifc_dif_action_delete(dif_info_l1, dif_ctrl_u1);
break;
default:
HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"Unknown dif protect opcode 0x%x",
dif_ctrl_u1->protect_opcode);
break;
}
}
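/* Example (illustrative): for protect_opcode ==
 * (UNF_DIF_ACTION_VERIFY_AND_FORWARD | UNF_VERIFY_CRC_MASK),
 * hifc_dif_action_forward() ORs HIFC_DIF_GARD_REF_APP_CTRL_VERIFY
 * into wd0.grd_ctrl and, with no replace bit set, also ORs in
 * HIFC_DIF_GARD_REF_APP_CTRL_FORWARD: the guard tag is verified and
 * forwarded unchanged.
 */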
void hifc_get_dif_info_l1(struct hifcoe_fc_dif_info_s *v_dif_info_l1,
struct unf_dif_control_info_s *v_dif_ctrl_u1)
{
v_dif_info_l1->wd1.cmp_app_tag_msk = cmp_app_tag_mask;
v_dif_info_l1->rep_app_tag = v_dif_ctrl_u1->app_tag;
v_dif_info_l1->rep_ref_tag = v_dif_ctrl_u1->start_lba;
v_dif_info_l1->cmp_app_tag = v_dif_ctrl_u1->app_tag;
v_dif_info_l1->cmp_ref_tag = v_dif_ctrl_u1->start_lba;
if (cmp_app_tag != 0)
v_dif_info_l1->cmp_app_tag = cmp_app_tag;
if (rep_app_tag != 0)
v_dif_info_l1->rep_app_tag = rep_app_tag;
if (rep_ref_tag != 0)
v_dif_info_l1->rep_ref_tag = rep_ref_tag;
}
static void hifc_build_dif_control(struct hifc_hba_s *v_hba,
struct unf_frame_pkg_s *v_pkg,
struct hifcoe_fc_dif_info_s *v_dif_info_l1)
{
struct hifcoe_fc_dif_info_s *dif_info_l1 = NULL;
struct unf_dif_control_info_s *dif_ctrl_u1 = NULL;
dif_info_l1 = v_dif_info_l1;
dif_ctrl_u1 = &v_pkg->dif_control;
/* dif enable or disable */
dif_info_l1->wd0.difx_en = HIFC_DIF_ENABLE;
dif_info_l1->wd1.vpid = v_pkg->qos_level;
dif_info_l1->wd1.lun_qos_en = 0;
/* 512B + 8 size mode */
dif_info_l1->wd0.sct_size =
(dif_ctrl_u1->flags & UNF_DIF_SECTSIZE_4KB) ?
HIFC_DIF_SECTOR_4KB_MODE : HIFC_DIF_SECTOR_512B_MODE;
no_dif_sect_size = (dif_ctrl_u1->flags & UNF_DIF_SECTSIZE_4KB) ?
HIFC_SECT_SIZE_4096 : HIFC_SECT_SIZE_512;
dif_sect_size = (dif_ctrl_u1->flags & UNF_DIF_SECTSIZE_4KB) ?
HIFC_SECT_SIZE_4096_8 : HIFC_SECT_SIZE_512_8;
/* The length is adjusted when the burst len is adjusted.
* The length is initialized to 0
*/
dif_info_l1->wd0.difx_len = 0;
/* dif type 1 */
dif_info_l1->wd0.dif_verify_type = dif_type;
dif_info_l1->wd0.dif_ins_rep_type = dif_type;
/* Check whether an all-0xffff app or ref tag escapes verification.
 * For type1, an all-ff app tag still requires the sector check, i.e.
 * v_dif_info_l1->wd0.difx_app_esc = HIFC_DIF_APP_REF_ESC_CHECK
 */
dif_info_l1->wd0.difx_app_esc = dif_app_esc_check;
/* For a type1 ref tag of all ff, the sector check is still required */
dif_info_l1->wd0.difx_ref_esc = dif_ref_esc_check;
/* Currently, only t10 crc is supported */
dif_info_l1->wd0.grd_agm_ctrl = 0;
/* Set this parameter based on the values of bit zero and bit one.
* The initial value is 0, and the value is UNF_DEFAULT_CRC_GUARD_SEED
*/
dif_info_l1->wd0.grd_agm_ini_ctrl =
HIFC_DIF_CRC_CS_INITIAL_CONFIG_BY_BIT0_1;
dif_info_l1->wd1.app_tag_ctrl = 0;
dif_info_l1->wd0.grd_ctrl = 0;
dif_info_l1->wd0.ref_tag_ctrl = 0;
/* Convert the verify operation, replace, forward, insert,
* and delete operations based on the actual operation code of
* the upper layer
*/
if (dif_protect_op_code != INVALID_VALUE32) {
dif_ctrl_u1->protect_opcode = dif_protect_op_code |
(dif_ctrl_u1->protect_opcode & UNF_DIF_ACTION_MASK);
}
hifc_convert_dif_action(dif_ctrl_u1, dif_info_l1);
/* Address self-increase mode */
dif_info_l1->wd0.ref_tag_mode = (dif_ctrl_u1->protect_opcode &
UNF_DIF_ACTION_NO_INCREASE_REFTAG) ? (BOTH_NONE) : (BOTH_INCREASE);
if (ref_tag_mod != INVALID_VALUE32)
dif_info_l1->wd0.ref_tag_mode = ref_tag_mod;
/* This parameter is used only when type 3 is set to 0xffff. */
hifc_get_dif_info_l1(dif_info_l1, dif_ctrl_u1);
}
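/* Sector size sketch (values from the macros in hifc_io.h below): in
 * 512B mode each protected sector occupies HIFC_SECT_SIZE_512_8 (520)
 * bytes, i.e. 512 data bytes plus the 8-byte DIF tuple; in 4KB mode
 * it occupies HIFC_SECT_SIZE_4096_8 (4104) bytes.
 */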
static unsigned int hifc_fill_external_sgl_page(
struct hifc_hba_s *v_hba,
struct unf_frame_pkg_s *v_pkg,
struct unf_esgl_page_s *v_esgl_page,
unsigned int sge_num,
int v_direct,
unsigned int context_id,
unsigned int dif_flag)
{
unsigned int ret = UNF_RETURN_ERROR;
unsigned int index = 0;
unsigned int sge_num_per_page = 0;
unsigned int buffer_addr = 0;
unsigned int buf_len = 0;
char *buf = NULL;
unsigned long phys = 0;
struct unf_esgl_page_s *esgl_page = NULL;
struct hifcoe_variable_sge_s *sge = NULL;
esgl_page = v_esgl_page;
while (sge_num > 0) {
/* Obtains the initial address of the sge page */
sge = (struct hifcoe_variable_sge_s *)esgl_page->page_address;
/* Calculate the number of sge on each page */
sge_num_per_page = (esgl_page->page_size) /
sizeof(struct hifcoe_variable_sge_s);
/* Fill in sgl page. The last sge of each page is link sge
* by default
*/
for (index = 0; index < (sge_num_per_page - 1); index++) {
UNF_GET_SGL_ENTRY(ret, (void *)v_pkg, &buf,
&buf_len, dif_flag);
if (ret != RETURN_OK)
return UNF_RETURN_ERROR;
phys = (unsigned long)buf;
sge[index].buf_addr_hi = UNF_DMA_HI32(phys);
sge[index].buf_addr_lo = UNF_DMA_LO32(phys);
sge[index].wd0.buf_len = buf_len;
sge[index].wd0.r_flag = 0;
sge[index].wd1.extension_flag =
HIFC_WQE_SGE_NOT_EXTEND_FLAG;
sge[index].wd1.last_flag = HIFC_WQE_SGE_NOT_LAST_FLAG;
/* parity bit */
sge[index].wd1.buf_addr_gpa =
(sge[index].buf_addr_lo >> 16);
sge[index].wd1.xid = (context_id & 0x3fff);
hifc_cpu_to_big32(&sge[index],
sizeof(struct hifcoe_variable_sge_s));
sge_num--;
if (sge_num == 0)
break;
}
/* Set the end flag on the last sge of the page once all sges have
 * been filled.
 */
if (sge_num == 0) {
sge[index].wd1.extension_flag =
HIFC_WQE_SGE_NOT_EXTEND_FLAG;
sge[index].wd1.last_flag = HIFC_WQE_SGE_LAST_FLAG;
/* parity bit */
buffer_addr = be32_to_cpu(sge[index].buf_addr_lo);
sge[index].wd1.buf_addr_gpa = (buffer_addr >> 16);
sge[index].wd1.xid = (context_id & 0x3fff);
hifc_cpu_to_big32(&sge[index].wd1, HIFC_DWORD_BYTE);
}
/* If only one sge is left empty, the sge reserved on the page
* is used for filling.
*/
else if (sge_num == 1) {
UNF_GET_SGL_ENTRY(ret, (void *)v_pkg, &buf,
&buf_len, dif_flag);
if (ret != RETURN_OK)
return UNF_RETURN_ERROR;
phys = (unsigned long)buf;
sge[index].buf_addr_hi = UNF_DMA_HI32(phys);
sge[index].buf_addr_lo = UNF_DMA_LO32(phys);
sge[index].wd0.buf_len = buf_len;
sge[index].wd0.r_flag = 0;
sge[index].wd1.extension_flag =
HIFC_WQE_SGE_NOT_EXTEND_FLAG;
sge[index].wd1.last_flag = HIFC_WQE_SGE_LAST_FLAG;
/* parity bit */
sge[index].wd1.buf_addr_gpa =
(sge[index].buf_addr_lo >> 16);
sge[index].wd1.xid = (context_id & 0x3fff);
hifc_cpu_to_big32(&sge[index],
sizeof(struct hifcoe_variable_sge_s));
sge_num--;
} else {
/* Apply for a new sgl page and fill in link sge */
UNF_GET_FREE_ESGL_PAGE(esgl_page, v_hba->lport, v_pkg);
if (!esgl_page) {
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR,
UNF_LOG_REG_ATT, UNF_ERR, "Get free esgl page failed.");
return UNF_RETURN_ERROR;
}
phys = esgl_page->esgl_phyaddr;
sge[index].buf_addr_hi = UNF_DMA_HI32(phys);
sge[index].buf_addr_lo = UNF_DMA_LO32(phys);
/* For the cascaded wqe, you only need to enter the
* cascading buffer address and extension flag, and do
* not need to fill in other fields
*/
sge[index].wd0.buf_len = 0;
sge[index].wd0.r_flag = 0;
sge[index].wd1.extension_flag =
HIFC_WQE_SGE_EXTEND_FLAG;
sge[index].wd1.last_flag = HIFC_WQE_SGE_NOT_LAST_FLAG;
/* parity bit */
sge[index].wd1.buf_addr_gpa =
(sge[index].buf_addr_lo >> 16);
sge[index].wd1.xid = (context_id & 0x3fff);
hifc_cpu_to_big32(&sge[index],
sizeof(struct hifcoe_variable_sge_s));
}
HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_INFO,
"Port(0x%x) SID(0x%x) DID(0x%x) RXID(0x%x) build esgl left sge num: %u.",
v_hba->port_cfg.port_id,
v_pkg->frame_head.csctl_sid,
v_pkg->frame_head.rctl_did,
v_pkg->frame_head.oxid_rxid,
sge_num);
}
return RETURN_OK;
}
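/* Layout example (assuming a 4 KB esgl page, which this file does not
 * mandate): with a 16-byte struct hifcoe_variable_sge_s,
 * sge_num_per_page = 4096 / 16 = 256; up to 255 entries carry buffers
 * and the last one either terminates the list (last_flag set) or
 * links to the next page (extension_flag set).
 */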
static unsigned int hifc_build_local_dif_sgl(struct hifc_hba_s *v_hba,
struct unf_frame_pkg_s *v_pkg,
struct hifcoe_sqe_s *v_sqe,
int v_direct,
unsigned int v_bd_sge_num)
{
unsigned int ret = UNF_RETURN_ERROR;
char *buf = NULL;
unsigned int buf_len = 0;
unsigned long phys = 0;
unsigned int dif_sge_place = 0;
struct hifc_parent_sq_info_s *parent_sq = NULL;
parent_sq = hifc_find_parent_sq_by_pkg((void *)v_hba, v_pkg);
if (unlikely(!parent_sq)) {
HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"Port(0x%x) send packet oxid_rxid(0x%x) fail, as sid_did(0x%x_0x%x)'s parent sq is null.",
v_hba->port_cfg.port_id,
v_pkg->frame_head.oxid_rxid,
v_pkg->frame_head.csctl_sid,
v_pkg->frame_head.rctl_did);
return UNF_RETURN_ERROR;
}
/* DIF SGE must be followed by BD SGE */
dif_sge_place = ((v_bd_sge_num <= v_pkg->entry_count) ?
v_bd_sge_num : v_pkg->entry_count);
/* dif_sge_count == 0 is a special case: nothing needs to be mounted.
 * len is set to zero, the Last-bit is set to one, and the E-bit is
 * set to 0.
 */
if (v_pkg->dif_control.dif_sge_count == 0) {
v_sqe->sge[dif_sge_place].buf_addr_hi = 0;
v_sqe->sge[dif_sge_place].buf_addr_lo = 0;
v_sqe->sge[dif_sge_place].wd0.buf_len = 0;
} else {
UNF_CM_GET_DIF_SGL_ENTRY(ret, (void *)v_pkg, &buf, &buf_len);
if (ret != RETURN_OK) {
HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, "DOUBLE DIF Get Dif Buf Fail.");
return UNF_RETURN_ERROR;
}
phys = (unsigned long)buf;
v_sqe->sge[dif_sge_place].buf_addr_hi = UNF_DMA_HI32(phys);
v_sqe->sge[dif_sge_place].buf_addr_lo = UNF_DMA_LO32(phys);
v_sqe->sge[dif_sge_place].wd0.buf_len = buf_len;
}
/* rdma flag; not used by FC, set to 0 */
v_sqe->sge[dif_sge_place].wd0.r_flag = 0;
/* parity bit */
v_sqe->sge[dif_sge_place].wd1.buf_addr_gpa =
(v_sqe->sge[dif_sge_place].buf_addr_lo >> 16);
v_sqe->sge[dif_sge_place].wd1.xid = (parent_sq->context_id & 0x3fff);
/* The local sgl does not use the cascading SGE. Therefore, the value
* of this field is always 0.
*/
v_sqe->sge[dif_sge_place].wd1.extension_flag =
HIFC_WQE_SGE_NOT_EXTEND_FLAG;
v_sqe->sge[dif_sge_place].wd1.last_flag = HIFC_WQE_SGE_LAST_FLAG;
hifc_cpu_to_big32(&v_sqe->sge[dif_sge_place],
sizeof(struct hifcoe_variable_sge_s));
return RETURN_OK;
}
static unsigned int hifc_build_external_dif_sgl(struct hifc_hba_s *v_hba,
struct unf_frame_pkg_s *v_pkg,
struct hifcoe_sqe_s *v_sqe,
int v_direct,
unsigned int v_bd_sge_num)
{
unsigned int ret = UNF_RETURN_ERROR;
struct unf_esgl_page_s *esgl_page = NULL;
unsigned long phys = 0;
unsigned int left_sge_num = 0;
unsigned int dif_sge_place = 0;
struct hifc_parent_sq_info_s *parent_sq = NULL;
parent_sq = hifc_find_parent_sq_by_pkg((void *)v_hba, v_pkg);
if (unlikely(!parent_sq)) {
HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"Port(0x%x) send packet oxid_rxid(0x%x) fail, as sid_did(0x%x_0x%x)'s parent sq is null.",
v_hba->port_cfg.port_id,
v_pkg->frame_head.oxid_rxid,
v_pkg->frame_head.csctl_sid,
v_pkg->frame_head.rctl_did);
return UNF_RETURN_ERROR;
}
/* DIF SGE must be followed by BD SGE */
dif_sge_place = ((v_bd_sge_num <= v_pkg->entry_count) ?
v_bd_sge_num : v_pkg->entry_count);
/* Allocate the first page first */
UNF_GET_FREE_ESGL_PAGE(esgl_page, v_hba->lport, v_pkg);
if (!esgl_page) {
HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, "DOUBLE DIF Get External Page Fail.");
return UNF_RETURN_ERROR;
}
phys = esgl_page->esgl_phyaddr;
/* Configuring the Address of the Cascading Page */
v_sqe->sge[dif_sge_place].buf_addr_hi = UNF_DMA_HI32(phys);
v_sqe->sge[dif_sge_place].buf_addr_lo = UNF_DMA_LO32(phys);
/* Configuring Control Information About the Cascading Page */
v_sqe->sge[dif_sge_place].wd0.buf_len = 0;
v_sqe->sge[dif_sge_place].wd0.r_flag = 0;
v_sqe->sge[dif_sge_place].wd1.extension_flag = HIFC_WQE_SGE_EXTEND_FLAG;
v_sqe->sge[dif_sge_place].wd1.last_flag = HIFC_WQE_SGE_NOT_LAST_FLAG;
/* parity bit */
v_sqe->sge[dif_sge_place].wd1.buf_addr_gpa =
(v_sqe->sge[dif_sge_place].buf_addr_lo >> 16);
v_sqe->sge[dif_sge_place].wd1.xid = (parent_sq->context_id & 0x3fff);
hifc_cpu_to_big32(&v_sqe->sge[dif_sge_place],
sizeof(struct hifcoe_variable_sge_s));
/* Fill in the sge information on the cascading page */
left_sge_num = v_pkg->dif_control.dif_sge_count;
ret = hifc_fill_external_sgl_page(v_hba, v_pkg, esgl_page, left_sge_num,
v_direct, parent_sq->context_id,
UNF_TRUE);
if (ret != RETURN_OK)
return UNF_RETURN_ERROR;
return RETURN_OK;
}
static unsigned int hifc_build_local_sgl(struct hifc_hba_s *v_hba,
struct unf_frame_pkg_s *v_pkg,
struct hifcoe_sqe_s *v_sqe,
int v_direct)
{
unsigned int ret = UNF_RETURN_ERROR;
char *buf = NULL;
unsigned int buf_len = 0;
unsigned int index = 0;
unsigned long phys = 0;
struct hifc_parent_sq_info_s *parent_sq = NULL;
parent_sq = hifc_find_parent_sq_by_pkg((void *)v_hba, v_pkg);
if (unlikely(!parent_sq)) {
HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[fail]Port(0x%x) send packet oxid_rxid(0x%x) fail, as sid_did(0x%x_0x%x)'s parent sq is null.",
v_hba->port_cfg.port_id,
v_pkg->frame_head.oxid_rxid,
v_pkg->frame_head.csctl_sid,
v_pkg->frame_head.rctl_did);
return UNF_RETURN_ERROR;
}
for (index = 0; index < v_pkg->entry_count; index++) {
UNF_CM_GET_SGL_ENTRY(ret, (void *)v_pkg, &buf, &buf_len);
if (ret != RETURN_OK)
return UNF_RETURN_ERROR;
phys = (unsigned long)buf;
v_sqe->sge[index].buf_addr_hi = UNF_DMA_HI32(phys);
v_sqe->sge[index].buf_addr_lo = UNF_DMA_LO32(phys);
v_sqe->sge[index].wd0.buf_len = buf_len;
/* rdma flag; not used by FC, set to 0 */
v_sqe->sge[index].wd0.r_flag = 0;
/* parity bit */
v_sqe->sge[index].wd1.buf_addr_gpa =
(v_sqe->sge[index].buf_addr_lo >> 16);
v_sqe->sge[index].wd1.xid = (parent_sq->context_id & 0x3fff);
/* The local sgl does not use the cascading SGE. Therefore, the
* value of this field is always 0.
*/
v_sqe->sge[index].wd1.extension_flag =
HIFC_WQE_SGE_NOT_EXTEND_FLAG;
v_sqe->sge[index].wd1.last_flag = HIFC_WQE_SGE_NOT_LAST_FLAG;
if (index == (v_pkg->entry_count - 1)) {
/* Sets the last WQE end flag 1 */
v_sqe->sge[index].wd1.last_flag =
HIFC_WQE_SGE_LAST_FLAG;
}
hifc_cpu_to_big32(&v_sqe->sge[index],
sizeof(struct hifcoe_variable_sge_s));
}
/* Adjust the length of the BDSL field in the CTRL domain. */
HIFC_ADJUST_DATA(v_sqe->ctrl_sl.ch.wd0.bdsl,
HIFC_BYTES_TO_QW_NUM((v_pkg->entry_count *
sizeof(struct hifcoe_variable_sge_s))));
/* entry_count == 0 is a special case: nothing needs to be mounted.
 * len is set to zero, the Last-bit is set to one, and the E-bit is
 * set to 0.
 */
if (v_pkg->entry_count == 0) {
v_sqe->sge[0].buf_addr_hi = 0;
v_sqe->sge[0].buf_addr_lo = 0;
v_sqe->sge[0].wd0.buf_len = 0;
/* rdma flag. This field is not used in fc. Set it to 0. */
v_sqe->sge[0].wd0.r_flag = 0;
/* parity bit */
v_sqe->sge[0].wd1.buf_addr_gpa =
(v_sqe->sge[0].buf_addr_lo >> 16);
v_sqe->sge[0].wd1.xid = (parent_sq->context_id & 0x3fff);
/* The local sgl does not use the cascading SGE. Therefore,
* the value of this field is always 0.
*/
v_sqe->sge[0].wd1.extension_flag = HIFC_WQE_SGE_NOT_EXTEND_FLAG;
v_sqe->sge[0].wd1.last_flag = HIFC_WQE_SGE_LAST_FLAG;
hifc_cpu_to_big32(&v_sqe->sge[0],
sizeof(struct hifcoe_variable_sge_s));
/* Adjust the length of the BDSL field in the CTRL domain. */
HIFC_ADJUST_DATA(
v_sqe->ctrl_sl.ch.wd0.bdsl,
HIFC_BYTES_TO_QW_NUM(
sizeof(struct hifcoe_variable_sge_s)));
}
return RETURN_OK;
}
static unsigned int hifc_build_external_sgl(struct hifc_hba_s *v_hba,
struct unf_frame_pkg_s *v_pkg,
struct hifcoe_sqe_s *v_sqe,
int v_direct,
unsigned int v_bd_sge_num)
{
unsigned int ret = UNF_RETURN_ERROR;
char *buf = NULL;
struct unf_esgl_page_s *esgl_page = NULL;
unsigned long phys = 0;
unsigned int buf_len = 0;
unsigned int index = 0;
unsigned int left_sge_num = 0;
unsigned int local_sge_num = 0;
struct hifc_parent_sq_info_s *parent_sq = NULL;
parent_sq = hifc_find_parent_sq_by_pkg((void *)v_hba, v_pkg);
if (unlikely(!parent_sq)) {
HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"Port(0x%x) send packet oxid_rxid(0x%x) fail, as sid_did(0x%x_0x%x)'s parent sq is null.",
v_hba->port_cfg.port_id,
v_pkg->frame_head.oxid_rxid,
v_pkg->frame_head.csctl_sid,
v_pkg->frame_head.rctl_did);
return UNF_RETURN_ERROR;
}
/* Ensure that the value of v_bd_sge_num is greater than or equal to one
*/
local_sge_num = v_bd_sge_num - 1;
for (index = 0; index < local_sge_num; index++) {
UNF_CM_GET_SGL_ENTRY(ret, (void *)v_pkg, &buf, &buf_len);
if (unlikely(ret != RETURN_OK))
return UNF_RETURN_ERROR;
phys = (unsigned long)buf;
v_sqe->sge[index].buf_addr_hi = UNF_DMA_HI32(phys);
v_sqe->sge[index].buf_addr_lo = UNF_DMA_LO32(phys);
v_sqe->sge[index].wd0.buf_len = buf_len;
/* RDMA flag, which is not used by FC. */
v_sqe->sge[index].wd0.r_flag = 0;
v_sqe->sge[index].wd1.extension_flag =
HIFC_WQE_SGE_NOT_EXTEND_FLAG;
v_sqe->sge[index].wd1.last_flag = HIFC_WQE_SGE_NOT_LAST_FLAG;
/* parity bit */
v_sqe->sge[index].wd1.buf_addr_gpa =
(v_sqe->sge[index].buf_addr_lo >> 16);
v_sqe->sge[index].wd1.xid = (parent_sq->context_id & 0x3fff);
hifc_cpu_to_big32(&v_sqe->sge[index],
sizeof(struct hifcoe_variable_sge_s));
}
/* Allocating the first cascading page */
UNF_GET_FREE_ESGL_PAGE(esgl_page, v_hba->lport, v_pkg);
if (unlikely(!esgl_page))
return UNF_RETURN_ERROR;
phys = esgl_page->esgl_phyaddr;
/* Adjust the length of the BDSL field in the CTRL domain. */
HIFC_ADJUST_DATA(v_sqe->ctrl_sl.ch.wd0.bdsl,
HIFC_BYTES_TO_QW_NUM((v_bd_sge_num *
sizeof(struct hifcoe_variable_sge_s))));
/* Configuring the Address of the Cascading Page */
v_sqe->sge[index].buf_addr_hi = (u32)UNF_DMA_HI32(phys);
v_sqe->sge[index].buf_addr_lo = (u32)UNF_DMA_LO32(phys);
/* Configuring Control Information About the Cascading Page */
v_sqe->sge[index].wd0.buf_len = 0;
v_sqe->sge[index].wd0.r_flag = 0;
v_sqe->sge[index].wd1.extension_flag = HIFC_WQE_SGE_EXTEND_FLAG;
v_sqe->sge[index].wd1.last_flag = HIFC_WQE_SGE_NOT_LAST_FLAG;
/* parity bit */
v_sqe->sge[index].wd1.buf_addr_gpa =
(v_sqe->sge[index].buf_addr_lo >> 16);
v_sqe->sge[index].wd1.xid = (parent_sq->context_id & 0x3fff);
hifc_cpu_to_big32(&v_sqe->sge[index],
sizeof(struct hifcoe_variable_sge_s));
/* Calculate the number of remaining sge. */
left_sge_num = v_pkg->entry_count - local_sge_num;
/* Fill in the sge information on the cascading page. */
ret = hifc_fill_external_sgl_page(v_hba, v_pkg, esgl_page,
left_sge_num, v_direct,
parent_sq->context_id,
UNF_FALSE);
if (ret != RETURN_OK)
return UNF_RETURN_ERROR;
return RETURN_OK;
}
unsigned int hifc_build_sql_by_local_sge_num(struct unf_frame_pkg_s *v_pkg,
struct hifc_hba_s *v_hba,
struct hifcoe_sqe_s *v_sqe,
int v_direct,
unsigned int bd_sge_num)
{
unsigned int ret = RETURN_OK;
if (v_pkg->entry_count <= bd_sge_num) {
ret = hifc_build_local_sgl(v_hba, v_pkg, v_sqe, v_direct);
} else {
ret = hifc_build_external_sgl(v_hba, v_pkg, v_sqe,
v_direct, bd_sge_num);
}
return ret;
}
unsigned int hifc_conf_dual_sgl_info(struct unf_frame_pkg_s *v_pkg,
struct hifc_hba_s *v_hba,
struct hifcoe_sqe_s *v_sqe,
int v_direct,
unsigned int bd_sge_num,
int double_sgl)
{
unsigned int ret = RETURN_OK;
if (double_sgl == UNF_TRUE) {
/* Adjust the length of the DIF_SL field in the CTRL domain */
HIFC_ADJUST_DATA(
v_sqe->ctrl_sl.ch.wd0.dif_sl,
HIFC_BYTES_TO_QW_NUM(
sizeof(struct hifcoe_variable_sge_s)));
if (v_pkg->dif_control.dif_sge_count <=
HIFC_WQE_SGE_DIF_ENTRY_NUM) {
ret = hifc_build_local_dif_sgl(v_hba, v_pkg, v_sqe,
v_direct, bd_sge_num);
} else {
ret = hifc_build_external_dif_sgl(v_hba, v_pkg, v_sqe,
v_direct, bd_sge_num);
}
}
return ret;
}
static unsigned int hifc_build_sgl(struct hifc_hba_s *v_hba,
struct unf_frame_pkg_s *v_pkg,
struct hifcoe_sqe_s *v_sqe,
int v_direct,
unsigned int dif_flag)
{
unsigned int ret = RETURN_OK;
unsigned int bd_sge_num = HIFC_WQE_SGE_ENTRY_NUM;
int double_sgl = UNF_FALSE;
if ((dif_flag != 0) &&
(v_pkg->dif_control.flags & UNF_DIF_DOUBLE_SGL)) {
bd_sge_num =
HIFC_WQE_SGE_ENTRY_NUM - HIFC_WQE_SGE_DIF_ENTRY_NUM;
double_sgl = UNF_TRUE;
}
/* If all sges fit into the wqe's local sges, load them directly;
 * otherwise use the esgl
 */
ret = hifc_build_sql_by_local_sge_num(v_pkg, v_hba, v_sqe,
v_direct, bd_sge_num);
if (unlikely(ret != RETURN_OK))
return ret;
/* Configuring Dual SGL Information for DIF */
ret = hifc_conf_dual_sgl_info(v_pkg, v_hba, v_sqe, v_direct,
bd_sge_num, double_sgl);
return ret;
}
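/* Dispatch sketch: a package whose entry_count fits into the
 * HIFC_WQE_SGE_ENTRY_NUM local SGEs uses hifc_build_local_sgl();
 * otherwise the last local SGE becomes a link SGE and the remainder
 * spills into esgl pages via hifc_build_external_sgl(). With DIF
 * double SGL enabled, HIFC_WQE_SGE_DIF_ENTRY_NUM entries are reserved
 * for the DIF SGL first.
 */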
static void hifc_adjust_dix(struct unf_frame_pkg_s *v_pkg,
struct hifcoe_fc_dif_info_s *v_dif_info_l1,
unsigned char v_task_type)
{
unsigned char task_type = v_task_type;
struct hifcoe_fc_dif_info_s *dif_info_l1 = NULL;
dif_info_l1 = v_dif_info_l1;
if (dix_flag == 1) {
if ((task_type == HIFC_SQE_FCP_IWRITE) ||
(task_type == HIFC_SQE_FCP_TRD)) {
if ((UNF_DIF_ACTION_MASK &
(v_pkg->dif_control.protect_opcode)) ==
UNF_DIF_ACTION_VERIFY_AND_FORWARD) {
dif_info_l1->wd0.grd_ctrl |=
HIFC_DIF_GARD_REF_APP_CTRL_REPLACE;
dif_info_l1->wd0.grd_agm_ctrl =
HIFC_DIF_GUARD_VERIFY_IP_CHECKSUM_REPLACE_CRC16;
}
if ((UNF_DIF_ACTION_MASK &
(v_pkg->dif_control.protect_opcode)) ==
UNF_DIF_ACTION_VERIFY_AND_DELETE) {
dif_info_l1->wd0.grd_agm_ctrl =
HIFC_DIF_GUARD_VERIFY_IP_CHECKSUM_REPLACE_CRC16;
}
}
if ((task_type == HIFC_SQE_FCP_IREAD) ||
(task_type == HIFC_SQE_FCP_TWR)) {
if ((UNF_DIF_ACTION_MASK &
(v_pkg->dif_control.protect_opcode)) ==
UNF_DIF_ACTION_VERIFY_AND_FORWARD) {
dif_info_l1->wd0.grd_ctrl |=
HIFC_DIF_GARD_REF_APP_CTRL_REPLACE;
dif_info_l1->wd0.grd_agm_ctrl =
HIFC_DIF_GUARD_VERIFY_CRC16_REPLACE_IP_CHECKSUM;
}
if ((UNF_DIF_ACTION_MASK &
(v_pkg->dif_control.protect_opcode)) ==
UNF_DIF_ACTION_INSERT) {
dif_info_l1->wd0.grd_agm_ctrl =
HIFC_DIF_GUARD_VERIFY_CRC16_REPLACE_IP_CHECKSUM;
}
}
}
if (grd_agm_ctrl != 0)
dif_info_l1->wd0.grd_agm_ctrl = grd_agm_ctrl;
if (grd_ctrl != 0)
dif_info_l1->wd0.grd_ctrl = grd_ctrl;
}
void hifc_get_dma_direction_by_fcp_cmnd(const struct unf_fcp_cmnd_s *v_fcp_cmnd,
int *v_pi_dma_direction,
unsigned char *v_task_type)
{
if (UNF_FCP_WR_DATA & v_fcp_cmnd->control) {
*v_task_type = HIFC_SQE_FCP_IWRITE;
*v_pi_dma_direction = DMA_TO_DEVICE;
} else if (UNF_GET_TASK_MGMT_FLAGS(v_fcp_cmnd->control) != 0) {
*v_task_type = HIFC_SQE_FCP_ITMF;
*v_pi_dma_direction = DMA_FROM_DEVICE;
} else {
*v_task_type = HIFC_SQE_FCP_IREAD;
*v_pi_dma_direction = DMA_FROM_DEVICE;
}
}
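/* Example (illustrative): an FCP_CMND whose control word has
 * UNF_FCP_WR_DATA set maps to HIFC_SQE_FCP_IWRITE/DMA_TO_DEVICE; task
 * management flags map to HIFC_SQE_FCP_ITMF; everything else,
 * including non-data commands, is treated as
 * HIFC_SQE_FCP_IREAD/DMA_FROM_DEVICE.
 */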
static void hifc_adjust_icmnd_burst_len(struct unf_frame_pkg_s *v_pkg,
struct hifcoe_sqe_ts_s *v_sqe_ts,
int direction)
{
struct hifcoe_sqe_icmnd_s *icmnd = &v_sqe_ts->cont.icmnd;
icmnd->info.dif_info.wd0.difx_len = 0;
}
static inline unsigned int hifc_build_cmnd_wqe(struct hifc_hba_s *v_hba,
struct unf_frame_pkg_s *v_pkg,
struct hifcoe_sqe_s *v_sge)
{
unsigned int ret = RETURN_OK;
int direction = 0;
unsigned char task_type = 0;
struct unf_fcp_cmnd_s *fcp_cmnd = NULL;
struct hifcoe_sqe_s *sqe = v_sge;
unsigned int dif_flag = 0;
fcp_cmnd = v_pkg->fcp_cmnd;
if (unlikely(!fcp_cmnd)) {
UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]Package's FCP commond pointer is NULL.");
return UNF_RETURN_ERROR;
}
hifc_get_dma_direction_by_fcp_cmnd(fcp_cmnd, &direction, &task_type);
hifc_build_icmnd_wqe_ts_header(v_pkg, sqe, task_type,
v_hba->exit_base, v_hba->port_index);
hifc_build_trd_twr_wqe_ctrls(v_pkg, sqe);
hifc_build_icmnd_wqe_ts(v_hba, v_pkg, &sqe->ts_sl);
if (task_type != HIFC_SQE_FCP_ITMF) {
if (v_pkg->dif_control.protect_opcode == UNF_DIF_ACTION_NONE) {
dif_flag = 0;
hifc_build_no_dif_control(
v_pkg,
&sqe->ts_sl.cont.icmnd.info.dif_info);
} else {
dif_flag = 1;
hifc_build_dif_control(
v_hba, v_pkg,
&sqe->ts_sl.cont.icmnd.info.dif_info);
hifc_adjust_dix(
v_pkg, &sqe->ts_sl.cont.icmnd.info.dif_info,
task_type);
hifc_adjust_icmnd_burst_len(v_pkg, &sqe->ts_sl,
direction);
}
}
ret = hifc_build_sgl(v_hba, v_pkg, sqe, direction, dif_flag);
return ret;
}
unsigned int hifc_send_scsi_cmnd(void *v_hba, struct unf_frame_pkg_s *v_pkg)
{
struct hifc_hba_s *hba = NULL;
struct hifc_parent_sq_info_s *parent_sq = NULL;
unsigned int ret = UNF_RETURN_ERROR;
struct hifcoe_sqe_s sqe;
/* input param check */
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba,
return UNF_RETURN_ERROR);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_pkg,
return UNF_RETURN_ERROR);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE,
(UNF_GET_OXID(v_pkg) != INVALID_VALUE16), return UNF_RETURN_ERROR);
HIFC_CHECK_PKG_ALLOCTIME(v_pkg);
memset(&sqe, 0, sizeof(struct hifcoe_sqe_s));
hba = v_hba;
/* 1. find parent sq for scsi_cmnd(pkg) */
parent_sq = hifc_find_parent_sq_by_pkg(hba, v_pkg);
if (unlikely(!parent_sq))
/* Do not need to print info */
return UNF_RETURN_ERROR;
v_pkg->qos_level += hba->vpid_start;
/* 2. build cmnd wqe (to sqe) for scsi_cmnd(pkg) */
ret = hifc_build_cmnd_wqe(hba, v_pkg, &sqe);
if (unlikely(ret != RETURN_OK)) {
HIFC_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_ERR,
"[fail]Port(0x%x) Build WQE failed, SID(0x%x) DID(0x%x) OXID(0x%x) pkg type(0x%x) hot pool tag(0x%x).",
hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid,
v_pkg->frame_head.rctl_did, UNF_GET_OXID(v_pkg),
v_pkg->type, UNF_GET_XCHG_TAG(v_pkg));
return ret;
}
/* 3. En-Queue Parent SQ for scsi_cmnd(pkg) sqe */
ret = hifc_parent_sq_enqueue(parent_sq, &sqe);
return ret;
}
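/* Caller sketch (hypothetical, for illustration only):
 *
 *	struct unf_frame_pkg_s *pkg = get_pkg_from_com_layer();
 *	if (hifc_send_scsi_cmnd(hba, pkg) != RETURN_OK)
 *		handle_send_failure(pkg);
 *
 * The function only builds and enqueues the SQE; the real I/O status
 * arrives later through hifc_scq_recv_iresp().
 */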
static void hifc_ini_status_default_handler(struct hifcoe_scqe_iresp_s *v_iresp,
struct unf_frame_pkg_s *v_pkg)
{
unsigned char control = 0;
unsigned short com_err_code = 0;
control = v_iresp->wd2.fcp_flag & HIFC_CTRL_MASK;
if (v_iresp->fcp_resid != 0) {
com_err_code = UNF_IO_FAILED;
v_pkg->residus_len = v_iresp->fcp_resid;
} else {
com_err_code = UNF_IO_SUCCESS;
v_pkg->residus_len = 0;
}
v_pkg->status = hifc_fill_pkg_status(com_err_code, control,
v_iresp->wd2.scsi_status);
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_INFO,
"[info]Fill package with status: 0x%x, residus len: 0x%x",
v_pkg->status, v_pkg->residus_len);
}
void hifc_check_fcp_rsp_iu(struct hifcoe_scqe_iresp_s *v_iresp,
struct unf_frame_pkg_s *v_pkg)
{
unsigned char scsi_status = 0;
unsigned char control = 0;
control = (unsigned char)v_iresp->wd2.fcp_flag;
scsi_status = (unsigned char)v_iresp->wd2.scsi_status;
/* The FCP RSP IU arrives little-endian from the IOB/WQE and is passed
 * through to COM's pkg in the same byte order
 */
if (control & FCP_RESID_UNDER_MASK) {
/* under flow: usually occurs in inquiry */
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_INFO,
"[info]I_STS IOB posts under flow with residus len: %u, FCP residue: %u.",
v_pkg->residus_len, v_iresp->fcp_resid);
if (v_pkg->residus_len != v_iresp->fcp_resid) {
v_pkg->status = hifc_fill_pkg_status(UNF_IO_FAILED,
control,
scsi_status);
} else {
v_pkg->status =
hifc_fill_pkg_status(UNF_IO_UNDER_FLOW,
control, scsi_status);
}
}
if (control & FCP_RESID_OVER_MASK) {
/* over flow: error happened */
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]I_STS IOB posts over flow with residus len: %u, FCP residue: %u.",
v_pkg->residus_len, v_iresp->fcp_resid);
if (v_pkg->residus_len != v_iresp->fcp_resid) {
v_pkg->status = hifc_fill_pkg_status(UNF_IO_FAILED,
control,
scsi_status);
} else {
v_pkg->status = hifc_fill_pkg_status(UNF_IO_OVER_FLOW,
control,
scsi_status);
}
}
v_pkg->unf_rsp_pload_bl.length = 0;
v_pkg->unf_sense_pload_bl.length = 0;
if (control & FCP_RSP_LEN_VALID_MASK) {
/* dma by chip */
v_pkg->unf_rsp_pload_bl.buffer_ptr = NULL;
v_pkg->unf_rsp_pload_bl.length = v_iresp->fcp_rsp_len;
v_pkg->byte_orders |= UNF_BIT_3;
}
if (control & FCP_SNS_LEN_VALID_MASK) {
/* dma by chip */
v_pkg->unf_sense_pload_bl.buffer_ptr = NULL;
v_pkg->unf_sense_pload_bl.length = v_iresp->fcp_sns_len;
v_pkg->byte_orders |= UNF_BIT_4;
}
}
unsigned short hifc_get_com_err_code(struct unf_frame_pkg_s *v_pkg)
{
unsigned short com_err_code = UNF_IO_FAILED;
if (v_pkg->status_sub_code == DRV_DIF_CRC_ERR)
com_err_code = UNF_IO_DIF_ERROR;
else if (v_pkg->status_sub_code == DRV_DIF_LBA_ERR)
com_err_code = UNF_IO_DIF_REF_ERROR;
else
com_err_code = UNF_IO_DIF_GEN_ERROR;
return com_err_code;
}
void hifc_process_ini_fail_io(struct hifc_hba_s *v_hba,
struct hifcoe_scqe_iresp_s *v_iresp,
struct unf_frame_pkg_s *v_pkg)
{
unsigned short com_err_code = UNF_IO_FAILED;
unsigned char dif_info = 0;
/* 1. error stats process */
if (HIFC_GET_SCQE_STATUS((union hifcoe_scqe_u *)(void *)v_iresp) != 0) {
switch (HIFC_GET_SCQE_STATUS(
(union hifcoe_scqe_u *)(void *)v_iresp)) {
/* DIF error process */
case HIFC_COMPLETION_STATUS_DIF_ERROR:
dif_info = (unsigned char)v_iresp->wd1.dif_info;
v_pkg->status_sub_code =
(dif_info & HIFC_DIF_ERROR_CODE_CRC) ?
DRV_DIF_CRC_ERR : ((dif_info &
HIFC_DIF_ERROR_CODE_REF) ? DRV_DIF_LBA_ERR :
((dif_info & HIFC_DIF_ERROR_CODE_APP) ?
DRV_DIF_APP_ERR : 0));
com_err_code = hifc_get_com_err_code(v_pkg);
UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_MAJOR,
"[err]Port(0x%x) INI io oxid(0x%x), rxid(0x%x) status with dif err(0x%x)",
v_hba->port_cfg.port_id, v_iresp->wd0.ox_id,
v_iresp->wd0.rx_id, dif_info);
hifc_dif_err_count(v_hba, dif_info);
break;
/* I/O not complete: 1.session reset; 2.clear buffer */
case FCOE_CQE_BUFFER_CLEAR_IO_COMPLETED:
case FCOE_CQE_SESSION_RST_CLEAR_IO_COMPLETED:
case FCOE_CQE_SESSION_ONLY_CLEAR_IO_COMPLETED:
case FCOE_CQE_WQE_FLUSH_IO_COMPLETED:
com_err_code = UNF_IO_CLEAN_UP;
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_MAJOR,
"[warn]Port(0x%x) INI IO not complete, OX_ID(0x%x) RX_ID(0x%x) status(0x%x)",
v_hba->port_cfg.port_id, v_iresp->wd0.ox_id,
v_iresp->wd0.rx_id, com_err_code);
break;
/* any other: I/O failed --->>> DID error */
default:
com_err_code = UNF_IO_FAILED;
break;
}
/* fill pkg status & return directly */
v_pkg->status =
hifc_fill_pkg_status(com_err_code, v_iresp->wd2.fcp_flag,
v_iresp->wd2.scsi_status);
return;
}
/* 2. default stats process */
hifc_ini_status_default_handler(v_iresp, v_pkg);
/* 3. FCP RSP IU check */
hifc_check_fcp_rsp_iu(v_iresp, v_pkg);
}
unsigned int hifc_scq_recv_iresp(struct hifc_hba_s *v_hba,
union hifcoe_scqe_u *v_wqe)
{
struct hifcoe_scqe_iresp_s *iresp = NULL;
struct unf_frame_pkg_s pkg;
unsigned int ret = RETURN_OK;
iresp = (struct hifcoe_scqe_iresp_s *)(void *)v_wqe;
/* 1. Constraints: I_STS remain cnt must be zero */
if (unlikely(HIFC_GET_SCQE_REMAIN_CNT(v_wqe) != 0)) {
HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]Port(0x%x) ini_wqe(OX_ID:0x%x RX_ID:0x%x) remain_cnt(0x%x) abnormal, status(0x%x)",
v_hba->port_cfg.port_id,
iresp->wd0.ox_id,
iresp->wd0.rx_id,
HIFC_GET_SCQE_REMAIN_CNT(v_wqe),
HIFC_GET_SCQE_STATUS(v_wqe));
UNF_PRINT_SFS_LIMIT(UNF_MAJOR, v_hba->port_cfg.port_id, v_wqe,
sizeof(union hifcoe_scqe_u));
/* return directly */
return UNF_RETURN_ERROR;
}
memset(&pkg, 0, sizeof(struct unf_frame_pkg_s));
pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME] = iresp->magic_num;
/* 2. OX_ID validity check */
if (likely(((unsigned short)iresp->wd0.ox_id >= v_hba->exit_base) &&
((unsigned short)iresp->wd0.ox_id <
v_hba->exit_base + v_hba->exit_count))) {
pkg.status = UNF_IO_SUCCESS;
pkg.private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] =
iresp->wd0.ox_id - v_hba->exit_base;
} else {
/* OX_ID error: return by COM */
pkg.status = UNF_IO_FAILED;
pkg.private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = INVALID_VALUE16;
HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]Port(0x%x) ini_cmnd_wqe(OX_ID:0x%x RX_ID:0x%x) ox_id invalid, status(0x%x)",
v_hba->port_cfg.port_id,
iresp->wd0.ox_id,
iresp->wd0.rx_id,
HIFC_GET_SCQE_STATUS(v_wqe));
UNF_PRINT_SFS_LIMIT(UNF_MAJOR, v_hba->port_cfg.port_id,
v_wqe, sizeof(union hifcoe_scqe_u));
}
/* 3. status check */
if (unlikely(HIFC_GET_SCQE_STATUS(v_wqe) ||
(iresp->wd2.scsi_status != 0) ||
(iresp->fcp_resid != 0) ||
((iresp->wd2.fcp_flag & HIFC_CTRL_MASK) != 0))) {
HIFC_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_INFO,
"[warn]Port(0x%x) scq_status(0x%x) scsi_status(0x%x) fcp_resid(0x%x) fcp_flag(0x%x)",
v_hba->port_cfg.port_id, HIFC_GET_SCQE_STATUS(v_wqe),
iresp->wd2.scsi_status, iresp->fcp_resid,
iresp->wd2.fcp_flag);
/* set pkg status & check fcp_rsp IU */
hifc_process_ini_fail_io(v_hba, iresp, &pkg);
}
/* 4. LL_Driver ---to--->>> COM_Driver */
UNF_LOWLEVEL_SCSI_COMPLETED(ret, v_hba->lport, &pkg);
return ret;
}
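/* OX_ID window example (hypothetical numbers): with exit_base 0x1000
 * and exit_count 0x800, only OX_IDs in [0x1000, 0x17ff] are accepted
 * and the hot pool tag becomes ox_id - exit_base; anything outside
 * the window is completed back to COM as UNF_IO_FAILED.
 */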
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __HIFC_IO_H__
#define __HIFC_IO_H__
enum dif_mode_e {
DIF_MODE_NONE = 0x0,
DIF_MODE_INSERT = 0x1,
DIF_MODE_REMOVE = 0x2,
DIF_MODE_FORWARD_OR_REPLACE = 0x3
};
enum ref_tag_mode_e {
BOTH_NONE = 0x0,
RECEIVE_INCREASE = 0x1,
REPLACE_INCREASE = 0x2,
BOTH_INCREASE = 0x3
};
#define HIFC_DIF_DISABLE 0
#define HIFC_DIF_ENABLE 1
#define HIFC_DIF_SECTOR_512B_MODE 0
#define HIFC_DIF_SECTOR_4KB_MODE 1
#define HIFC_DIF_GUARD_VERIFY_ALGORITHM_CTL_T10_CRC16 0x0
#define HIFC_DIF_GUARD_VERIFY_CRC16_REPLACE_IP_CHECKSUM 0x1
#define HIFC_DIF_GUARD_VERIFY_IP_CHECKSUM_REPLACE_CRC16 0x2
#define HIFC_DIF_GUARD_VERIFY_ALGORITHM_CTL_IP_CHECKSUM 0x3
#define HIFC_DIF_CRC_CS_INITIAL_CONFIG_BY_REGISTER 0
#define HIFC_DIF_CRC_CS_INITIAL_CONFIG_BY_BIT0_1 0x4
#define HIFC_DIF_GARD_REF_APP_CTRL_VERIFY 0x4
#define HIFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY 0x0
#define HIFC_DIF_GARD_REF_APP_CTRL_INSERT 0x0
#define HIFC_DIF_GARD_REF_APP_CTRL_DELETE 0x1
#define HIFC_DIF_GARD_REF_APP_CTRL_FORWARD 0x2
#define HIFC_DIF_GARD_REF_APP_CTRL_REPLACE 0x3
#define HIFC_DIF_ERROR_CODE_MASK 0xe
#define HIFC_DIF_ERROR_CODE_CRC 0x2
#define HIFC_DIF_ERROR_CODE_REF 0x4
#define HIFC_DIF_ERROR_CODE_APP 0x8
#define HIFC_DIF_SEND_DIFERR_PAYLOAD 0
#define HIFC_DIF_SEND_DIFERR_CRC 1
#define HIFC_DIF_SEND_DIFERR_APP 2
#define HIFC_DIF_SEND_DIFERR_REF 3
#define HIFC_DIF_RECV_DIFERR_ALL 4
#define HIFC_DIF_RECV_DIFERR_CRC 5
#define HIFC_DIF_RECV_DIFERR_APP 6
#define HIFC_DIF_RECV_DIFERR_REF 7
#define HIFC_SECT_SIZE_512 512
#define HIFC_SECT_SIZE_4096 4096
#define HIFC_SECT_SIZE_512_8 520
#define HIFC_SECT_SIZE_4096_8 4104
#define HIFC_CTRL_MASK 0x1f
unsigned int hifc_send_scsi_cmnd(void *v_hba, struct unf_frame_pkg_s *v_pkg);
unsigned int hifc_scq_recv_iresp(struct hifc_hba_s *v_hba,
union hifcoe_scqe_u *v_wqe);
#endif /* __HIFC_IO_H__ */
// SPDX-License-Identifier: GPL-2.0
/* Huawei Fabric Channel Linux driver
* Copyright(c) 2018 Huawei Technologies Co., Ltd
*
*/
#include "hifc_module.h"
#include "hifc_service.h"
void hifc_build_common_wqe_ctrls(struct hifcoe_wqe_ctrl_s *v_ctrl_sl,
unsigned char v_task_len)
{
/* "BDSL" field of CtrlS - defines the size of BDS,
* which varies from 0 to 2040 bytes (8 bits of 8 bytes' chunk)
*/
v_ctrl_sl->ch.wd0.bdsl = 0;
/*
* "DrvSL" field of CtrlS - defines the size of DrvS, which varies from
* 0 to 24 bytes
*/
v_ctrl_sl->ch.wd0.drv_sl = 0;
/* "WF" field of CtrlS - WQE Format:
 * a. b1 - linking WQE, used only in the linked page architecture
 * instead of ring; a special control WQE that carries no buffer or
 * inline data information and is consumed only by hardware. Its size
 * is aligned to the WQEBB/WQE.
 * b. b0 - normal WQE, either a normal SEG WQE or an inline data WQE
 */
v_ctrl_sl->ch.wd0.wf = 0;
/*
* "CF" field of CtrlS - Completion Format - defines the format of CS.
* a.b0 - Status information is embedded inside of Completion Section
* b.b1 - Completion Section keeps SGL, where Status information
* should be written. (For the definition of SGLs see section 4.1.)
*/
v_ctrl_sl->ch.wd0.cf = 0;
/*
* "TSL" field of CtrlS - defines the size of TS, which varies from 0
* to 248 bytes
*/
v_ctrl_sl->ch.wd0.tsl = v_task_len;
/*
 * Variable length SGE (vSGE). The size of an SGE is 16 bytes. The
 * vSGE format is of two types, defined by the "VA" field of CtrlS
 * ("VA" stands for Virtual Address):
 * a. b0 - the SGE comprises a 64-bit buffer pointer and a 31-bit
 * length; each SGE supports at most 2G-1 bytes, which guarantees by
 * nature that a single SGE cannot exceed 2GB. A byte count value of
 * zero means a 0-byte data transfer.
 * b. b1 - the SGE comprises a 64-bit buffer pointer, a 31-bit length
 * and a 30-bit key of the translation table, with the same 2G-1 byte
 * limit and zero-length semantics.
 */
v_ctrl_sl->ch.wd0.va = 0;
/*
* "DF" field of CtrlS - Data Format - defines the format of BDS
* a. b0 - BDS carries the list of SGEs (SGL)
* b. b1 - BDS carries the inline data
*/
v_ctrl_sl->ch.wd0.df = 0;
/*
* "CR" - Completion is Required - marks CQE generation request per WQE
*/
v_ctrl_sl->ch.wd0.cr = 1;
/*
* "DIFSL" field of CtrlS - defines the size of DIFS, which varies from
* 0 to 56 bytes
*/
v_ctrl_sl->ch.wd0.dif_sl = 0;
/*
* "CSL" field of CtrlS - defines the size of CS, which varies from 0 to
* 24 bytes
*/
v_ctrl_sl->ch.wd0.csl = 0;
/* "CtrlSL" - C describes the size of CtrlS in 8 bytes chunks.
*The value Zero is not valid
*/
v_ctrl_sl->ch.wd0.ctrl_sl = 1;
/* "O" - Owner - marks ownership of WQE */
v_ctrl_sl->ch.wd0.owner = 0;
}
void hifc_build_trd_twr_wqe_ctrls(struct unf_frame_pkg_s *v_pkg,
struct hifcoe_sqe_s *v_sqe)
{
/* "BDSL" field of CtrlS - defines the size of BDS, which varies from
* 0 to 2040 bytes (8 bits of 8 bytes' chunk)
*/
/* A TRD WQE carries two SGEs by default (4 DWs per SGE); the value is
 * 4 because the unit is 2 DWs. In double SGL mode, bdsl is 2.
 */
v_sqe->ctrl_sl.ch.wd0.bdsl = HIFC_T_RD_WR_WQE_CTR_BDSL_SIZE;
/*
* "DrvSL" field of CtrlS - defines the size of DrvS, which varies from
* 0 to 24 bytes DrvSL config for 0
*/
v_sqe->ctrl_sl.ch.wd0.drv_sl = 0;
/* a. b1 - linking WQE, which will be only used in linked page
* architecture instead of ring, it's a special control WQE which does
* not contain any buffer or inline data information, and will only be
* consumed by hardware. The size is aligned to WQEBB/WQE b0 - normal
* WQE, either normal SEG WQE or inline data WQE
*/
/* normal wqe */
v_sqe->ctrl_sl.ch.wd0.wf = 0;
/*
* "CF" field of CtrlS - Completion Format - defines the format of CS.
* a.b0 - Status information is embedded inside of Completion Section
* b.b1 - Completion Section keeps SGL, where Status information
* should be written. (For the definition of SGLs see section 4.1.)
*/
/* by SCQE mode, the value is ignored */
v_sqe->ctrl_sl.ch.wd0.cf = 0;
/* "TSL" field of CtrlS - defines the size of TS, which varies from 0 to
* 248 bytes
*/
/* TSL is configured for a 56-byte task section */
v_sqe->ctrl_sl.ch.wd0.tsl = sizeof(struct hifcoe_sqe_ts_s) /
HIFC_WQE_SECTION_CHUNK_SIZE;
/*
 * Variable length SGE (vSGE). The size of an SGE is 16 bytes. The
 * vSGE format is of two types, defined by the "VA" field of CtrlS
 * ("VA" stands for Virtual Address):
 * a. b0 - the SGE comprises a 64-bit buffer pointer and a 31-bit
 * length; each SGE supports at most 2G-1 bytes, which guarantees by
 * nature that a single SGE cannot exceed 2GB. A byte count value of
 * zero means a 0-byte data transfer.
 * b. b1 - the SGE comprises a 64-bit buffer pointer, a 31-bit length
 * and a 30-bit key of the translation table, with the same 2G-1 byte
 * limit and zero-length semantics.
 */
v_sqe->ctrl_sl.ch.wd0.va = 0;
/*
* "DF" field of CtrlS - Data Format - defines the format of BDS
* a. b0 - BDS carries the list of SGEs (SGL)
* b. b1 - BDS carries the inline data
*/
v_sqe->ctrl_sl.ch.wd0.df = 0;
/* "CR" - Completion is Required marks CQE generation request per WQE */
/* by SCQE mode, this value is ignored */
v_sqe->ctrl_sl.ch.wd0.cr = 1;
/*
* "DIFSL" field of CtrlS - defines the size of DIFS, which varies from
* 0 to 56 bytes.
*/
v_sqe->ctrl_sl.ch.wd0.dif_sl = 0;
/*
* "CSL" field of CtrlS - defines the size of CS, which varies from 0 to
* 24 bytes
*/
v_sqe->ctrl_sl.ch.wd0.csl = 0;
/* "CtrlSL" - C describes the size of CtrlS in 8 bytes chunks.
* The value Zero is not valid.
*/
v_sqe->ctrl_sl.ch.wd0.ctrl_sl = HIFC_T_RD_WR_WQE_CTR_CTRLSL_SIZE;
/* "O" - Owner - marks ownership of WQE */
v_sqe->ctrl_sl.ch.wd0.owner = 0;
}
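/* TSL example (assuming HIFC_WQE_SECTION_CHUNK_SIZE is 8): a 56-byte
 * struct hifcoe_sqe_ts_s yields tsl = 56 / 8 = 7 chunks, matching the
 * 56-byte task section note above.
 */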
void hifc_build_service_wqe_ts_common(struct hifcoe_sqe_ts_s *v_sqe_ts,
unsigned int rport_index,
unsigned short local_xid,
unsigned short remote_xid,
unsigned short data_len)
{
v_sqe_ts->local_xid = local_xid;
v_sqe_ts->wd0.conn_id = (unsigned short)rport_index;
v_sqe_ts->wd0.remote_xid = remote_xid;
v_sqe_ts->cont.els_gs_elsrsp_comm.data_len = data_len;
}
void hifc_build_els_gs_wqe_sge(struct hifcoe_sqe_s *v_sqe, void *v_buf_addr,
unsigned long long v_phy_addr,
unsigned int buf_len,
unsigned int xid, void *v_hba)
{
unsigned long long els_rsp_phy_addr;
struct hifcoe_variable_sge_s *psge = NULL;
/* Fill in SGE and convert it to big-endian. */
psge = &v_sqe->sge[0];
els_rsp_phy_addr = v_phy_addr;
psge->buf_addr_hi = HIFC_HIGH_32_BITS(els_rsp_phy_addr);
psge->buf_addr_lo = HIFC_LOW_32_BITS(els_rsp_phy_addr);
psge->wd0.buf_len = buf_len;
psge->wd0.r_flag = 0;
psge->wd1.extension_flag = HIFC_WQE_SGE_NOT_EXTEND_FLAG;
psge->wd1.buf_addr_gpa = (psge->buf_addr_lo >> 16);
psge->wd1.xid = (xid & 0x3fff);
psge->wd1.last_flag = HIFC_WQE_SGE_LAST_FLAG;
hifc_cpu_to_big32(psge, sizeof(*psge));
/* Convert the payload of the FC frame to big-endian. */
hifc_cpu_to_big32(v_buf_addr, buf_len);
}
void hifc_build_els_wqe_ts_rsp(struct hifcoe_sqe_s *v_sqe, void *v_sq_info,
void *v_frame_pld, unsigned short type,
unsigned short cmnd, unsigned int v_scqn)
{
struct unf_pril_payload_s *pri_acc_pld = NULL;
struct hifcoe_sqe_els_rsp_s *els_rsp = NULL;
struct hifcoe_sqe_ts_s *sqe_ts = NULL;
struct hifc_parent_sq_info_s *sq_info = NULL;
struct hifc_hba_s *hba = NULL;
UNF_CHECK_VALID(0x5015, UNF_TRUE, v_sqe, return);
UNF_CHECK_VALID(0x5015, UNF_TRUE, v_frame_pld, return);
UNF_CHECK_VALID(0x5015, UNF_TRUE, v_sq_info, return);
sqe_ts = &v_sqe->ts_sl;
els_rsp = &sqe_ts->cont.els_rsp;
sqe_ts->task_type = HIFC_SQE_ELS_RSP;
/* By default, the chip does not need to update parameters. */
els_rsp->wd1.para_update = 0x0;
sq_info = (struct hifc_parent_sq_info_s *)v_sq_info;
hba = (struct hifc_hba_s *)sq_info->phba;
/* When a PLOGI or LOGO is sent, the microcode needs to be instructed
 * to clear the I/O related to the link to avoid data inconsistency
 * caused by out-of-order I/O.
 */
if (((cmnd == ELS_LOGO) || (cmnd == ELS_PLOGI)) && hba) {
els_rsp->wd1.clr_io = 1;
els_rsp->wd6.reset_exch_start = hba->exit_base;
els_rsp->wd6.reset_exch_end = hba->exit_base +
(hba->exit_count - 1);
els_rsp->wd7.scqn = v_scqn;
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"Port(0x%x) send cmd(0x%x) to RPort(0x%x),rport index(0x%x), notify clean io start 0x%x, end 0x%x, scqn 0x%x.",
sq_info->local_port_id,
cmnd,
sq_info->remote_port_id,
sq_info->rport_index,
els_rsp->wd6.reset_exch_start,
els_rsp->wd6.reset_exch_end,
v_scqn);
return;
}
if (type == ELS_RJT)
return;
/*
 * Write the PRLI ACC negotiation parameters into the WQE and set the
 * update flag in the WQE.
 */
if (cmnd == ELS_PRLI) {
/* The chip updates the PLOGI ACC negotiation parameters. */
els_rsp->wd2.seq_cnt = sq_info->plogi_coparams.seq_cnt;
els_rsp->wd2.e_d_tov = sq_info->plogi_coparams.ed_tov;
els_rsp->wd2.tx_mfs = sq_info->plogi_coparams.tx_mfs;
els_rsp->e_d_tov_timer_val =
sq_info->plogi_coparams.ed_tov_timer_val;
/* The chip updates the PRLI ACC parameter. */
pri_acc_pld = (struct unf_pril_payload_s *)v_frame_pld;
els_rsp->wd4.xfer_dis = HIFC_GET_PRLI_PARAM_WXFER(
pri_acc_pld->parms);
els_rsp->wd4.conf = HIFC_GET_PRLI_PARAM_CONF(
pri_acc_pld->parms);
els_rsp->wd4.rec = HIFC_GET_PRLI_PARAM_REC(pri_acc_pld->parms);
els_rsp->wd1.para_update = 0x03;
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"Port(0x%x) save rport index(0x%x) login parms,seqcnt:0x%x,e_d_tov:0x%x,txmfs:0x%x,e_d_tovtimerval:0x%x,xfer_dis:0x%x, conf:0x%x,rec:0x%x.",
sq_info->local_port_id,
sq_info->rport_index, els_rsp->wd2.seq_cnt,
els_rsp->wd2.e_d_tov, els_rsp->wd2.tx_mfs,
els_rsp->e_d_tov_timer_val, els_rsp->wd4.xfer_dis,
els_rsp->wd4.conf, els_rsp->wd4.rec);
}
}
void hifc_build_els_wqe_ts_req(struct hifcoe_sqe_s *v_sqe, void *v_sq_info,
unsigned short cmnd, unsigned int v_scqn,
void *v_frame_pld)
{
struct hifcoe_sqe_ts_s *v_sqe_ts = NULL;
struct hifcoe_sqe_t_els_gs_s *els_req = NULL;
struct hifc_parent_sq_info_s *sq_info = NULL;
struct hifc_hba_s *hba = NULL;
struct unf_rec_pld_s *rec_pld = NULL;
v_sqe_ts = &v_sqe->ts_sl;
v_sqe_ts->task_type = HIFC_SQE_ELS_CMND;
els_req = &v_sqe_ts->cont.t_els_gs;
sq_info = (struct hifc_parent_sq_info_s *)v_sq_info;
hba = (struct hifc_hba_s *)sq_info->phba;
/*
 * When a PLOGI or LOGO is sent, the microcode needs to be instructed
 * to clear the I/O related to the link to avoid data inconsistency
 * caused by out-of-order I/O.
 */
if (((cmnd == ELS_LOGO) || (cmnd == ELS_PLOGI)) && hba) {
els_req->wd4.clr_io = 1;
els_req->wd6.reset_exch_start = hba->exit_base;
els_req->wd6.reset_exch_end = hba->exit_base +
(hba->exit_count - 1);
els_req->wd7.scqn = v_scqn;
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"Port(0x%x) Rport(0x%x) SID(0x%x) send %s to DID(0x%x), notify clean io start 0x%x, end 0x%x, scqn 0x%x.",
hba->port_cfg.port_id, sq_info->rport_index,
sq_info->local_port_id,
(cmnd == ELS_PLOGI) ? "PLOGI" : "LOGO",
sq_info->remote_port_id,
els_req->wd6.reset_exch_start,
els_req->wd6.reset_exch_end,
v_scqn);
return;
}
/* The chip updates the PLOGI ACC negotiation parameters. */
if (cmnd == ELS_PRLI) {
els_req->wd5.seq_cnt = sq_info->plogi_coparams.seq_cnt;
els_req->wd5.e_d_tov = sq_info->plogi_coparams.ed_tov;
els_req->wd5.tx_mfs = sq_info->plogi_coparams.tx_mfs;
els_req->e_d_tov_timer_val =
sq_info->plogi_coparams.ed_tov_timer_val;
els_req->wd4.rec_support = hba->port_cfg.tape_support ? 1 : 0;
els_req->wd4.para_update = 0x01;
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO,
"Port(0x%x) save rport index(0x%x) login parms,seqcnt:0x%x, e_d_tov:0x%x,txmfs:0x%x,e_d_tovtimerval:0x%x.",
sq_info->local_port_id, sq_info->rport_index,
els_req->wd5.seq_cnt, els_req->wd5.e_d_tov,
els_req->wd5.tx_mfs,
els_req->e_d_tov_timer_val);
}
if (cmnd == ELS_ECHO)
els_req->echo_flag = UNF_TRUE;
if (cmnd == ELS_REC) {
rec_pld = (struct unf_rec_pld_s *)v_frame_pld;
els_req->wd4.rec_flag = 1;
rec_pld->ox_id += hba->exit_base;
els_req->wd4.orign_oxid = rec_pld->ox_id;
HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
"Port(0x%x) Rport(0x%x) SID(0x%x) send Rec to DID(0x%x), origin_oxid 0x%x",
hba->port_cfg.port_id, sq_info->rport_index,
sq_info->local_port_id,
sq_info->remote_port_id,
els_req->wd4.orign_oxid);
}
}
void hifc_build_els_wqe_ts_magic_num(struct hifcoe_sqe_s *v_sqe,
unsigned short els_cmnd_type,
unsigned int v_magic_num)
{
struct hifcoe_sqe_t_els_gs_s *els_req;
struct hifcoe_sqe_els_rsp_s *els_rsp;
if (els_cmnd_type == ELS_ACC || els_cmnd_type == ELS_RJT) {
els_rsp = &v_sqe->ts_sl.cont.els_rsp;
els_rsp->magic_num = v_magic_num;
} else {
els_req = &v_sqe->ts_sl.cont.t_els_gs;
els_req->magic_num = v_magic_num;
}
}
void hifc_build_gs_wqe_ts_req(struct hifcoe_sqe_s *v_sqe,
unsigned int magic_num)
{
struct hifcoe_sqe_ts_s *v_sqe_ts = NULL;
struct hifcoe_sqe_t_els_gs_s *gs_req = NULL;
v_sqe_ts = &v_sqe->ts_sl;
v_sqe_ts->task_type = HIFC_SQE_GS_CMND;
gs_req = &v_sqe_ts->cont.t_els_gs;
gs_req->magic_num = magic_num;
}
void hifc_build_bls_wqe_ts_req(struct hifcoe_sqe_s *v_sqe,
unsigned int abts_param,
unsigned int magic_num)
{
struct hifcoe_sqe_abts_s *abts_ts;
v_sqe->ts_sl.task_type = HIFC_SQE_BLS_CMND;
abts_ts = &v_sqe->ts_sl.cont.abts;
abts_ts->fh_parm_abts = abts_param;
abts_ts->magic_num = magic_num;
}
void hifc_build_service_wqe_root_ts(void *v_hba,
struct hifc_root_sqe_s *v_rt_sqe,
unsigned int rx_id, unsigned int rport_id,
unsigned int scq_num)
{
unsigned char data_cos = 0;
unsigned int port_id = 0;
unsigned int service_type = 0;
struct hifc_hba_s *hba = NULL;
struct hifc_parent_queue_info_s *parent_queue_info = NULL;
hba = (struct hifc_hba_s *)v_hba;
port_id = HIFC_GET_HBA_PORT_ID(hba);
service_type = HIFC_GET_SERVICE_TYPE(hba);
if (rport_id >= UNF_HIFC_MAXRPORT_NUM) {
data_cos = HIFC_GET_PACKET_COS(service_type);
} else {
parent_queue_info =
&hba->parent_queue_mgr->parent_queues[rport_id];
data_cos = parent_queue_info->queue_data_cos;
}
v_rt_sqe->task_section.fc_dw0.exch_id = rx_id;
v_rt_sqe->task_section.fc_dw0.host_id = 0;
v_rt_sqe->task_section.fc_dw0.port_id = port_id;
v_rt_sqe->task_section.fc_dw0.off_load = HIFC_NO_OFFLOAD;
v_rt_sqe->task_section.fc_dw3.rport_index = HIFC_LSW(rport_id);
v_rt_sqe->task_section.fc_dw3.scq_num = HIFC_LSW(scq_num);
v_rt_sqe->task_section.fc_dw4.service_type = UNF_GET_SHIFTMASK(
service_type, 0, 0x1f);
v_rt_sqe->task_section.fc_dw4.pkt_type = HIFC_GET_PACKET_TYPE(
service_type);
v_rt_sqe->task_section.fc_dw4.pkt_cos = data_cos;
}
void hifc_build_service_wqe_root_sge(struct hifc_root_sqe_s *v_rt_sqe,
void *v_buf_addr,
unsigned long long v_phy_addr,
unsigned int buf_len,
void *v_hba)
{
unsigned long long frame_phy_addr;
/* Fill in the SGE and convert it to big-endian. */
frame_phy_addr = v_phy_addr;
v_rt_sqe->sge.buf_addr_hi = HIFC_HIGH_32_BITS(frame_phy_addr);
v_rt_sqe->sge.buf_addr_lo = HIFC_LOW_32_BITS(frame_phy_addr);
v_rt_sqe->sge.wd0.buf_len = buf_len;
v_rt_sqe->sge.wd0.ext_flag = 0;
v_rt_sqe->sge.wd1.rsvd = 0;
hifc_cpu_to_big32(&v_rt_sqe->sge, sizeof(v_rt_sqe->sge));
/* Convert the FC frame to big-endian */
hifc_cpu_to_big32(v_buf_addr, buf_len);
}
void hifc_build_service_wqe_ctx_sge(struct hifc_root_sqe_s *v_rt_sqe,
unsigned long long v_ctxt_addr,
unsigned int buf_len)
{
/* The SGE is filled in and converted to the big-endian mode. */
v_rt_sqe->ctx_sge.buf_addr_hi = HIFC_HIGH_32_BITS(v_ctxt_addr);
v_rt_sqe->ctx_sge.buf_addr_lo = HIFC_LOW_32_BITS(v_ctxt_addr);
v_rt_sqe->ctx_sge.wd0.buf_len = buf_len;
v_rt_sqe->ctx_sge.wd0.ext_flag = 0;
v_rt_sqe->ctx_sge.wd1.rsvd = 0;
hifc_cpu_to_big32(&v_rt_sqe->ctx_sge, sizeof(v_rt_sqe->ctx_sge));
}
void hifc_build_els_wqe_root_offload(struct hifc_root_sqe_s *v_rt_sqe,
dma_addr_t ctxt_addr,
unsigned int xid)
{
/* update Task Section DW0.OFFLOAD */
v_rt_sqe->task_section.fc_dw0.off_load = HIFC_HAVE_OFFLOAD;
/* update Context GPA DW1~2 */
v_rt_sqe->task_section.fc_dw1.context_gpa_hi =
HIFC_HIGH_32_BITS(ctxt_addr);
v_rt_sqe->task_section.fc_dw2.context_gpa_lo =
HIFC_LOW_32_BITS(ctxt_addr);
/* fill Context DW4 */
v_rt_sqe->task_section.fc_dw4.parent_xid = xid;
v_rt_sqe->task_section.fc_dw4.csize = HIFC_CNTX_SIZE_T_256B;
/* The sqe of the offload request has two sges: the first is the
 * packet and the second is the ctx.
 */
v_rt_sqe->ctrl_section.ch.wd0.bdsl =
2 * HIFC_BYTES_TO_QW_NUM(sizeof(struct hifc_root_sge_s));
}
void hifc_build_service_wqe_ctrl_section(struct hifcoe_wqe_ctrl_s *v_wqe_cs,
unsigned int ts_size,
unsigned int bdsi)
{
v_wqe_cs->ch.wd0.bdsl = bdsi;
v_wqe_cs->ch.wd0.drv_sl = 0;
v_wqe_cs->ch.wd0.rsvd0 = 0;
v_wqe_cs->ch.wd0.wf = 0;
v_wqe_cs->ch.wd0.cf = 0;
v_wqe_cs->ch.wd0.tsl = ts_size;
v_wqe_cs->ch.wd0.va = 0;
v_wqe_cs->ch.wd0.df = 0;
v_wqe_cs->ch.wd0.cr = 1;
v_wqe_cs->ch.wd0.dif_sl = 0;
v_wqe_cs->ch.wd0.csl = 0;
/* divided by 8 */
v_wqe_cs->ch.wd0.ctrl_sl = HIFC_BYTES_TO_QW_NUM(sizeof(*v_wqe_cs));
v_wqe_cs->ch.wd0.owner = 0;
}
void hifc_build_wqe_owner_pmsn(struct hifcoe_wqe_ctrl_s *v_wqe_cs,
unsigned short owner,
unsigned short pmsn)
{
v_wqe_cs->qsf.wqe_sn = pmsn;
v_wqe_cs->qsf.dump_wqe_sn = v_wqe_cs->qsf.wqe_sn;
v_wqe_cs->ch.wd0.owner = (unsigned int)owner;
}
void hifc_convert_parent_wqe_to_big_endian(struct hifcoe_sqe_s *v_sqe)
{
if (likely((v_sqe->ts_sl.task_type != HIFCOE_TASK_T_TRESP) &&
(v_sqe->ts_sl.task_type != HIFCOE_TASK_T_TMF_RESP))) {
/*
 * Convert the Control Section and Task Section to big-endian. The
 * upper-layer driver already converted the SGEs to big-endian before
 * the SQE enters the queue.
 */
hifc_cpu_to_big32(&v_sqe->ctrl_sl, sizeof(v_sqe->ctrl_sl));
hifc_cpu_to_big32(&v_sqe->ts_sl, sizeof(v_sqe->ts_sl));
} else {
/*
 * HIFCOE_TASK_T_TRESP may use the SGE as the Task Section, so the
 * entire SQE is converted to big-endian.
 */
hifc_cpu_to_big32(v_sqe, sizeof(struct hifcoe_sqe_tresp_s));
}
}
void hifc_convert_root_wqe_to_big_endian(struct hifc_root_sqe_s *v_sqe)
{
hifc_cpu_to_big32(&v_sqe->ctrl_section, sizeof(v_sqe->ctrl_section));
hifc_cpu_to_big32(&v_sqe->task_section, sizeof(v_sqe->task_section));
}
void hifc_build_cmdqe_common(union hifc_cmdqe_u *cmdqe,
enum hifcoe_task_type_e task_type,
unsigned short rx_id)
{
cmdqe->common.wd0.task_type = task_type;
cmdqe->common.wd0.rx_id = rx_id;
cmdqe->common.wd0.rsvd0 = 0;
}
#define HIFC_STANDARD_SIRT_ENABLE 1
#define HIFC_STANDARD_SIRT_DISABLE 0
#define HIFC_UNKNOWN_ID 0xFFFF
void hifc_build_icmnd_wqe_ts_header(struct unf_frame_pkg_s *v_pkg,
struct hifcoe_sqe_s *v_sqe,
unsigned char task_type,
unsigned short exit_base,
unsigned char v_port_idx)
{
v_sqe->ts_sl.local_xid = UNF_GET_OXID(v_pkg) + exit_base;
v_sqe->ts_sl.task_type = task_type;
v_sqe->ts_sl.wd0.conn_id =
(unsigned short)(v_pkg->private[PKG_PRIVATE_XCHG_RPORT_INDEX]);
v_sqe->ts_sl.wd0.remote_xid = HIFC_UNKNOWN_ID;
}
void hifc_build_icmnd_wqe_ts(void *v_hba, struct unf_frame_pkg_s *v_pkg,
struct hifcoe_sqe_ts_s *v_sqe_ts)
{
struct hifcoe_sqe_icmnd_s *icmd = &v_sqe_ts->cont.icmnd;
void *phy_add = NULL;
struct hifc_hba_s *hba = NULL;
hba = (struct hifc_hba_s *)v_hba;
v_sqe_ts->cdb_type = 0;
memcpy(icmd->fcp_cmnd_iu, v_pkg->fcp_cmnd,
sizeof(struct unf_fcp_cmnd_s));
icmd->magic_num = UNF_GETXCHGALLOCTIME(v_pkg);
if (v_pkg->unf_rsp_pload_bl.buffer_ptr) {
phy_add = (void *)v_pkg->unf_rsp_pload_bl.buf_dma_addr;
icmd->rsp_gpa_hi = HIFC_HIGH_32_BITS(phy_add);
icmd->rsp_gpa_lo = HIFC_LOW_32_BITS(phy_add);
} else {
HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]INI Build WQE sense buffer should not be null,sid_did (0x%x_0x%x) oxid(0x%x) pkg type(0x%x) hot pool tag(0x%x).",
v_pkg->frame_head.csctl_sid,
v_pkg->frame_head.rctl_did,
UNF_GET_OXID(v_pkg),
v_pkg->type, UNF_GET_XCHG_TAG(v_pkg));
}
if (v_sqe_ts->task_type == HIFC_SQE_FCP_ITMF) {
icmd->info.tmf.w0.bs.reset_exch_start = hba->exit_base;
icmd->info.tmf.w0.bs.reset_exch_end = hba->exit_base +
hba->exit_count - 1;
icmd->info.tmf.w1.bs.reset_did = UNF_GET_DID(v_pkg);
/* delivers the marker status flag to the microcode. */
icmd->info.tmf.w1.bs.marker_sts = 1;
HIFC_GET_RESET_TYPE(UNF_GET_TASK_MGMT_FLAGS(
v_pkg->fcp_cmnd->control),
icmd->info.tmf.w1.bs.reset_type);
icmd->info.tmf.w2.bs.reset_sid = UNF_GET_SID(v_pkg);
memcpy(icmd->info.tmf.reset_lun, v_pkg->fcp_cmnd->lun,
sizeof(icmd->info.tmf.reset_lun));
}
}
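/*
 * Note: info.tmf overlays info.dif_info in struct hifcoe_sqe_icmnd_s,
 * so the TMF words are filled in only for HIFC_SQE_FCP_ITMF tasks;
 * writing them for a normal read/write would clobber the DIF control
 * information carried in the same union.
 */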
void hifc_build_srq_wqe_ctrls(struct hifcoe_rqe_s *v_rqe,
unsigned short owner,
unsigned short pmsn)
{
struct hifcoe_wqe_ctrl_ch_s *wqe_ctrls = NULL;
wqe_ctrls = &v_rqe->ctrl_sl.ch;
wqe_ctrls->wd0.owner = owner;
wqe_ctrls->wd0.ctrl_sl = sizeof(struct hifcoe_wqe_ctrl_s) >> 3;
wqe_ctrls->wd0.csl = 1;
wqe_ctrls->wd0.dif_sl = 0;
wqe_ctrls->wd0.cr = 1;
wqe_ctrls->wd0.df = 0;
wqe_ctrls->wd0.va = 0;
wqe_ctrls->wd0.tsl = 0;
wqe_ctrls->wd0.cf = 0;
wqe_ctrls->wd0.wf = 0;
wqe_ctrls->wd0.drv_sl = sizeof(struct hifcoe_rqe_drv_s) >> 3;
wqe_ctrls->wd0.bdsl = sizeof(struct hifcoe_constant_sge_s) >> 3;
v_rqe->ctrl_sl.wd0.wqe_msn = pmsn;
v_rqe->ctrl_sl.wd0.dump_wqe_msn = v_rqe->ctrl_sl.wd0.wqe_msn;
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __HIFC_WQE_H__
#define __HIFC_WQE_H__
#include "hifcoe_wqe.h"
#include "hifcoe_parent_context.h"
/* TGT WQE type */
/* DRV->uCode via Root or Parent SQ */
#define HIFC_SQE_FCP_TRD HIFCOE_TASK_T_TREAD
#define HIFC_SQE_FCP_TWR HIFCOE_TASK_T_TWRITE
#define HIFC_SQE_FCP_TRSP HIFCOE_TASK_T_TRESP
#define HIFC_SQE_FCP_TACK HIFCOE_TASK_T_TACK
#define HIFC_SQE_ELS_CMND HIFCOE_TASK_T_ELS
#define HIFC_SQE_ELS_RSP HIFCOE_TASK_T_ELS_RSP
#define HIFC_SQE_GS_CMND HIFCOE_TASK_T_GS
#define HIFC_SQE_BLS_CMND HIFCOE_TASK_T_ABTS
#define HIFC_SQE_FCP_IREAD HIFCOE_TASK_T_IREAD
#define HIFC_SQE_FCP_IWRITE HIFCOE_TASK_T_IWRITE
#define HIFC_SQE_FCP_ITMF HIFCOE_TASK_T_ITMF
#define HIFC_SQE_SESS_RST HIFCOE_TASK_T_SESS_RESET
#define HIFC_SQE_FCP_TMF_TRSP HIFCOE_TASK_T_TMF_RESP
/* DRV->uCode Via CMDQ */
#define HIFC_CMDQE_ABTS_RSP HIFCOE_TASK_T_ABTS_RSP
#define HIFC_CMDQE_ABORT HIFCOE_TASK_T_ABORT
#define HIFC_CMDQE_SESS_DIS HIFCOE_TASK_T_SESS_DIS
#define HIFC_CMDQE_SESS_DEL HIFCOE_TASK_T_SESS_DEL
/* uCode->Drv Via CMD SCQ */
#define HIFC_SCQE_FCP_TCMND HIFCOE_TASK_T_RCV_TCMND
#define HIFC_SCQE_ELS_CMND HIFCOE_TASK_T_RCV_ELS_CMD
#define HIFC_SCQE_ABTS_CMD HIFCOE_TASK_T_RCV_ABTS_CMD
#define HIFC_SCQE_FCP_IRSP HIFCOE_TASK_T_IRESP
#define HIFC_SCQE_FCP_ITMF_RSP HIFCOE_TASK_T_ITMF_RESP
/* uCode->Drv Via STS SCQ */
#define HIFC_SCQE_FCP_TSTS HIFCOE_TASK_T_TSTS
#define HIFC_SCQE_GS_RSP HIFCOE_TASK_T_RCV_GS_RSP
#define HIFC_SCQE_ELS_RSP HIFCOE_TASK_T_RCV_ELS_RSP
#define HIFC_SCQE_ABTS_RSP HIFCOE_TASK_T_RCV_ABTS_RSP
#define HIFC_SCQE_ELS_RSP_STS HIFCOE_TASK_T_ELS_RSP_STS
#define HIFC_SCQE_ABTS_RSP_STS HIFCOE_TASK_T_ABTS_RSP_STS
#define HIFC_SCQE_ABORT_STS HIFCOE_TASK_T_ABORT_STS
#define HIFC_SCQE_SESS_EN_STS HIFCOE_TASK_T_SESS_EN_STS
#define HIFC_SCQE_SESS_DIS_STS HIFCOE_TASK_T_SESS_DIS_STS
#define HIFC_SCQE_SESS_DEL_STS HIFCOE_TASK_T_SESS_DEL_STS
#define HIFC_SCQE_SESS_RST_STS HIFCOE_TASK_T_SESS_RESET_STS
#define HIFC_SCQE_ITMF_MARKER_STS HIFCOE_TASK_T_ITMF_MARKER_STS
#define HIFC_SCQE_ABTS_MARKER_STS HIFCOE_TASK_T_ABTS_MARKER_STS
#define HIFC_SCQE_FLUSH_SQ_STS HIFCOE_TASK_T_FLUSH_SQ_STS
#define HIFC_SCQE_BUF_CLEAR_STS HIFCOE_TASK_T_BUFFER_CLEAR_STS
#define HIFC_SCQE_CLEAR_SRQ_STS HIFCOE_TASK_T_CLEAR_SRQ_STS
#define HIFC_LOW_32_BITS(__addr) \
((unsigned int)((unsigned long long)(__addr) & 0xffffffff))
#define HIFC_HIGH_32_BITS(__addr)\
((unsigned int)(((unsigned long long)(__addr) >> 32) & 0xffffffff))
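/*
 * Illustrative usage: split a 64-bit GPA into the two 32-bit words a
 * WQE expects, as hifc_build_els_wqe_root_offload() does above:
 *
 *	fc_dw1.context_gpa_hi = HIFC_HIGH_32_BITS(ctxt_addr);
 *	fc_dw2.context_gpa_lo = HIFC_LOW_32_BITS(ctxt_addr);
 */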
/* Error Code from SCQ */
#define HIFC_COMPLETION_STATUS_SUCCESS FCOE_CQE_COMPLETED
#define HIFC_COMPLETION_STATUS_ABORTED_SETUP_FAIL FCOE_IMMI_CMDPKT_SETUP_FAIL
#define HIFC_COMPLETION_STATUS_TIMEOUT FCOE_ERROR_CODE_E_D_TIMER_EXPIRE
#define HIFC_COMPLETION_STATUS_DIF_ERROR FCOE_ERROR_CODE_DATA_DIFX_FAILED
#define HIFC_COMPLETION_STATUS_DATA_OOO FCOE_ERROR_CODE_DATA_OOO_RO
#define HIFC_COMPLETION_STATUS_DATA_OVERFLOW \
FCOE_ERROR_CODE_DATA_EXCEEDS_DATA2TRNS
#define HIFC_SCQE_INVALID_CONN_ID 0xffff
#define HIFC_GET_SCQE_TYPE(scqe) ((scqe)->common.ch.wd0.task_type)
#define HIFC_GET_SCQE_STATUS(scqe) ((scqe)->common.ch.wd0.err_code)
#define HIFC_GET_SCQE_REMAIN_CNT(scqe) ((scqe)->common.ch.wd0.cqe_remain_cnt)
#define HIFC_GET_SCQE_CONN_ID(scqe) ((scqe)->common.conn_id)
#define HIFC_GET_WQE_TYPE(wqe) ((wqe)->ts_sl.task_type)
#define HIFC_WQE_IS_IO(wqe) \
(HIFC_GET_WQE_TYPE(wqe) != HIFC_SQE_SESS_RST)
#define HIFC_SCQE_HAS_ERRCODE(scqe) \
(HIFC_GET_SCQE_STATUS(scqe) != HIFC_COMPLETION_STATUS_SUCCESS)
#define HIFC_SCQE_ERR_TO_CM(scqe)\
(HIFC_GET_SCQE_STATUS(scqe) != FCOE_ELS_GS_RSP_EXCH_CHECK_FAIL)
#define HIFC_SCQE_CONN_ID_VALID(scqe) \
(HIFC_GET_SCQE_CONN_ID(scqe) != HIFC_SCQE_INVALID_CONN_ID)
#define HIFC_WQE_SECTION_CHUNK_SIZE 8 /* 8 bytes' chunk */
#define HIFC_T_RESP_WQE_CTR_TSL_SIZE 15 /* 8 bytes' chunk */
#define HIFC_T_RD_WR_WQE_CTR_TSL_SIZE 9 /* 8 bytes' chunk */
#define HIFC_T_RD_WR_WQE_CTR_BDSL_SIZE 4 /* 8 bytes' chunk */
#define HIFC_T_RD_WR_WQE_CTR_CTRLSL_SIZE 1 /* 8 bytes' chunk */
#define HIFC_WQE_SGE_ENTRY_NUM 2 /* BD SGE and DIF SGE count */
#define HIFC_WQE_SGE_DIF_ENTRY_NUM 1 /* DIF SGE count */
#define HIFC_WQE_SGE_LAST_FLAG 1
#define HIFC_WQE_SGE_NOT_LAST_FLAG 0
#define HIFC_WQE_SGE_EXTEND_FLAG 1
#define HIFC_WQE_SGE_NOT_EXTEND_FLAG 0
#define HIFC_FCP_TMF_PORT_RESET 0
#define HIFC_FCP_TMF_LUN_RESET 1
#define HIFC_FCP_TMF_TGT_RESET 2
#define HIFC_FCP_TMF_RSVD 3
#define HIFC_NO_OFFLOAD 0
#define HIFC_HAVE_OFFLOAD 1
#define HIFC_QID_SQ 0
#define HIFC_ADJUST_DATA(old_val, new_val) ((old_val) = (new_val))
#define HIFC_GET_RESET_TYPE(tmf_flag, reset_flag) \
do { \
switch (tmf_flag) { \
case UNF_FCP_TM_ABORT_TASK_SET: \
case UNF_FCP_TM_LOGICAL_UNIT_RESET: \
reset_flag = HIFC_FCP_TMF_LUN_RESET; \
break; \
case UNF_FCP_TM_TARGET_RESET: \
reset_flag = HIFC_FCP_TMF_TGT_RESET; \
break; \
case UNF_FCP_TM_CLEAR_TASK_SET: \
reset_flag = HIFC_FCP_TMF_PORT_RESET; \
break; \
default: \
reset_flag = HIFC_FCP_TMF_RSVD; \
} \
} while (0)
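/*
 * Illustrative expansion: for a SCSI target reset,
 *
 *	unsigned int reset_type;
 *	HIFC_GET_RESET_TYPE(UNF_FCP_TM_TARGET_RESET, reset_type);
 *
 * leaves reset_type == HIFC_FCP_TMF_TGT_RESET; TM flags not listed in
 * the switch fall through to HIFC_FCP_TMF_RSVD.
 */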
/*
 * nic_wqe_ctrl_sec table definition
*/
struct nic_wqe_ctrl_sec {
union {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
/* marks ownership of WQE */
u32 owner : 1;
/* Control Section Length */
u32 ctrl_sec_len : 2;
/* Completion Section Length */
u32 completion_sec_len : 2;
/* DIF Section Length */
u32 dif_sec_len : 3;
/*
* Completion is Required - marks CQE generation request
* per WQE
*/
u32 cr : 1;
/* Data Format - format of BDS */
u32 df : 1;
/* Virtual Address */
u32 va : 1;
/* Task Section Length */
u32 task_sec_len : 5;
/* Completion Format */
u32 cf : 1;
u32 wf : 1;
/* reserved */
u32 rsvd : 4;
/* Driver Section Length */
u32 drv_sec_len : 2;
/* Buffer Descriptors Section Length */
u32 buf_desc_sec_len : 8;
#else
/* Buffer Descriptors Section Length */
u32 buf_desc_sec_len : 8;
/* Driver Section Length */
u32 drv_sec_len : 2;
/* reserved */
u32 rsvd : 4;
u32 wf : 1;
/* Completion Format */
u32 cf : 1;
/* Task Section Length */
u32 task_sec_len : 5;
/* Virtual Address */
u32 va : 1;
/* Data Format - format of BDS */
u32 df : 1;
/*
* Completion is Required - marks CQE generation request
* per WQE
*/
u32 cr : 1;
/* DIF Section Length */
u32 dif_sec_len : 3;
/* Completion Section Length */
u32 completion_sec_len : 2;
/* Control Section Length */
u32 ctrl_sec_len : 2;
/* marks ownership of WQE */
u32 owner : 1;
#endif
} bs;
u32 dw;
};
};
/*
 * nic_rq_sge_sec table definition
*/
struct nic_rq_sge_sec {
/* packet buffer address high */
u32 wb_addr_high;
/* packet buffer address low */
u32 wb_addr_low;
union {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd : 1;
/* SGE length */
u32 length : 31;
#else
/* SGE length */
u32 length : 31;
u32 rsvd : 1;
#endif
} bs0;
u32 dw0;
};
union {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
/* 0:list,1:last */
u32 list : 1;
/* 0:normal,1:pointer to next SGE */
u32 extension : 1;
/* key or unused */
u32 key : 30;
#else
/* key or unused */
u32 key : 30;
/* 0:normal,1:pointer to next SGE */
u32 extension : 1;
/* 0:list,1:last */
u32 list : 1;
#endif
} bs1;
u32 dw1;
};
};
/*
 * nic_rq_bd_sec table definition
*/
struct nic_rq_bd_sec {
/* packet buffer address high */
u32 pkt_buf_addr_high;
/* packet buffer address low */
u32 pkt_buf_addr_low;
};
/*
 * nic_rq_wqe table definition
*/
struct nic_rq_wqe {
struct nic_wqe_ctrl_sec rq_wqe_ctrl_sec;
u32 rsvd;
struct nic_rq_sge_sec rx_sge;
struct nic_rq_bd_sec pkt_buf_addr;
};
/* Link WQE structure */
struct hifc_link_wqe_s {
union {
struct {
unsigned int rsv1 : 14;
unsigned int wf : 1;
unsigned int rsv2 : 14;
unsigned int ctrlsl : 2;
unsigned int o : 1;
} wd0;
u32 val_wd0;
};
union {
struct {
unsigned int msn : 16;
unsigned int dump_msn : 15;
/* lp indicates whether the O bit is inverted */
unsigned int lp : 1;
} wd1;
unsigned int val_wd1;
};
unsigned int next_page_addr_hi;
unsigned int next_page_addr_lo;
};
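/*
 * Sketch of the intent, inferred from the field comments above: a link
 * WQE sits in the last slot of a queue page, next_page_addr_hi/lo point
 * to the next WQE page, and lp tells the consumer that the O-bit sense
 * inverts on the following lap around the ring.
 */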
struct hifc_root_rq_complet_info_s {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
unsigned int done : 1; /* done bit, ucode sets it to 1 */
unsigned int rsvd1 : 6;
unsigned int fc_pkt : 1; /* Marks whether the packet is fc type */
unsigned int rsvd2 : 24;
#else
unsigned int rsvd2 : 24;
unsigned int fc_pkt : 1; /* Marks whether the packet is fc type */
unsigned int rsvd1 : 6;
unsigned int done : 1; /* done bit, ucode sets it to 1 */
#endif
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
unsigned short buf_length;
unsigned short exch_id;
#else
unsigned short exch_id;
unsigned short buf_length;
#endif
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
unsigned short sts_only; /* set if only the CMPL SECTION is present */
unsigned short status; /* 0: no error; non-zero: error */
#else
unsigned short status; /* 0: no error; non-zero: error */
unsigned short sts_only; /* set if only the CMPL SECTION is present */
#endif
unsigned int magic_num;
unsigned int rsvd[4];
};
/* Parent SQ WQE */
struct hifc_root_sge_s {
unsigned int buf_addr_hi;
unsigned int buf_addr_lo;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
unsigned int ext_flag : 1;
unsigned int buf_len : 31;
#else
unsigned int buf_len : 31;
unsigned int ext_flag : 1;
#endif
} wd0;
struct {
unsigned int rsvd;
} wd1;
};
/* Root SQ WQE Task Section structure for FC */
struct hifc_root_sqe_task_section_s {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
unsigned int task_type : 8;
/* 1:offload enable,0:offload disable. */
unsigned int off_load : 1;
unsigned int port_id : 4;
unsigned int host_id : 2;
unsigned int rsvd1 : 1;
unsigned int exch_id : 16;
#else
unsigned int exch_id : 16;
unsigned int rsvd1 : 1;
unsigned int host_id : 2;
unsigned int port_id : 4;
unsigned int off_load : 1;
unsigned int task_type : 8;
#endif
} fc_dw0;
union {
unsigned int context_gpa_hi;
unsigned int magic_num;
} fc_dw1;
struct {
unsigned int context_gpa_lo;
} fc_dw2;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
unsigned short scq_num; /* SCQ num */
unsigned short rport_index; /* RPort */
#else
unsigned short rport_index; /* RPort */
unsigned short scq_num; /* SCQ num */
#endif
} fc_dw3;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
unsigned int pkt_type : 1; /* pkt type 0:ETH, 1:FC */
unsigned int pkt_cos : 3;
unsigned int rsvd2 : 1;
unsigned int csize : 2;
unsigned int service_type : 5;
unsigned int parent_xid : 20;
#else
unsigned int parent_xid : 20;
unsigned int service_type : 5;
unsigned int csize : 2;
unsigned int rsvd2 : 1;
unsigned int pkt_cos : 3; /* pkt cos,4:ETH, 0:FC */
unsigned int pkt_type : 1; /* pkt type 0:ETH, 1:FC */
#endif
} fc_dw4;
struct {
unsigned int rsvd;
} fc_dw5;
};
/* Root SQ WQE */
struct hifc_root_sqe_s {
/* Control Section */
struct hifcoe_wqe_ctrl_s ctrl_section;
struct hifc_root_sqe_task_section_s task_section;
struct hifc_root_sge_s sge;
struct hifc_root_sge_s ctx_sge;
};
/* Parent SQ WQE and Root SQ WQE Related function */
void hifc_build_service_wqe_ctrl_section(struct hifcoe_wqe_ctrl_s *v_wqe_cs,
unsigned int ts_size,
unsigned int bdsl);
void hifc_build_service_wqe_ts_common(struct hifcoe_sqe_ts_s *v_sqe_ts,
unsigned int rport_index,
unsigned short local_xid,
unsigned short remote_xid,
unsigned short data_len);
void hifc_build_els_gs_wqe_sge(struct hifcoe_sqe_s *v_sqe, void *v_buf_addr,
unsigned long long v_phyaddr,
unsigned int buf_len,
unsigned int xid, void *v_hba);
void hifc_build_els_wqe_ts_req(struct hifcoe_sqe_s *v_sqe,
void *v_sq_info, unsigned short cmnd,
unsigned int v_scqn, void *v_frame_pld);
void hifc_build_els_wqe_ts_rsp(struct hifcoe_sqe_s *v_sqe, void *v_sq_info,
void *v_frame_pld, unsigned short type,
unsigned short cmnd, unsigned int v_scqn);
void hifc_build_els_wqe_ts_magic_num(struct hifcoe_sqe_s *v_sqe,
unsigned short els_cmnd_type,
unsigned int v_magic_num);
void hifc_build_gs_wqe_ts_req(struct hifcoe_sqe_s *v_sqe,
unsigned int magic_num);
void hifc_build_bls_wqe_ts_req(struct hifcoe_sqe_s *v_sqe,
unsigned int abts_param,
unsigned int magic_num);
void hifc_build_service_wqe_root_ts(void *v_hba,
struct hifc_root_sqe_s *v_rt_sqe,
unsigned int rx_id, unsigned int rport_id,
unsigned int scq_num);
void hifc_build_service_wqe_root_sge(struct hifc_root_sqe_s *v_rt_sqe,
void *v_buf_addr,
unsigned long long v_phyaddr,
unsigned int buf_len,
void *v_hba);
void hifc_build_els_wqe_root_offload(struct hifc_root_sqe_s *v_rt_sqe,
dma_addr_t ctx_addr,
unsigned int xid);
void hifc_build_wqe_owner_pmsn(struct hifcoe_wqe_ctrl_s *v_wqe_cs,
unsigned short owner,
unsigned short pmsn);
void hifc_convert_parent_wqe_to_big_endian(struct hifcoe_sqe_s *v_sqe);
void hifc_convert_root_wqe_to_big_endian(struct hifc_root_sqe_s *v_sqe);
void hifc_build_icmnd_wqe_ts(void *v_hba, struct unf_frame_pkg_s *v_pkg,
struct hifcoe_sqe_ts_s *v_sqe_ts);
void hifc_build_icmnd_wqe_ts_header(struct unf_frame_pkg_s *v_pkg,
struct hifcoe_sqe_s *v_sqe,
unsigned char v_task_type,
unsigned short v_exi_base,
unsigned char v_port_idx);
void hifc_build_cmdqe_common(union hifc_cmdqe_u *cmdqe,
enum hifcoe_task_type_e task_type,
unsigned short rx_id);
void hifc_build_srq_wqe_ctrls(struct hifcoe_rqe_s *v_rqe, unsigned short owner,
unsigned short pmsn);
void hifc_build_common_wqe_ctrls(struct hifcoe_wqe_ctrl_s *v_ctrl_sl,
unsigned char v_task_len);
void hifc_build_service_wqe_ctx_sge(struct hifc_root_sqe_s *v_rt_sqe,
unsigned long long v_ctx_addr,
unsigned int buf_len);
void hifc_build_trd_twr_wqe_ctrls(struct unf_frame_pkg_s *v_pkg,
struct hifcoe_sqe_s *v_sqe);
#endif
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __HIFCOE_PARENT_CONTEXT_H__
#define __HIFCOE_PARENT_CONTEXT_H__
enum fc_parent_status_e {
FCOE_PARENT_STATUS_INVALID = 0,
FCOE_PARENT_STATUS_NORMAL,
FCOE_PARENT_STATUS_CLOSING
};
#define HIFCOE_DOUBLE_SGL (1)
#define HIFCOE_SINGLE_SGL (0)
#define HIFCOE_DIX_ALGORITHM_IP (1)
#define HIFCOE_DIX_ALGORITHM_CRC (0)
#define HIFCOE_PARENT_CONTEXT_KEY_ALIGN_SIZE (48)
#define HIFCOE_PARENT_CONTEXT_SRQ_QINFO_SIZE (8)
#define HIFCOE_PARENT_CONTEXT_TIMER_SIZE (32) /* 24+2*N,N=timer count */
#define HIFCOE_RQ_FILLED_OFFSET \
((u8)(u32)& \
(((struct hifcoe_sw_section_s *)0x0)->occupy_by_rqe_filled_flag))
#define HIFCOE_RW_LOCK_AREA_OFFSET \
((u8)(u32)&\
(((struct hifcoe_sw_section_s *)0x0)->occupy_by_rw_lock_area))
/* "fqg_level_eventiq_info_s" should be care if MAX_EVENTIQ_LEVEL is larger
* than 4
*/
#define MAX_EVENTIQ_LEVEL 4
#define MAX_EVENTIQ_LEVEL_SHIFT 2
#define SP_FEATRUE_EDTR 0x1
#define SP_FEATRUE_SEQ_CNT 0x2
#define MAX_PKT_SIZE_PER_DISPATCH (FC_PARENT_P->per_xmit_data_size)
#define MAX_PKT_SIZE_PER_DISPATCH_DIF_4K \
(MAX_PKT_SIZE_PER_DISPATCH + ((MAX_PKT_SIZE_PER_DISPATCH >> 12) << 3))
#define MAX_PKT_SIZE_PER_DISPATCH_DIF_512B \
(MAX_PKT_SIZE_PER_DISPATCH + ((MAX_PKT_SIZE_PER_DISPATCH >> 9) << 3))
#define MAX_PKT_SIZE_PER_DISPATCH_DIF(shift) \
(MAX_PKT_SIZE_PER_DISPATCH +\
((u32)((MAX_PKT_SIZE_PER_DISPATCH >> 9) >> (shift)) << 3))
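/*
 * Worked example, assuming a 64KB dispatch (per_xmit_data_size): each
 * protected sector carries an 8-byte DIF trailer, so
 *
 *	4KB sectors:  65536 + ((65536 >> 12) << 3) = 65536 + 128 bytes
 *	512B sectors: 65536 + ((65536 >> 9) << 3)  = 65536 + 1024 bytes
 *
 * MAX_PKT_SIZE_PER_DISPATCH_DIF(shift) generalizes this to a sector
 * size of (512 << shift) bytes.
 */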
/* immediate data DIF info definition in parent context */
struct immi_dif_info_s {
union {
u32 value;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 pdu_difx_cnt :8;
u32 sct_size :1;/* Sector size, 1: 4K; 0: 512 */
u32 dif_verify_type :2; /* verify type */
u32 dif_ins_rep_type:2; /* ins&rep type */
u32 io_1st_pdu :1;
/* Check blocks whose application tag contains
* 0xFFFF flag
*/
u32 difx_app_esc :1;
u32 difx_ref_esc :1;
/*
* Check blocks whose reference tag contains 0xFFFF flag
*/
u32 grd_ctrl :3; /* The DIF/DIX Guard control */
/* Bit 0: DIF/DIX guard verify algorithm control */
u32 grd_agm_ctrl :2;
/*
* Bit 1: DIF/DIX guard replace or insert algorithm
* control
*/
u32 grd_agm_ini_ctrl :3;
/* The DIF/DIX Reference tag control */
u32 ref_tag_ctrl :3;
/* Bit 0: scenario of the reference tag verify mode */
u32 ref_tag_mode :2;
/*
* Bit 1: scenario of the reference tag insert/replace
* mode
*/
/* 0: fixed; 1: increment */
u32 app_tag_ctrl :3; /* DIF/DIX APP TAG Control */
#else
u32 app_tag_ctrl :3; /* DIF/DIX APP TAG Control */
/* Bit 0: scenario of the reference tag verify mode */
u32 ref_tag_mode :2;
/*
* Bit 1: scenario of the reference tag insert/replace
* mode
*/
/* 0: fixed; 1: increment */
/* The DIF/DIX Reference tag control */
u32 ref_tag_ctrl :3;
u32 grd_agm_ini_ctrl :3;
/* Bit 0: DIF/DIX guard verify algorithm control */
u32 grd_agm_ctrl :2;
/*
* Bit 1: DIF/DIX guard replace or insert algorithm
* control
*/
u32 grd_ctrl :3; /* The DIF/DIX Guard control */
/*
* Check blocks whose reference tag contains 0xFFFF flag
*/
u32 difx_ref_esc :1;
/*
* Check blocks whose application tag contains 0xFFFF
* flag
*/
u32 difx_app_esc :1;
u32 io_1st_pdu :1;
u32 dif_ins_rep_type:2; /* ins&rep type */
u32 dif_verify_type :2; /* verify type */
u32 sct_size :1; /* Sector size, 1: 4K; 0: 512 */
u32 pdu_difx_cnt :8;
#endif
} info;
} dif_dw3;
union {
u32 value;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 difx_len :11; /* DIF/DIFX total length */
u32 difx_en :1; /* DIF/DIFX enable flag */
u32 rsv0 :4;
u32 dif_cnt :16;
#else
u32 dif_cnt :16;
u32 rsv0 :4;
u32 difx_en :1; /* DIF/DIFX enable flag */
u32 difx_len :11; /* DIF/DIFX total length */
#endif
} info;
} dif_other;
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rep_app_tag :16;
u32 cmp_app_tag :16;
#else
u32 cmp_app_tag :16;
u32 rep_app_tag :16;
#endif
/*
 * The ref tag value used for verify compare; replacing or inserting
 * the ref tag is not supported
*/
u32 cmp_ref_tag;
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 cmp_app_tag_msk :16;
u32 rsv1 :16;
#else
u32 rsv1 :16;
u32 cmp_app_tag_msk :16;
#endif
};
/* parent context SW section definition: SW(80B) */
struct hifcoe_sw_section_s {
/* RO fields */
u32 scq_num_rcv_cmd; /* scq number used for cmd receive */
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 xid; /* driver init */
#else
struct {
u32 xid :13;
u32 vport :7;
u32 csctrl :8;
u32 rsvd0 :4;
} sw_ctxt_vport_xid;
#endif
u32 cid; /* ucode init */
u16 conn_id;
u16 immi_rq_page_size;
u16 immi_taskid_min;
u16 immi_taskid_max;
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 vlan_id : 16; /* Vlan ID */
/* physical port to receive and transmit packets. */
u32 port_id : 4;
/*
 * new srq offset. Ucode uses the new SRQ to receive ELS/GS with big payloads.
*/
u32 rsvd1 : 5;
u32 srr_support : 2; /* sequence retransmission support flag */
u32 srv_type : 5;
#else
union {
u32 pctxt_val0;
struct {
u32 srv_type : 5; /* driver init */
/* sequence retransmission support flag */
u32 srr_support : 2;
u32 rsvd1 : 5;
u32 port_id : 4; /* driver init */
u32 vlan_id : 16; /* driver init */
} dw;
} sw_ctxt_misc;
#endif
u16 oqid_rd;
u16 oqid_wr;
u32 per_xmit_data_size;
/* RW fields */
u32 cmd_scq_gpa_h;
u32 cmd_scq_gpa_l;
/* E_D_TOV timer value: set in ms by the driver */
u32 e_d_tov_timer_val;
/*
 * mfs-unaligned bytes per 64KB dispatch; equal to
* "MAX_PKT_SIZE_PER_DISPATCH%info->parent->tx_mfs"
*/
u16 mfs_unaligned_bytes;
u16 tx_mfs; /* remote port max receive fc payload length */
/* max data len allowed in the xfer_rdy disabled scenario */
u32 xfer_rdy_dis_max_len_remote;
u32 xfer_rdy_dis_max_len_local;
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
/* Double or single SGL, 1: double; 0: single */
u32 sgl_num :1;
u32 write_xfer_rdy :1; /* WRITE Xfer_Rdy disable or enable */
u32 rec_support :1; /* REC support flag */
u32 conf_support :1; /* Response confirm support flag */
u32 vlan_enable :1; /* Vlan enable flag */
u32 e_d_tov :1; /* E_D_TOV Resolution, 0: ms, 1: us*/
/* seq_cnt, 1: increment supported, 0: increment not supported */
u32 seq_cnt :1;
/* 0:Target, 1:Initiator, 2:Target&Initiator */
u32 work_mode :2;
/* used for parent context cache consistency judgment, 1: done */
u32 flush_done :1;
u32 oq_cos_cmd :3; /* esch oq cos for cmd/xferrdy/rsp */
u32 oq_cos_data :3; /* esch oq cos for data */
u32 cos :3; /* doorbell cos value */
u32 status :8; /* status of flow*/
u32 rsvd4 :2;
u32 priority :3; /* vlan priority */
#else
union {
struct {
u32 priority : 3; /* vlan priority */
u32 rsvd4 : 2;
u32 status : 8; /* status of flow*/
u32 cos : 3; /* doorbell cos value */
u32 oq_cos_data : 3; /* esch oq cos for data */
/* esch oq cos for cmd/xferrdy/rsp */
u32 oq_cos_cmd : 3;
/*
 * used for parent context cache consistency judgment,
* 1: done
*/
u32 flush_done : 1;
/* 0:Target, 1:Initiator, 2:Target&Initiator */
u32 work_mode : 2;
u32 seq_cnt : 1; /* seq_cnt */
u32 e_d_tov : 1; /* E_D_TOV resolution */
u32 vlan_enable : 1; /* Vlan enable flag */
/* Response confirm support flag */
u32 conf_support : 1;
u32 rec_support : 1; /* REC support flag */
/* WRITE Xfer_Rdy disable or enable */
u32 write_xfer_rdy : 1;
/* Double or single SGL, 1: double; 0: single */
u32 sgl_num : 1;
} dw;
u32 pctxt_val1;
} sw_ctxt_config;
#endif
/* immediate data DIF control info (20B) */
struct immi_dif_info_s immi_dif_info;
};
struct hifcoe_hw_rsvd_queue_s {
/* bitmap[0]:255-192 */
/* bitmap[1]:191-128 */
/* bitmap[2]:127-64 */
/* bitmap[3]:63-0 */
u64 seq_id_bitmap[4];
struct {
u64 last_req_seq_id : 8;
u64 xid : 20;
u64 rsvd0 : 36;
} wd0;
};
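/*
 * Illustrative mapping for seq_id_bitmap, following the word layout
 * noted above (bitmap[0] holds bits 255..192, ..., bitmap[3] holds
 * bits 63..0):
 *
 *	word = 3 - (seq_id / 64);
 *	bit  = seq_id % 64;
 *
 * e.g. seq_id 200 lands in seq_id_bitmap[0], bit 8.
 */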
struct hifcoe_sq_qinfo_s {
u64 rsvd_0 : 10;
/* 0: get pmsn from queue header; 1: get pmsn from ucode */
u64 pmsn_type : 1;
u64 rsvd_1 : 4;
u64 cur_wqe_o : 1; /* should be opposite from loop_o */
u64 rsvd_2 : 48;
u64 cur_sqe_gpa;
u64 pmsn_gpa; /* sq's queue header gpa */
u64 sqe_dmaattr_idx : 6;
u64 sq_so_ro : 2;
u64 rsvd_3 : 2;
u64 ring : 1; /* 0: link; 1: ring */
u64 loop_o : 1; /* init to be the first round o-bit */
u64 rsvd_4 : 4;
u64 zerocopy_dmaattr_idx : 6;
u64 zerocopy_so_ro : 2;
u64 parity : 8;
u64 rsvd_5 : 26;
u64 pcie_template : 6;
};
struct hifcoe_cq_qinfo_s {
u64 pcie_template_hi : 3;
u64 parity_2 : 1;
u64 cur_cqe_gpa : 60;
u64 pi : 15;
u64 pi_o : 1;
u64 ci : 15;
u64 ci_o : 1;
/* if init_mode = 2, this is msi/msi-x; otherwise the low 5 bits give c_eqn */
u64 c_eqn_msi_x : 10;
u64 parity_1 : 1;
/* 0: get ci from queue header; 1: get ci from ucode */
u64 ci_type : 1;
u64 cq_depth : 3; /* valid when ring = 1 */
u64 armq : 1; /* 0: IDLE state; 1: NEXT state */
u64 cur_cqe_cnt : 8;
u64 cqe_max_cnt : 8;
u64 cqe_dmaattr_idx : 6;
u64 cq_so_ro : 2;
u64 init_mode : 2; /* 1: armQ; 2: msi/msi-x; others: rsvd */
u64 next_o : 1; /* next page valid o-bit */
u64 loop_o : 1; /* init to be the first round o-bit */
u64 next_cq_wqe_page_gpa : 52;
u64 pcie_template_lo : 3;
u64 parity_0 : 1;
u64 ci_gpa : 60; /* cq's queue header gpa */
};
struct hifcoe_scq_qinfo_s {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
union {
struct {
u64 parity : 6;
u64 rq_th2_preld_cache_num : 5;
u64 rq_th1_preld_cache_num : 5;
u64 rq_th0_preld_cache_num : 5;
u64 rq_min_preld_cache_num : 4;
u64 sq_th2_preld_cache_num : 5;
u64 sq_th1_preld_cache_num : 5;
u64 sq_th0_preld_cache_num : 5;
u64 sq_min_preld_cache_num : 4;
u64 scq_n : 20; /* scq number */
} info;
u64 pctxt_val1;
} hw_scqc_config;
#else
union {
struct {
u64 scq_n : 20; /* scq number */
u64 sq_min_preld_cache_num : 4;
u64 sq_th0_preld_cache_num : 5;
u64 sq_th1_preld_cache_num : 5;
u64 sq_th2_preld_cache_num : 5;
u64 rq_min_preld_cache_num : 4;
u64 rq_th0_preld_cache_num : 5;
u64 rq_th1_preld_cache_num : 5;
u64 rq_th2_preld_cache_num : 5;
u64 parity : 6;
} info;
u64 pctxt_val1;
} hw_scqc_config;
#endif
};
struct hifcoe_srq_qinfo_s {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u64 srqc_gpa : 60;
u64 parity : 4;
#else
u64 parity : 4;
u64 srqc_gpa : 60;
#endif
};
/* here is the layout of service type 12/13 */
struct hifcoe_parent_context_s {
u8 key[HIFCOE_PARENT_CONTEXT_KEY_ALIGN_SIZE];
struct hifcoe_scq_qinfo_s resp_scq_qinfo;
struct hifcoe_srq_qinfo_s imm_srq_info;
struct hifcoe_sq_qinfo_s sq_qinfo;
u8 timer_section[HIFCOE_PARENT_CONTEXT_TIMER_SIZE];
struct hifcoe_hw_rsvd_queue_s hw_rsvdq;
struct hifcoe_srq_qinfo_s els_srq_info;
struct hifcoe_sw_section_s sw_section;
};
#endif
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __HIFCOE_WQE_H__
#define __HIFCOE_WQE_H__
/*
 * TASK TYPE: to stay compatible with EDA, please add new types before BUTT.
*/
enum hifcoe_task_type_e {
HIFCOE_TASK_T_EMPTY = 0, /* SCQE TYPE: means task type not initialized */
HIFCOE_TASK_T_IWRITE = 1, /* SQE TYPE: ini send FCP Write Command */
HIFCOE_TASK_T_IREAD = 2,/* SQE TYPE: ini send FCP Read Command */
/* SCQE TYPE: ini recv fcp rsp for IREAD/IWRITE/ITMF*/
HIFCOE_TASK_T_IRESP = 3,
HIFCOE_TASK_T_TCMND = 4,/* NA */
HIFCOE_TASK_T_TREAD = 5,/* SQE TYPE: tgt send FCP Read Command */
/* SQE TYPE: tgt send FCP Write Command (XFER_RDY) */
HIFCOE_TASK_T_TWRITE = 6,
HIFCOE_TASK_T_TRESP = 7,/* SQE TYPE: tgt send fcp rsp of Read/Write*/
HIFCOE_TASK_T_TSTS = 8, /* SCQE TYPE: tgt sts for TREAD/TWRITE/TRESP*/
HIFCOE_TASK_T_ABTS = 9, /* SQE TYPE: ini send abts request Command */
HIFCOE_TASK_T_IELS = 10,/* NA */
HIFCOE_TASK_T_ITMF = 11,/* SQE TYPE: ini send tmf request Command */
HIFCOE_TASK_T_CLEAN_UP = 12,/* NA */
HIFCOE_TASK_T_CLEAN_UP_ALL = 13,/* NA */
HIFCOE_TASK_T_UNSOLICITED = 14, /* NA */
HIFCOE_TASK_T_ERR_WARN = 15,/* NA */
HIFCOE_TASK_T_SESS_EN = 16, /* CMDQ TYPE: enable session */
HIFCOE_TASK_T_SESS_DIS = 17,/* NA */
HIFCOE_TASK_T_SESS_DEL = 18,/* NA */
HIFCOE_TASK_T_RQE_REPLENISH = 19, /* NA */
HIFCOE_TASK_T_RCV_TCMND = 20, /* SCQE TYPE: tgt recv fcp cmd */
HIFCOE_TASK_T_RCV_ELS_CMD = 21, /* SCQE TYPE: tgt recv els cmd */
HIFCOE_TASK_T_RCV_ABTS_CMD = 22,/* SCQE TYPE: tgt recv abts cmd */
/* SCQE TYPE: tgt recv immediate data */
HIFCOE_TASK_T_RCV_IMMIDIATE = 23,
/*
 * SQE TYPE: send ELS rsp. PLOGI_ACC, PRLI_ACC will carry the parent
* context parameter indication.
*/
HIFCOE_TASK_T_ELS_RSP = 24,
HIFCOE_TASK_T_ELS_RSP_STS = 25, /* SCQE TYPE: ELS rsp sts */
HIFCOE_TASK_T_ABTS_RSP = 26,/* CMDQ TYPE: tgt send abts rsp */
HIFCOE_TASK_T_ABTS_RSP_STS = 27,/* SCQE TYPE: tgt abts rsp sts*/
HIFCOE_TASK_T_ABORT = 28, /* CMDQ TYPE: tgt send Abort Command */
HIFCOE_TASK_T_ABORT_STS = 29, /* SCQE TYPE: Abort sts */
HIFCOE_TASK_T_ELS = 30, /* SQE TYPE: send ELS request Command */
HIFCOE_TASK_T_RCV_ELS_RSP = 31, /* SCQE TYPE: recv ELS response */
HIFCOE_TASK_T_GS = 32, /* SQE TYPE: send GS request Command */
HIFCOE_TASK_T_RCV_GS_RSP = 33, /* SCQE TYPE: recv GS response */
HIFCOE_TASK_T_SESS_EN_STS = 34, /* SCQE TYPE: enable session sts */
HIFCOE_TASK_T_SESS_DIS_STS = 35,/* NA */
HIFCOE_TASK_T_SESS_DEL_STS = 36,/* NA */
HIFCOE_TASK_T_RCV_ABTS_RSP = 37,/* SCQE TYPE: ini recv abts rsp */
HIFCOE_TASK_T_BUFFER_CLEAR = 38,/* CMDQ TYPE: Buffer Clear */
HIFCOE_TASK_T_BUFFER_CLEAR_STS = 39,/* SCQE TYPE: Buffer Clear sts */
HIFCOE_TASK_T_FLUSH_SQ = 40,/* CMDQ TYPE: flush sq */
HIFCOE_TASK_T_FLUSH_SQ_STS = 41,/* SCQE TYPE: flush sq sts */
HIFCOE_TASK_T_SESS_RESET = 42, /* SQE TYPE: Reset session */
HIFCOE_TASK_T_SESS_RESET_STS = 43, /* SCQE TYPE: Reset session sts */
HIFCOE_TASK_T_RQE_REPLENISH_STS = 44, /* NA */
HIFCOE_TASK_T_DUMP_EXCH = 45, /* CMDQ TYPE: dump exch */
HIFCOE_TASK_T_INIT_SRQC = 46, /* CMDQ TYPE: init SRQC */
HIFCOE_TASK_T_CLEAR_SRQ = 47, /* CMDQ TYPE: clear SRQ */
HIFCOE_TASK_T_CLEAR_SRQ_STS = 48, /* SCQE TYPE: clear SRQ sts */
HIFCOE_TASK_T_INIT_SCQC = 49, /* CMDQ TYPE: init SCQC */
HIFCOE_TASK_T_DEL_SCQC = 50,/* CMDQ TYPE: delete SCQC */
HIFCOE_TASK_T_TMF_RESP = 51,/* SQE TYPE: tgt send tmf rsp */
HIFCOE_TASK_T_DEL_SRQC = 52,/* CMDQ TYPE: delete SRQC */
/* SCQE TYPE: tgt recv continued immediate data */
HIFCOE_TASK_T_RCV_IMMI_CONTINUE = 53,
HIFCOE_TASK_T_ITMF_RESP = 54, /* SCQE TYPE: ini recv tmf rsp */
HIFCOE_TASK_T_ITMF_MARKER_STS = 55,/* SCQE TYPE: tmf marker sts */
HIFCOE_TASK_T_TACK = 56,
HIFCOE_TASK_T_SEND_AEQERR = 57,
HIFCOE_TASK_T_ABTS_MARKER_STS = 58,/* SCQE TYPE: abts marker sts */
HIFCOE_TASK_T_FLR_CLEAR_IO = 59,/* FLR clear io type*/
HIFCOE_TASK_T_BUTT
};
/*
* error code for error report
*/
enum hifcoe_err_code_e {
FCOE_CQE_COMPLETED = 0, /* Successful */
FCOE_SESS_HT_INSERT_FAIL = 1,/* Offload fail: hash insert fail */
FCOE_SESS_HT_INSERT_DUPLICATE = 2, /* Offload fail: duplicate offload */
FCOE_SESS_HT_BIT_SET_FAIL = 3, /* Offload fail: bloom filter set fail */
/* Offload fail: hash delete fail(duplicate delete) */
FCOE_SESS_HT_DELETE_FAIL = 4,
FCOE_CQE_BUFFER_CLEAR_IO_COMPLETED = 5, /* IO done in buffer clear */
/* IO done in session rst mode=1 */
FCOE_CQE_SESSION_ONLY_CLEAR_IO_COMPLETED = 6,
/* IO done in session rst mode=3 */
FCOE_CQE_SESSION_RST_CLEAR_IO_COMPLETED = 7,
FCOE_CQE_TMF_RSP_IO_COMPLETED = 8, /* IO done in tgt tmf rsp */
FCOE_CQE_TMF_IO_COMPLETED = 9, /* IO done in ini tmf */
FCOE_CQE_DRV_ABORT_IO_COMPLETED = 10,/* IO done in tgt abort */
/* IO done in fcp rsp process. Used for the scenario:
* 1.abort before cmd
* 2.send fcp rsp directly after recv cmd
*/
FCOE_CQE_DRV_ABORT_IO_IN_RSP_COMPLETED = 11,
/* IO done in fcp cmd process. Used for the scenario:
* 1.abort before cmd
* 2.child setup fail
*/
FCOE_CQE_DRV_ABORT_IO_IN_CMD_COMPLETED = 12,
FCOE_CQE_WQE_FLUSH_IO_COMPLETED = 13,/* IO done in FLUSH SQ */
/* fcp data format check: DIFX check error */
FCOE_ERROR_CODE_DATA_DIFX_FAILED = 14,
/* fcp data format check: task_type is not read */
FCOE_ERROR_CODE_DATA_TASK_TYPE_INCORRECT = 15,
/* fcp data format check: data offset is not continuous */
FCOE_ERROR_CODE_DATA_OOO_RO = 16,
/* fcp data format check: data is over run */
FCOE_ERROR_CODE_DATA_EXCEEDS_DATA2TRNS = 17,
/* fcp rsp format check: payload is too short */
FCOE_ERROR_CODE_FCP_RSP_INVALID_LENGTH_FIELD = 18,
/* fcp rsp format check: fcp_conf needed, but exch doesn't hold seq
* initiative
*/
FCOE_ERROR_CODE_FCP_RSP_CONF_REQ_NOT_SUPPORTED_YET = 19,
/* fcp rsp format check: fcp_conf is required, but it's the last seq */
FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ = 20,
/* xfer rdy format check: payload is too short */
FCOE_ERROR_CODE_XFER_INVALID_PAYLOAD_SIZE = 21,
/* xfer rdy format check: last data out hasn't finished */
FCOE_ERROR_CODE_XFER_PEND_XFER_SET = 22,
/* xfer rdy format check: data offset is not continuous */
FCOE_ERROR_CODE_XFER_OOO_RO = 23,
/* xfer rdy format check: burst len is 0 */
FCOE_ERROR_CODE_XFER_NULL_BURST_LEN = 24,
FCOE_ERROR_CODE_REC_TIMER_EXPIRE = 25, /* Timer expire: REC_TIMER */
FCOE_ERROR_CODE_E_D_TIMER_EXPIRE = 26, /* Timer expire: E_D_TIMER */
FCOE_ERROR_CODE_ABORT_TIMER_EXPIRE = 27,/* Timer expire: Abort timer */
/* Abort IO magic number mismatch */
FCOE_ERROR_CODE_ABORT_MAGIC_NUM_NOT_MATCH = 28,
/* RX immediate data cmd pkt child setup fail */
FCOE_IMMI_CMDPKT_SETUP_FAIL = 29,
/* RX fcp data sequence id not equal */
FCOE_ERROR_CODE_DATA_SEQ_ID_NOT_EQUAL = 30,
FCOE_ELS_GS_RSP_EXCH_CHECK_FAIL = 31,/* ELS/GS exch info check fail */
FCOE_CQE_ELS_GS_SRQE_GET_FAIL = 32, /* ELS/GS process get SRQE fail */
FCOE_CQE_DATA_DMA_REQ_FAIL = 33, /* SMF soli-childdma rsp error */
FCOE_CQE_SESSION_CLOSED = 34,/* Session is closed */
FCOE_SCQ_IS_FULL = 35, /* SCQ is full */
FCOE_SRQ_IS_FULL = 36, /* SRQ is full */
FCOE_ERROR_DUCHILDCTX_SETUP_FAIL = 37, /* dpchild ctx setup fail */
FCOE_ERROR_INVALID_TXMFS = 38, /* invalid txmfs */
/* offload fail, lack of SCQE, reported through AEQ */
FCOE_ERROR_OFFLOAD_LACKOF_SCQE_FAIL = 39,
FCOE_ERROR_INVALID_TASK_ID = 40, /* tx invalid task id */
FCOE_ERROR_INVALID_PKT_LEN = 41, /* tx els gs packet len check */
FCOE_CQE_ELS_GS_REQ_CLR_IO_COMPLETED = 42, /* IO done in els gs tx */
FCOE_CQE_ELS_RSP_CLR_IO_COMPLETED = 43, /* IO done in els rsp tx */
FCOE_ERROR_CODE_RESID_UNDER_ERR = 44 /* FCP RSP RESID ERROR */
};
/* AEQ EVENT TYPE */
enum hifcoe_aeq_evt_type_e {
/*
 * SCQ and SRQ not enough; HOST will initiate an operation on the
 * associated SCQ/SRQ
*/
FC_AEQ_EVENT_QUEUE_ERROR = 48,
/* WQE MSN check error, HOST will reset the port */
FC_AEQ_EVENT_WQE_FATAL_ERROR = 49,
/* serious chip error, HOST will reset chip */
FC_AEQ_EVENT_CTX_FATAL_ERROR = 50,
FC_AEQ_EVENT_OFFLOAD_ERROR = 51,
FC_FC_AEQ_EVENT_TYPE_LAST
};
enum hifcoe_aeq_evt_err_code_e {
/* detail type of resource lack */
FC_SCQ_IS_FULL_ERR = 0,
FC_SRQ_IS_FULL_ERR,
/* detail type of FC_AEQ_EVENT_WQE_FATAL_ERROR */
FC_SQE_CHILD_SETUP_WQE_MSN_ERR = 2,
FC_SQE_CHILD_SETUP_WQE_GPA_ERR,
FC_CMDPKT_CHILD_SETUP_INVALID_WQE_ERR_1,
FC_CMDPKT_CHILD_SETUP_INVALID_WQE_ERR_2,
FC_CLEAEQ_WQE_ERR,
FC_WQEFETCH_WQE_MSN_ERR,
FC_WQEFETCH_QUINFO_ERR,
/* detail type of FC_AEQ_EVENT_CTX_FATAL_ERROR */
FC_SCQE_ERR_BIT_ERR = 9,
FC_UPDMA_ADDR_REQ_SRQ_ERR,
FC_SOLICHILDDMA_ADDR_REQ_ERR,
FC_UNSOLICHILDDMA_ADDR_REQ_ERR,
FC_SQE_CHILD_SETUP_QINFO_ERR_1,
FC_SQE_CHILD_SETUP_QINFO_ERR_2,
FC_CMDPKT_CHILD_SETUP_QINFO_ERR_1,
FC_CMDPKT_CHILD_SETUP_QINFO_ERR_2,
FC_CMDPKT_CHILD_SETUP_PMSN_ERR,
FC_CLEAEQ_CTX_ERR,
FC_WQEFETCH_CTX_ERR,
FC_FLUSH_QPC_ERR_LQP,
FC_FLUSH_QPC_ERR_SMF,
FC_PREFETCH_QPC_ERR_1,
FC_PREFETCH_QPC_ERR_2,
FC_PREFETCH_QPC_ERR_3,
FC_PREFETCH_QPC_ERR_4,
FC_PREFETCH_QPC_ERR_5,
FC_PREFETCH_QPC_ERR_6,
FC_PREFETCH_QPC_ERR_7,
FC_PREFETCH_QPC_ERR_8,
FC_PREFETCH_QPC_ERR_9,
FC_PREFETCH_QPC_ERR_10,
FC_PREFETCH_QPC_ERR_11,
FC_PREFETCH_QPC_ERR_DEFAULT,
FC_CHILDHASH_INSERT_SW_ERR,
FC_CHILDHASH_LOOKUP_SW_ERR,
FC_CHILDHASH_DEL_SW_ERR,
FC_FLOWHASH_INSERT_SW_ERR,
FC_FLOWHASH_LOOKUP_SW_ERR,
FC_FLOWHASH_DEL_SW_ERR,
FC_FLUSH_QPC_ERR_USED,
FC_FLUSH_QPC_ERR_OUTER_LOCK,
FC_AEQ_EVT_ERR_CODE_BUTT
};
/* AEQ data structure */
struct hifcoe_aqe_data_s {
union {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 evt_code: 8;
u32 rsvd: 8;
u32 conn_id : 16; /* conn_id */
#else
u32 conn_id : 16;
u32 rsvd: 8;
u32 evt_code: 8;
#endif
} wd0;
u32 data0;
};
union {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd: 12;
u32 xid : 20; /* xid */
#else
u32 xid : 20; /* xid */
u32 rsvd: 12;
#endif
} wd1;
u32 data1;
};
};
/* Control Section: Common Header */
struct hifcoe_wqe_ctrl_ch_s {
union {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 owner : 1;
u32 ctrl_sl : 2;
u32 csl : 2;
u32 dif_sl: 3;
u32 cr: 1;
u32 df: 1;
u32 va: 1;
u32 tsl : 5;
u32 cf: 1;
u32 wf: 1;
u32 rsvd0 : 4;
u32 drv_sl: 2;
u32 bdsl : 8;
#else
u32 bdsl : 8;
u32 drv_sl: 2;
u32 rsvd0 : 4;
u32 wf: 1;
u32 cf: 1;
u32 tsl : 5;
u32 va: 1;
u32 df: 1;
u32 cr: 1;
u32 dif_sl: 3;
u32 csl : 2;
u32 ctrl_sl: 2;
u32 owner : 1;
#endif
} wd0;
u32 ctrl_ch_val;
};
};
/* Control Section: Queue Specific Field */
struct hifcoe_wqe_ctrl_qsf_s {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 dump_wqe_sn : 16;
u32 wqe_sn:16;
#else
u32 wqe_sn:16;
u32 dump_wqe_sn : 16;
#endif
};
/* DIF info definition in WQE */
struct hifcoe_fc_dif_info_s {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
/* difx enable flag: 1'b0: disable; 1'b1: enable */
u32 difx_en : 1;
/*
 * sector size: 1'b0: 512B; 1'b1: 4KB.
 */
u32 sct_size : 1;
u32 difx_len : 11;
/*
* The DIFX verify type: 2'b00: Type0, 2'b01: Type 1, 2'b10:
* Type 2, 2'b11: Type 3
*/
u32 dif_verify_type : 2;
/*
* The DIFX insert and replace type: 2'b00: Type0, 2'b01: Type 1
* , 2'b10: Type 2, 2'b11: Type 3
*/
u32 dif_ins_rep_type : 2;
u32 difx_app_esc : 1;
u32 difx_ref_esc : 1;
u32 grd_ctrl : 3;
u32 grd_agm_ctrl : 2;
u32 grd_agm_ini_ctrl : 3;
u32 ref_tag_ctrl : 3;
u32 ref_tag_mode : 2;
#else
u32 ref_tag_mode : 2;
u32 ref_tag_ctrl : 3;
u32 grd_agm_ini_ctrl : 3;
u32 grd_agm_ctrl : 2;
u32 grd_ctrl : 3;
u32 difx_ref_esc : 1;
u32 difx_app_esc : 1;
u32 dif_ins_rep_type : 2;
u32 dif_verify_type : 2;
u32 difx_len : 11;
u32 sct_size : 1;
u32 difx_en : 1;
#endif
} wd0;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 app_tag_ctrl : 3;
u32 vpid : 7;
u32 lun_qos_en : 2;
u32 rsvd : 4;
u32 cmp_app_tag_msk : 16;
#else
u32 cmp_app_tag_msk : 16;
u32 rsvd : 4;
u32 lun_qos_en : 2;
u32 vpid : 7;
u32 app_tag_ctrl : 3;
#endif
} wd1;
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u16 rep_app_tag;
u16 cmp_app_tag;
#else
u16 cmp_app_tag;
u16 rep_app_tag;
#endif
u32 cmp_ref_tag;
u32 rep_ref_tag;
};
/* Task Section: TMF SQE for INI */
struct hifcoe_tmf_info_s {
union {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 reset_exch_start :16;
u32 reset_exch_end :16;
#else
u32 reset_exch_end :16;
u32 reset_exch_start :16;
#endif
} bs;
u32 value;
} w0;
union {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd0 :5;
u32 marker_sts :1;
u32 reset_type :2;
u32 reset_did :24;
#else
u32 reset_did :24;
u32 reset_type :2;
u32 marker_sts :1;
u32 rsvd0 :5;
#endif
} bs;
u32 value;
} w1;
union {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd0 :8;
u32 reset_sid :24;
#else
u32 reset_sid :24;
u32 rsvd0 :8;
#endif
} bs;
u32 value;
} w2;
u8 reset_lun[8];
};
/* Task Section: CMND SQE for INI */
struct hifcoe_sqe_icmnd_s {
u8 fcp_cmnd_iu[48];
union {
struct hifcoe_fc_dif_info_s dif_info;
struct hifcoe_tmf_info_s tmf;
} info;
u32 magic_num;
u32 rsp_gpa_hi;
u32 rsp_gpa_lo;
};
/* Task Section: ABTS SQE */
struct hifcoe_sqe_abts_s {
u32 fh_parm_abts;
u32 magic_num;
};
struct hifcoe_keys_s {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsv : 16;
u32 smac0 : 8;
u32 smac1 : 8;
#else
u32 smac1 : 8;
u32 smac0 : 8;
u32 rsv : 16;
#endif
} wd0;
u8 smac[4];
u8 dmac[6];
u8 sid[3];
u8 did[3];
u32 svlan;
u32 cvlan;
};
/* BDSL: Session Enable WQE */
/* the keys field only uses 26 bytes of room */
struct hifcoe_cmdqe_sess_en_s {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 task_type : 8;
u32 rsvd0 : 8;
u32 rx_id : 16;
#else
u32 rx_id : 16;
u32 rsvd0 : 8;
u32 task_type : 8;
#endif
} wd0;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd1 : 12;
u32 cid : 20;
#else
u32 cid : 20;
u32 rsvd1 : 12;
#endif
} wd1;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 scqn :16;
u32 conn_id :16;
#else
u32 conn_id :16;
u32 scqn :16;
#endif
} wd2;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd3 :12;
u32 xid_p :20;
#else
u32 xid_p :20;
u32 rsvd3 :12;
#endif
} wd3;
u32 context_gpa_hi;
u32 context_gpa_lo;
struct hifcoe_keys_s keys;
};
/* Control Section */
struct hifcoe_wqe_ctrl_s {
struct hifcoe_wqe_ctrl_ch_s ch;
struct hifcoe_wqe_ctrl_qsf_s qsf;
};
struct hifcoe_sqe_els_rsp_s {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
/*
 * ELS RSP packet payload. The ELS RSP payload GPA is stored in BDSL;
 * ucode uses child setup to send the data (not including fc_hdr/eth_hdr)
*/
u32 data_len:16;
u32 echo_flag :16;
#else
u32 echo_flag :16;
u32 data_len:16;
#endif
} wd0;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
/* Para Update: drv indicates whether Parent Context para needs
 * to be updated.
 * 00---no update
 * 01---send PLOGI_ACC, need to update Port para
 * 10---send PRLI_ACC, need to update process para
* 11---Reserved
*/
u32 para_update :2;
u32 clr_io :1;
u32 lp_bflag:1; /* use for loopback */
u32 rsvd1 :28;
#else
u32 rsvd1 :28;
u32 lp_bflag:1;
u32 clr_io :1;
u32 para_update :2;
#endif
} wd1;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 tx_mfs :16;
u32 rsvd2 :14;
u32 e_d_tov :1;
u32 seq_cnt :1;
#else
u32 seq_cnt :1;
u32 e_d_tov :1;
u32 rsvd2 :14;
u32 tx_mfs :16;
#endif
} wd2;
u32 e_d_tov_timer_val;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 immi_taskid_start:16;
u32 immi_taskid_cnt :13;
u32 xfer_dis:1;
u32 rec :1;
u32 conf:1;
#else
u32 conf:1;
u32 rec :1;
u32 xfer_dis:1;
u32 immi_taskid_cnt :13;
u32 immi_taskid_start:16;
#endif
} wd4;
u32 first_burst_len;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 reset_exch_start :16;
u32 reset_exch_end:16;
#else
u32 reset_exch_end:16;
u32 reset_exch_start :16;
#endif
} wd6;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd:16;
u32 scqn:16;
#else
u32 scqn:16;
u32 rsvd:16;
#endif
} wd7;
u32 magic_num;
u32 magic_local;
u32 magic_remote;
u32 ts_rcv_echo_req;
};
struct hifcoe_sqe_reset_session_s {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 reset_exch_start :16;
u32 reset_exch_end:16;
#else
u32 reset_exch_end:16;
u32 reset_exch_start :16;
#endif
} wd0;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd:6;
/*
* 1: clean io;
* 2: delete session;
* 3: clean io&delete session
*/
u32 mode:2;
u32 reset_did :24;
#else
u32 reset_did :24;
u32 mode:2;
u32 rsvd:6;
#endif
} wd1;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd:8;
u32 reset_sid :24;
#else
u32 reset_sid :24;
u32 rsvd:8;
#endif
} wd2;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd:16;
u32 scqn:16;
#else
u32 scqn:16;
u32 rsvd:16;
#endif
} wd3;
};
struct hifcoe_sqe_t_els_gs_s {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
/*
 * ELS/GS req packet payload. The ELS/GS payload GPA is stored in BDSL;
 * ucode uses child setup to send the data (not including fc_hdr/eth_hdr)
*/
u16 data_len;
u16 echo_flag; /* echo flag */
#else
u16 echo_flag;
u16 data_len;
#endif
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
/* Para Update: drv indicates whether Parent Context para needs
 * to be updated.
 * 00---no update
 * 01---send PRLI Req, need to update Port para
* 10---Reserved
* 11---Reserved
*/
u32 para_update :2;
u32 clr_io :1;
u32 lp_bflag:1; /* use for loopback */
u32 rec_support :1;
u32 rec_flag:1;
u32 orign_oxid :16;
u32 rsvd1 :10;
#else
u32 rsvd1 :10;
u32 orign_oxid :16;
u32 rec_flag:1;
u32 rec_support :1;
u32 lp_bflag:1;
u32 clr_io :1;
u32 para_update :2;
#endif
} wd4;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 tx_mfs :16;
u32 rsvd2 :14;
u32 e_d_tov :1;
u32 seq_cnt :1;
#else
u32 seq_cnt :1;
u32 e_d_tov :1;
u32 rsvd2 :14;
u32 tx_mfs :16;
#endif
} wd5;
u32 e_d_tov_timer_val;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 reset_exch_start :16;
u32 reset_exch_end:16;
#else
u32 reset_exch_end:16;
u32 reset_exch_start :16;
#endif
} wd6;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd:16;
u32 scqn:16;
#else
u32 scqn:16;
u32 rsvd:16;
#endif
} wd7;
u32 magic_num;
};
struct hifcoe_sqe_els_gs_elsrsp_comm_s {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u16 data_len;
u16 rsvd;
#else
u16 rsvd;
u16 data_len;
#endif
};
/* SQE Task Section's Contents except Common Header */
union hifcoe_sqe_ts_cont_u {
struct hifcoe_sqe_icmnd_s icmnd;
struct hifcoe_sqe_abts_s abts;
struct hifcoe_sqe_els_rsp_s els_rsp;
struct hifcoe_sqe_t_els_gs_s t_els_gs;
struct hifcoe_sqe_els_gs_elsrsp_comm_s els_gs_elsrsp_comm;
struct hifcoe_sqe_reset_session_s reset_session;
u32 value[16];
};
struct hifcoe_sqe_ts_s {
/* SQE Task Section's Common Header */
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 task_type :8;
u32 rsvd:5; /* used for loopback saving bdsl's num */
/* cdb_type = 0:CDB_LEN = 16B, cdb_type = 1:CDB_LEN = 32B */
u32 cdb_type:1;
/* standard immediate data flag, used with local-xid for initiator */
u32 immi_std:1;
/*
 * CRC err inject flag: set by drv, used by ucode when sending the
 * first packet of the WQE
*/
u32 crc_inj :1;
u32 local_xid :16; /* local exch_id */
#else
u32 local_xid :16;
u32 crc_inj :1;
u32 immi_std:1;
/* cdb_type = 0:CDB_LEN = 16B, cdb_type = 1:CDB_LEN = 32B */
u32 cdb_type:1;
u32 rsvd:5; /* used for loopback saving bdsl's num */
u32 task_type :8;
#endif
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u16 remote_xid; /* remote exch_id */
u16 conn_id;
#else
u16 conn_id;
u16 remote_xid;
#endif
} wd0;
union hifcoe_sqe_ts_cont_u cont;
};
struct hifcoe_constant_sge_s {
u32 buf_addr_hi;
u32 buf_addr_lo;
};
struct hifcoe_variable_sge_s {
u32 buf_addr_hi;
u32 buf_addr_lo;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 r_flag :1;
u32 buf_len :31;
#else
u32 buf_len :31;
u32 r_flag :1;
#endif
} wd0;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 last_flag :1;
u32 extension_flag :1;
u32 xid : 14;
u32 buf_addr_gpa: 16;
#else
u32 buf_addr_gpa: 16;
u32 xid : 14;
u32 extension_flag :1;
u32 last_flag :1;
#endif
} wd1;
};
/* SQE, should not be over 128B */
struct hifcoe_sqe_s {
struct hifcoe_wqe_ctrl_s ctrl_sl;
struct hifcoe_sqe_ts_s ts_sl;
struct hifcoe_variable_sge_s sge[2];
};
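/*
 * The 128B limit can be enforced at compile time; a minimal sketch
 * (not part of the original driver), placed inside any function:
 *
 *	BUILD_BUG_ON(sizeof(struct hifcoe_sqe_s) > 128);
 */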
struct hifcoe_rqe_ctrl_s {
struct hifcoe_wqe_ctrl_ch_s ch;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u16 dump_wqe_msn;
u16 wqe_msn;
#else
u16 wqe_msn;
u16 dump_wqe_msn;
#endif
} wd0;
};
struct hifcoe_rqe_drv_s {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
/*
 * User ID[15:0], 15 bits valid and User ID[15] is fixed to 0
*/
u32 user_id :16;
u32 rsvd0 :16;
#else
u32 rsvd0 :16;
u32 user_id :16;
#endif
} wd0;
u32 rsvd1;
};
/* RQE, should not be over 32B */
struct hifcoe_rqe_s {
struct hifcoe_rqe_ctrl_s ctrl_sl;
u32 cqe_gpa_h;
u32 cqe_gpa_l;
struct hifcoe_constant_sge_s bds_sl;
struct hifcoe_rqe_drv_s drv_sl;
};
struct hifcoe_cmdqe_abts_rsp_s {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 task_type : 8;
u32 rsvd0 : 8;
u32 rx_id : 16;
#else
u32 rx_id : 16;
u32 rsvd0 : 8;
u32 task_type : 8;
#endif
} wd0;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsp_type:1; /* 0:BA_ACC, 1:BA_RJT */
u32 payload_len :7;
u32 port_id :4;
u32 rsvd1 :4;
u32 ox_id :16;
#else
u32 ox_id :16;
u32 rsvd1 :4;
u32 port_id :4;
u32 payload_len :7;
u32 rsp_type:1;
#endif
} wd1;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 scqn: 16;
u32 conn_id : 16;
#else
u32 conn_id : 16;
u32 scqn: 16;
#endif
} wd2;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd: 12;
u32 xid : 20;
#else
u32 xid : 20;
u32 rsvd: 12;
#endif
} wd3;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd: 12;
u32 cid : 20;
#else
u32 cid : 20;
u32 rsvd: 12;
#endif
} wd4;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd: 16;
u32 req_rx_id : 16;
#else
u32 req_rx_id : 16;
u32 rsvd: 16;
#endif
} wd5;
/* payload length depends on rsp_type: 1 DWORD or 3 DWORDs */
u32 payload[3];
};
struct hifcoe_cmdqe_buffer_clear_s {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 wqe_type:8;
u32 rsvd0 :8;
u32 rsvd1 :16;
#else
u32 rsvd1 :16;
u32 rsvd0 :8;
u32 wqe_type:8;
#endif
} wd0;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rx_id_start :16;
u32 rx_id_end :16;
#else
u32 rx_id_end :16;
u32 rx_id_start :16;
#endif
} wd1;
u32 scqn;
u32 wd3;
};
struct hifcoe_cmdqe_flush_sq_info_s {
u32 cid;
u32 xid;
};
struct hifcoe_cmdqe_flush_sq_s {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 wqe_type :8;
u32 sq_qid :8;
u32 entry_count :16;
#else
u32 entry_count :16;
u32 sq_qid :8;
u32 wqe_type :8;
#endif
} wd0;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 last_wqe:1;
u32 pos :11;
u32 port_id:4;
u32 scqn:16;
#else
u32 scqn:16;
u32 port_id :4;
u32 pos :11;
u32 last_wqe:1;
#endif
} wd1;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 pkt_ptr :16;
u32 rsvd:16;
#else
u32 rsvd:16;
u32 pkt_ptr :16;
#endif
} wd2;
struct hifcoe_cmdqe_flush_sq_info_s sq_info_entry[0];
};
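/*
 * Illustrative sizing: sq_info_entry[0] is a zero-length array, so a
 * flush-sq CMDQE carrying n entries (wd0.entry_count = n) occupies
 *
 *	sizeof(struct hifcoe_cmdqe_flush_sq_s) +
 *	n * sizeof(struct hifcoe_cmdqe_flush_sq_info_s)
 *
 * bytes, consistent with the "CMDQE, variable length" note below.
 */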
struct hifcoe_cmdqe_creat_srqc_s {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 task_type : 8;
u32 rsvd0 : 8;
u32 rsvd1 : 16;
#else
u32 rsvd1 : 16;
u32 rsvd0 : 8;
u32 task_type : 8;
#endif
} wd0;
u32 srqc_gpa_h;
u32 srqc_gpa_l;
u32 srqc[16];/* srqc_size=64B */
};
struct hifcoe_cmdqe_delete_srqc_s {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 task_type : 8;
u32 rsvd0 : 8;
u32 rsvd1 : 16;
#else
u32 rsvd1 : 16;
u32 rsvd0 : 8;
u32 task_type : 8;
#endif
} wd0;
u32 srqc_gpa_h;
u32 srqc_gpa_l;
};
struct hifcoe_cmdqe_clr_srq_s {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 task_type : 8;
u32 rsvd0 : 8;
u32 rsvd1 : 16;
#else
u32 rsvd1 : 16;
u32 rsvd0 : 8;
u32 task_type : 8;
#endif
} wd0;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
/*
* 0: SRQ for recv ELS;
 * 1: SRQ for recv immediate data
*/
u32 srq_type: 16;
u32 scqn: 16;
#else
u32 scqn: 16;
u32 srq_type: 16;
#endif
} wd1;
u32 srqc_gpa_h;
u32 srqc_gpa_l;
};
struct hifcoe_cmdqe_creat_scqc_s {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 task_type : 8;
u32 rsvd0 : 8;
u32 rsvd1 : 16;
#else
u32 rsvd1 : 16;
u32 rsvd0 : 8;
u32 task_type : 8;
#endif
} wd0;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd2 : 16;
u32 scqn: 16;
#else
u32 scqn: 16;
u32 rsvd2 : 16;
#endif
} wd1;
u32 scqc[16];/* scqc_size=64B */
};
struct hifcoe_cmdqe_delete_scqc_s {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 task_type : 8;
u32 rsvd0 : 8;
u32 rsvd1 : 16;
#else
u32 rsvd1 : 16;
u32 rsvd0 : 8;
u32 task_type : 8;
#endif
} wd0;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd2 : 16;
u32 scqn: 16;
#else
u32 scqn: 16;
u32 rsvd2 : 16;
#endif
} wd1;
};
struct hifcoe_sqe_t_rsp_s {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 fill:2; /* 2bit of F_CTL[1:0] */
u32 conf:1; /* Wait INI confirm, 0: disable, 1:enable */
/*
 * 0: payload area stores the payload,
 * 1: payload area stores the payload GPA
*/
u32 mode:1;
u32 immi:1;
u32 rsvd0 :3;
u32 fcp_rsp_len :8; /* FCP_RESP payload(24~96B)*/
u32 rsvd1 :16;
#else
u32 rsvd1 :16;
u32 fcp_rsp_len :8;
u32 rsvd0 :3;
u32 immi:1;
u32 mode:1;
u32 conf:1;
u32 fill:2;
#endif
} wd0;
u32 magic_num;
u32 hotpooltag;
union {
struct {
u32 addr_h;
u32 addr_l;
} gpa;
struct {
u32 data[25]; /* FCP_RESP payload buf, 100B rsvd */
} buf;
} payload;
};
struct hifcoe_sqe_tresp_ts_s {
/* SQE Task Section's Common Header */
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u8 task_type;
u8 rsvd0;
u16 local_xid;
#else
u16 local_xid;
u8 rsvd0;
u8 task_type;
#endif
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u16 remote_xid;
u16 conn_id;
#else
u16 conn_id;
u16 remote_xid;
#endif
} wd0;
struct hifcoe_sqe_t_rsp_s t_rsp;
};
/* SQE for fcp response, max TSL is 120B */
struct hifcoe_sqe_tresp_s {
struct hifcoe_wqe_ctrl_s ctrl_sl;
struct hifcoe_sqe_tresp_ts_s ts_sl;
};
/* SCQE Common Header */
struct hifcoe_scqe_ch_s {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 owner : 1;
u32 err_code: 7;
u32 cqe_remain_cnt : 3;
u32 rsvd0 : 13;
u32 task_type : 8;
#else
u32 task_type : 8;
u32 rsvd0 : 13;
u32 cqe_remain_cnt : 3;
u32 err_code: 7;
u32 owner : 1;
#endif
} wd0;
};
struct hifcoe_scqe_type_s {
struct hifcoe_scqe_ch_s ch;
u32 rsvd0;
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u16 rsvd4;
u16 conn_id;
#else
u16 conn_id;
u16 rsvd4;
#endif
u32 rsvd1[12];
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd2 :31;
u32 done:1;
#else
u32 done:1;
u32 rsvd3 :31;
#endif
} wd0;
};
struct hifcoe_scqe_sess_sts_s {
struct hifcoe_scqe_ch_s ch;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd1 :12;
u32 xid_qpn :20;
#else
u32 xid_qpn :20;
u32 rsvd1 :12;
#endif
} wd0;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd3 :16;
u32 conn_id :16;
#else
u32 conn_id :16;
u32 rsvd3 :16;
#endif
} wd1;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd2 :12;
u32 cid :20;
#else
u32 cid :20;
u32 rsvd2 :12;
#endif
} wd2;
u64 bloomfilter_id; /* valid only in session offload */
};
struct hifcoe_scqe_comm_rsp_sts_s {
struct hifcoe_scqe_ch_s ch;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 ox_id :16;
u32 rx_id :16;
#else
u32 rx_id :16;
u32 ox_id :16;
#endif
} wd0;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd0 :16;
u32 conn_id :16;
#else
u32 conn_id :16;
u32 rsvd0 :16;
#endif
} wd1;
u32 magic_num;
};
struct hifcoe_scqe_iresp_s {
struct hifcoe_scqe_ch_s ch;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 ox_id :16;
u32 rx_id :16;
#else
u32 rx_id :16;
u32 ox_id :16;
#endif
} wd0;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 dif_info:5;
u32 rsvd0 :11;
u32 conn_id :16;
#else
u32 conn_id :16;
u32 rsvd0 :11;
u32 dif_info:5;
#endif
} wd1;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd0 :16;
u32 fcp_flag:8;
u32 scsi_status :8;
#else
u32 scsi_status :8;
u32 fcp_flag:8;
u32 rsvd0 :16;
#endif
} wd2;
u32 fcp_resid;
u32 fcp_sns_len;
u32 fcp_rsp_len;
u32 magic_num;
};
struct hifcoe_scqe_rcv_abts_rsp_s {
struct hifcoe_scqe_ch_s ch;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 ox_id :16;
u32 rx_id :16;
#else
u32 rx_id :16;
u32 ox_id :16;
#endif
} wd0;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd0 :16;
u32 conn_id :16;
#else
u32 conn_id :16;
u32 rsvd0 :16;
#endif
} wd1;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd0:24;
u32 fh_rctrl :8;
#else
u32 fh_rctrl :8;
u32 rsvd0:24;
#endif
} wd2;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd1 :8;
u32 did :24;
#else
u32 did :24;
u32 rsvd1 :8;
#endif
} wd3;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd2 :8;
u32 sid :24;
#else
u32 sid :24;
u32 rsvd2 :8;
#endif
} wd4;
/* payload length depends on fh_rctrl: 1 DWORD or 3 DWORDs */
u32 payload[3];
u32 magic_num;
};
struct hifcoe_scqe_rcv_els_cmd_s {
struct hifcoe_scqe_ch_s ch;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd0 :8;
u32 did :24;
#else
u32 did :24;
u32 rsvd0 :8;
#endif
} wd0;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd1 :8;
u32 sid :24;
#else
u32 sid :24;
u32 rsvd1 :8;
#endif
} wd1;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 ox_id :16;
u32 rx_id :16;
#else
u32 rx_id :16;
u32 ox_id :16;
#endif
} wd2;
struct{
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 data_len :16;/* ELS cmd Payload length */
u32 user_id_num :16;/* currently used user_id num */
#else
u32 user_id_num :16;
u32 data_len :16;
#endif
} wd3;
u32 user_id[9]; /* User ID of SRQ SGE, used for driver buffer release */
u32 ts;
};
struct hifcoe_scqe_rcv_els_gs_rsp_s {
struct hifcoe_scqe_ch_s ch;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 ox_id :16;
u32 rx_id :16;
#else
u32 rx_id :16;
u32 ox_id :16;
#endif
} wd1;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 data_len:16;
u32 conn_id :16;
#else
u32 conn_id :16;
u32 data_len:16; /* ELS/GS RSP Payload length */
#endif
} wd2;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 end_rsp :1;
u32 echo_rsp:1;
u32 rsvd:6;
u32 did :24;
#else
u32 did :24;
u32 rsvd:6;
u32 echo_rsp:1;
u32 end_rsp :1;
#endif
} wd3;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 user_id_num :8;
u32 sid :24;
#else
u32 sid :24;
u32 user_id_num :8;
#endif
} wd4;
u32 magic_num;
u32 user_id[9];
};
struct hifcoe_scqe_rcv_flush_sts_s {
struct hifcoe_scqe_ch_s ch;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 last_flush : 8;
u32 port_id: 8;
u32 rsvd0 : 16;
#else
u32 rsvd0 : 16;
u32 port_id: 8;
u32 last_flush : 8;
#endif
} wd0;
};
struct hifcoe_scqe_rcv_clear_buf_sts_s {
struct hifcoe_scqe_ch_s ch;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 port_id: 8;
u32 rsvd0 : 24;
#else
u32 rsvd0 : 24;
u32 port_id: 8;
#endif
} wd0;
};
struct hifcoe_scqe_itmf_marker_sts_s {
struct hifcoe_scqe_ch_s ch;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 ox_id :16;
u32 rx_id :16;
#else
u32 rx_id :16;
u32 ox_id :16;
#endif
} wd1;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 end_rsp :8;
u32 did :24;
#else
u32 did :24;
u32 end_rsp :8;
#endif
} wd2;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 rsvd1:8;
u32 sid :24;
#else
u32 sid :24;
u32 rsvd1:8;
#endif
} wd3;
};
struct hifcoe_scqe_abts_marker_sts_s {
struct hifcoe_scqe_ch_s ch;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 ox_id :16;
u32 rx_id :16;
#else
u32 rx_id :16;
u32 ox_id :16;
#endif
} wd1;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 end_rsp :8;
u32 did :24;
#else
u32 did :24;
u32 end_rsp :8;
#endif
} wd2;
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 io_state :8;
u32 sid :24;
#else
u32 sid :24;
u32 io_state :8;
#endif
} wd3;
};
/* SCQE, should not be over 64B */
union hifcoe_scqe_u {
struct hifcoe_scqe_type_s common;
/* session enable/disable/delete sts */
struct hifcoe_scqe_sess_sts_s sess_sts;
/* aborts/abts_rsp/els rsp sts */
struct hifcoe_scqe_comm_rsp_sts_s comm_sts;
struct hifcoe_scqe_rcv_clear_buf_sts_s clear_sts;/* clear buffer sts */
struct hifcoe_scqe_rcv_flush_sts_s flush_sts; /* flush sq sts */
struct hifcoe_scqe_iresp_s iresp;
struct hifcoe_scqe_rcv_abts_rsp_s rcv_abts_rsp; /* recv abts rsp*/
struct hifcoe_scqe_rcv_els_cmd_s rcv_els_cmd;/* recv els cmd */
struct hifcoe_scqe_rcv_els_gs_rsp_s rcv_els_gs_rsp;/* recv els/gs rsp */
struct hifcoe_scqe_itmf_marker_sts_s itmf_marker_sts;/* tmf marker */
struct hifcoe_scqe_abts_marker_sts_s abts_marker_sts;/* abts marker */
};
struct hifcoe_cmdqe_type_s {
struct {
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
u32 task_type : 8;
u32 rsvd0 : 8;
u32 rx_id : 16;
#else
u32 rx_id : 16;
u32 rsvd0 : 8;
u32 task_type : 8;
#endif
} wd0;
};
/* CMDQE, variable length */
union hifc_cmdqe_u {
struct hifcoe_cmdqe_type_s common;
struct hifcoe_cmdqe_sess_en_s session_enable;
struct hifcoe_cmdqe_abts_rsp_s snd_abts_rsp;
struct hifcoe_cmdqe_buffer_clear_s buffer_clear;
struct hifcoe_cmdqe_flush_sq_s flush_sq;
struct hifcoe_cmdqe_creat_srqc_s create_srqc;
struct hifcoe_cmdqe_delete_srqc_s delete_srqc;
struct hifcoe_cmdqe_clr_srq_s clear_srq;
struct hifcoe_cmdqe_creat_scqc_s create_scqc;
struct hifcoe_cmdqe_delete_scqc_s delete_scqc;
};
#endif
// SPDX-License-Identifier: GPL-2.0
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#include "hifc_knl_adp.h"
#include "unf_log.h"
#include "unf_exchg.h"
#include "unf_rport.h"
#include "unf_io.h"
#include "unf_portman.h"
#include "unf_io_abnormal.h"
#define UNF_GET_FCP_CTL(pkg) ((((pkg)->status) >> 8) & 0xFF)
#define UNF_GET_SCSI_STATUS(pkg) (((pkg)->status) & 0xFF)
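/*
 * Worked example of the packed status word these helpers decode
 * (the top half carries the host/driver completion code, consumed
 * as "result >> 16" by the IO result counters further down):
 *   status = 0x00070202
 *     bits 31..16 -> 0x0007 host completion code
 *     bits 15..8  -> 0x02   FCP control  (UNF_GET_FCP_CTL)
 *     bits 7..0   -> 0x02   SCSI status  (UNF_GET_SCSI_STATUS)
 */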
static unsigned int unf_io_success_handler(struct unf_xchg_s *v_xchg,
struct unf_frame_pkg_s *v_pkg,
unsigned int v_status);
static unsigned int unf_ini_error_default_handler(struct unf_xchg_s *v_xchg,
struct unf_frame_pkg_s *v_pkg,
unsigned int v_status);
static unsigned int unf_io_under_flow_handler(struct unf_xchg_s *v_xchg,
struct unf_frame_pkg_s *v_pkg,
unsigned int v_status);
static unsigned int unf_ini_dif_error_handler(struct unf_xchg_s *v_xchg,
struct unf_frame_pkg_s *v_pkg,
unsigned int v_status);
struct unf_ini_error_handler {
unsigned int error_code;
unsigned int (*pfn_unf_ini_error_handler)(struct unf_xchg_s *v_xchg,
struct unf_frame_pkg_s *v_pkg,
unsigned int v_status);
};
struct unf_ini_error_handler ini_error_handler_table[] = {
{ UNF_IO_SUCCESS, unf_io_success_handler },
{ UNF_IO_ABORTED, unf_ini_error_default_handler },
{ UNF_IO_FAILED, unf_ini_error_default_handler },
{ UNF_IO_ABORT_ABTS, unf_ini_error_default_handler },
{ UNF_IO_ABORT_LOGIN, unf_ini_error_default_handler },
{ UNF_IO_ABORT_REET, unf_ini_error_default_handler },
{ UNF_IO_ABORT_FAILED, unf_ini_error_default_handler },
{ UNF_IO_OUTOF_ORDER, unf_ini_error_default_handler },
{ UNF_IO_FTO, unf_ini_error_default_handler },
{ UNF_IO_LINK_FAILURE, unf_ini_error_default_handler },
{ UNF_IO_OVER_FLOW, unf_ini_error_default_handler },
{ UNF_IO_RSP_OVER, unf_ini_error_default_handler },
{ UNF_IO_LOST_FRAME, unf_ini_error_default_handler },
{ UNF_IO_UNDER_FLOW, unf_io_under_flow_handler },
{ UNF_IO_HOST_PROG_ERROR, unf_ini_error_default_handler },
{ UNF_IO_SEST_PROG_ERROR, unf_ini_error_default_handler },
{ UNF_IO_INVALID_ENTRY, unf_ini_error_default_handler },
{ UNF_IO_ABORT_SEQ_NOT, unf_ini_error_default_handler },
{ UNF_IO_REJECT, unf_ini_error_default_handler },
{ UNF_IO_EDC_IN_ERROR, unf_ini_error_default_handler },
{ UNF_IO_EDC_OUT_ERROR, unf_ini_error_default_handler },
{ UNF_IO_UNINIT_KEK_ERR, unf_ini_error_default_handler },
{ UNF_IO_DEK_OUTOF_RANGE, unf_ini_error_default_handler },
{ UNF_IO_KEY_UNWRAP_ERR, unf_ini_error_default_handler },
{ UNF_IO_KEY_TAG_ERR, unf_ini_error_default_handler },
{ UNF_IO_KEY_ECC_ERR, unf_ini_error_default_handler },
{ UNF_IO_BLOCK_SIZE_ERROR, unf_ini_error_default_handler },
{ UNF_IO_ILLEGAL_CIPHER_MODE, unf_ini_error_default_handler },
{ UNF_IO_CLEAN_UP, unf_ini_error_default_handler },
{ UNF_IO_ABORTED_BY_TARGET, unf_ini_error_default_handler },
{ UNF_IO_TRANSPORT_ERROR, unf_ini_error_default_handler },
{ UNF_IO_LINK_FLASH, unf_ini_error_default_handler },
{ UNF_IO_TIMEOUT, unf_ini_error_default_handler },
{ UNF_IO_DMA_ERROR, unf_ini_error_default_handler },
{ UNF_IO_DIF_ERROR, unf_ini_dif_error_handler },
{ UNF_IO_INCOMPLETE, unf_ini_error_default_handler },
{ UNF_IO_DIF_REF_ERROR, unf_ini_dif_error_handler },
{ UNF_IO_DIF_GEN_ERROR, unf_ini_dif_error_handler }
};
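/*
 * unf_ini_status_handle() below scans this table linearly and maps
 * the low-level completion code to its handler; codes not listed
 * here fall back to unf_ini_error_default_handler() with
 * UNF_IO_SOFT_ERR.
 */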
void unf_done_ini_xchg(struct unf_xchg_s *v_xchg)
{
/*
* About I/O Done
* 1. normal case
* 2. Send ABTS & RCVD RSP
* 3. Send ABTS & timer timeout
*/
struct unf_scsi_cmd_s scsi_cmd = { 0 };
unsigned long flags = 0;
struct unf_scsi_cmd_info_s *scsi_cmnd_info = NULL;
struct unf_rport_scsi_id_image_s *scsi_image_table = NULL;
unsigned int scsi_id = 0;
UNF_CHECK_VALID(0x1301, TRUE, v_xchg, return);
/* scsi_cmnd validity check */
if (unlikely(!v_xchg->scsi_cmnd_info.scsi_cmnd))
return;
/* 1. Free RX_ID for INI SIRT: Do not care
* 2. set & check exchange state
*
* for Set UP_ABORT Tag:
* 1) L_Port destroy
* 2) AC power down
* 3) LUN reset
* 4) Target/Session reset
* 5) SCSI send Abort(ABTS)
*/
spin_lock_irqsave(&v_xchg->xchg_state_lock, flags);
v_xchg->io_state |= INI_IO_STATE_DONE;
if (unlikely(v_xchg->io_state & (INI_IO_STATE_UPABORT |
INI_IO_STATE_UPSEND_ERR |
INI_IO_STATE_TMF_ABORT))) {
/*
* a. UPABORT: SCSI has sent ABTS
* --->>> do not call SCSI_Done, return directly
* b. UPSEND_ERR: error happened while the LLDD sent the SCSI_CMD
* --->>> do not call SCSI_Done, SCSI needs to retry
*/
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_KEVENT,
"[event]Exchange(0x%p) Cmdsn:0x%lx upCmd:%p oxid(0x%x) with state(0x%x) has been aborted or send error",
v_xchg, (unsigned long)v_xchg->cmnd_sn,
v_xchg->scsi_cmnd_info.scsi_cmnd, v_xchg->ox_id,
v_xchg->io_state);
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags);
/* here, return directly */
return;
}
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags);
/* 3. Get scsi_cmnd info */
scsi_cmnd_info = &v_xchg->scsi_cmnd_info;
/*
* 4. Set:
* scsi_cmnd;
* cmnd_done_func;
* cmnd up_level_done;
* sense_buff_addr;
* resid_length;
* cmnd_result;
* dif_info
*
* UNF_SCSI_CMND <<-- UNF_SCSI_CMND_INFO
*/
UNF_SET_HOST_CMND((&scsi_cmd), scsi_cmnd_info->scsi_cmnd);
UNF_SET_CMND_DONE_FUNC((&scsi_cmd), scsi_cmnd_info->pfn_done);
scsi_cmd.drv_private = v_xchg->lport;
if (unlikely((UNF_SCSI_STATUS(v_xchg->scsi_cmnd_info.result)) &
FCP_SNS_LEN_VALID_MASK)) {
unf_save_sense_data(
scsi_cmd.upper_cmnd,
(char *)v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu,
SCSI_SENSE_DATA_LEN);
}
UNF_SET_RESID((&scsi_cmd), (unsigned int)v_xchg->resid_len);
UNF_SET_CMND_RESULT((&scsi_cmd), scsi_cmnd_info->result);
memcpy(&scsi_cmd.dif_info, &v_xchg->dif_info,
sizeof(struct dif_info_s));
scsi_id = scsi_cmnd_info->scsi_id;
/* 5. call scsi_cmnd_done func: unf_scsi_done */
UNF_DONE_SCSI_CMND(&scsi_cmd);
/* 6. Update IO result CNT */
if (likely(v_xchg->lport)) {
scsi_image_table = &v_xchg->lport->rport_scsi_table;
UNF_IO_RESULT_CNT(scsi_image_table, scsi_id,
(scsi_cmnd_info->result >> 16));
}
}
static inline unsigned int unf_ini_get_sgl_entry_buf(
ini_get_sgl_entry_buf pfn_unf_ini_get_sgl,
void *v_cmnd,
void *v_driver_sgl,
void **v_upper_sgl,
unsigned int *v_req_index,
unsigned int *v_index,
char **v_buf,
unsigned int *v_buf_len)
{
if (unlikely(!pfn_unf_ini_get_sgl)) {
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"Command(0x%p) Get sgl Entry func Null.", v_cmnd);
return UNF_RETURN_ERROR;
}
return pfn_unf_ini_get_sgl(v_cmnd, v_driver_sgl, v_upper_sgl,
v_req_index, v_index, v_buf, v_buf_len);
}
unsigned int unf_ini_get_sgl_entry(void *v_pkg, char **v_buf,
unsigned int *v_buf_len)
{
struct unf_frame_pkg_s *pkg = (struct unf_frame_pkg_s *)v_pkg;
struct unf_xchg_s *xchg = NULL;
unsigned int ret = RETURN_OK;
UNF_CHECK_VALID(0x1305, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR);
UNF_CHECK_VALID(0x1306, UNF_TRUE, v_buf, return UNF_RETURN_ERROR);
UNF_CHECK_VALID(0x1307, UNF_TRUE, v_buf_len, return UNF_RETURN_ERROR);
xchg = (struct unf_xchg_s *)pkg->xchg_contex;
UNF_CHECK_VALID(0x1308, UNF_TRUE, xchg, return UNF_RETURN_ERROR);
/* Get SGL Entry buffer for INI Mode */
ret = unf_ini_get_sgl_entry_buf(
xchg->scsi_cmnd_info.pfn_unf_get_sgl_entry_buf,
xchg->scsi_cmnd_info.scsi_cmnd,
NULL,
&xchg->req_sgl_info.sgl,
&xchg->scsi_cmnd_info.port_id,
&((xchg->req_sgl_info).entry_index),
v_buf, v_buf_len);
return ret;
}
unsigned int unf_ini_get_dif_sgl_entry(void *v_pkg, char **v_buf,
unsigned int *v_buf_len)
{
struct unf_frame_pkg_s *pkg = (struct unf_frame_pkg_s *)v_pkg;
struct unf_xchg_s *xchg = NULL;
unsigned int ret = RETURN_OK;
UNF_CHECK_VALID(0x1305, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR);
UNF_CHECK_VALID(0x1306, UNF_TRUE, v_buf, return UNF_RETURN_ERROR);
UNF_CHECK_VALID(0x1307, UNF_TRUE, v_buf_len, return UNF_RETURN_ERROR);
xchg = (struct unf_xchg_s *)pkg->xchg_contex;
UNF_CHECK_VALID(0x1308, UNF_TRUE, xchg, return UNF_RETURN_ERROR);
/* Get SGL Entry buffer for INI Mode */
ret = unf_ini_get_sgl_entry_buf(
xchg->scsi_cmnd_info.pfn_unf_get_sgl_entry_buf,
xchg->scsi_cmnd_info.scsi_cmnd,
NULL,
&xchg->dif_sgl_info.sgl,
&xchg->scsi_cmnd_info.port_id,
&xchg->dif_sgl_info.entry_index,
v_buf, v_buf_len);
return ret;
}
unsigned int unf_get_uplevel_cmnd_errcode(
struct unf_ini_error_code_s *v_err_table,
unsigned int v_err_table_count,
unsigned int v_drv_err_code)
{
unsigned int i;
/* on lookup failure return DID_ERROR; the upper level adjusts it */
if (unlikely(!v_err_table)) {
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"Error Code Table is Null, Error Code(0x%x).",
v_drv_err_code);
return (unsigned int)UNF_SCSI_HOST(DID_ERROR);
}
for (i = 0; i < v_err_table_count; i++) {
if (v_drv_err_code == v_err_table[i].drv_err_code)
return v_err_table[i].ap_err_code;
}
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Unsupported Ap Error code by Error Code(0x%x).",
v_drv_err_code);
return (unsigned int)UNF_SCSI_HOST(DID_ERROR);
}
static unsigned int unf_ini_status_handle(struct unf_xchg_s *v_xchg,
struct unf_frame_pkg_s *v_pkg)
{
unsigned int i;
unsigned int ret;
unsigned int status;
for (i = 0;
i < sizeof(ini_error_handler_table) /
sizeof(struct unf_ini_error_handler);
i++) {
if (UNF_GET_LL_ERR(v_pkg) ==
ini_error_handler_table[i].error_code) {
status = unf_get_uplevel_cmnd_errcode(
v_xchg->scsi_cmnd_info.err_code_table,
v_xchg->scsi_cmnd_info.err_code_table_cout,
UNF_GET_LL_ERR(v_pkg));
if (ini_error_handler_table[i].pfn_unf_ini_error_handler) {
ret = ini_error_handler_table[i].pfn_unf_ini_error_handler(
v_xchg,
v_pkg,
status);
} else {
/* set exchange->result
* ---to--->>>scsi_result
*/
ret = unf_ini_error_default_handler(v_xchg,
v_pkg,
status);
}
return ret;
}
}
status = unf_get_uplevel_cmnd_errcode(
v_xchg->scsi_cmnd_info.err_code_table,
v_xchg->scsi_cmnd_info.err_code_table_cout,
UNF_IO_SOFT_ERR);
ret = unf_ini_error_default_handler(v_xchg, v_pkg, status);
UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]Can not find com status, SID(0x%x) exchange(0x%p) com_status(0x%x) DID(0x%x) hot_pool_tag(0x%x)",
v_xchg->sid, v_xchg, v_pkg->status,
v_xchg->did, v_xchg->hot_pool_tag);
return ret;
}
static void unf_analysis_response_info(struct unf_xchg_s *v_xchg,
struct unf_frame_pkg_s *v_pkg,
unsigned int *v_status)
{
unsigned char *resp_buf = NULL;
/* LL_Driver uses little endian and copies RSP_INFO to COM_Driver */
if (v_pkg->unf_rsp_pload_bl.buffer_ptr) {
if (v_pkg->unf_rsp_pload_bl.buffer_ptr[0] !=
UNF_FCP_TM_RSP_COMPLETE) {
*v_status = UNF_SCSI_HOST(DID_BUS_BUSY);
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%p) DID bus busy, scsi_status(0x%x)",
v_xchg->lport, UNF_GET_SCSI_STATUS(v_pkg));
}
} else {
resp_buf =
(unsigned char *)v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu;
if ((resp_buf)) {
/* If the chip uses little endian, convert to big endian */
if ((v_pkg->byte_orders & UNF_BIT_3) == 0)
unf_cpu_to_big_end(
resp_buf,
v_pkg->unf_rsp_pload_bl.length);
/* Chip DMAs data in big endian */
if (resp_buf[3] != UNF_FCP_TM_RSP_COMPLETE) {
*v_status = UNF_SCSI_HOST(DID_BUS_BUSY);
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT,
UNF_WARN,
"[warn]Port(0x%p) DID bus busy, scsi_status(0x%x)",
v_xchg->lport,
UNF_GET_SCSI_STATUS(v_pkg));
}
}
}
}
static void unf_analysis_sense_info(struct unf_xchg_s *v_xchg,
struct unf_frame_pkg_s *v_pkg)
{
#define MIN(x, y) ((x) < (y) ? (x) : (y))
unsigned int length = 0;
/* round the length up to a 4-byte boundary */
length = v_pkg->unf_sense_pload_bl.length;
if (length % 4 != 0)
length = 4 * ((length / 4) + 1);
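/* e.g. an 18-byte sense payload is rounded up to 20 bytes here */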
/*
* If sense info is carried in the package, copy it directly;
* otherwise the chip has already DMA'd the data to the sense buffer
*/
if (v_pkg->unf_sense_pload_bl.buffer_ptr) {
/* carried from the WQE by ll_driver & ucode: not used */
unf_cpu_to_big_end(v_pkg->unf_sense_pload_bl.buffer_ptr,
length);
memcpy(v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu,
v_pkg->unf_sense_pload_bl.buffer_ptr,
(unsigned int)MIN(UNF_SCSI_SENSE_DATA_LEN,
v_pkg->unf_sense_pload_bl.length));
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"[info]LPort(0x%p), Sense Length(%u), Scsi Status(0x%x).",
v_xchg->lport,
v_pkg->unf_sense_pload_bl.length,
UNF_GET_SCSI_STATUS(v_pkg));
} else if ((length != 0) &&
(v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu)) {
/* has been dma to exchange buffer */
if ((v_pkg->byte_orders & UNF_BIT_4) == 0) {
unf_cpu_to_big_end(((unsigned char *)
(v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu)) +
v_pkg->unf_rsp_pload_bl.length,
v_pkg->unf_sense_pload_bl.length);
}
memcpy(v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu,
((unsigned char *)
(v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu)) +
v_pkg->unf_rsp_pload_bl.length,
(unsigned int)MIN(UNF_SCSI_SENSE_DATA_LEN,
v_pkg->unf_sense_pload_bl.length));
}
}
static unsigned int unf_io_success_handler(struct unf_xchg_s *v_xchg,
struct unf_frame_pkg_s *v_pkg,
unsigned int v_status)
{
unsigned char scsi_status;
unsigned char control;
unsigned int status = v_status;
UNF_CHECK_VALID(0x1311, TRUE, v_xchg, return UNF_RETURN_ERROR);
UNF_CHECK_VALID(0x1312, TRUE, v_pkg, return UNF_RETURN_ERROR);
control = UNF_GET_FCP_CTL(v_pkg);
scsi_status = UNF_GET_SCSI_STATUS(v_pkg);
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_INFO,
"[info]Port(0x%p), Exchange(0x%p) Completed, Control(0x%x), Scsi Status(0x%x)",
v_xchg->lport, v_xchg, control, scsi_status);
if (control & FCP_SNS_LEN_VALID_MASK) {
/* has sense info */
if (scsi_status == FCP_SCSI_STATUS_GOOD)
scsi_status = SCSI_CHECK_CONDITION;
unf_analysis_sense_info(v_xchg, v_pkg);
} else {
/*
* When the FCP_RSP_LEN_VALID bit is set to one,
* the content of the SCSI STATUS CODE field is not reliable
* and shall be ignored by the application client.
*/
if (control & FCP_RSP_LEN_VALID_MASK)
unf_analysis_response_info(v_xchg, v_pkg, &status);
}
v_xchg->scsi_cmnd_info.result = status |
UNF_SCSI_STATUS(scsi_status);
return RETURN_OK;
}
static unsigned int unf_ini_error_default_handler(struct unf_xchg_s *v_xchg,
struct unf_frame_pkg_s *v_pkg,
unsigned int v_status)
{
/* set exchange->result ---to--->>> scsi_cmnd->result */
UNF_CHECK_VALID(0x1313, TRUE, v_xchg, return UNF_RETURN_ERROR);
UNF_CHECK_VALID(0x1314, TRUE, v_pkg, return UNF_RETURN_ERROR);
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_ABNORMAL, UNF_WARN,
"[warn]SID(0x%x) exchange(0x%p) com_status(0x%x) up_status(0x%x) DID(0x%x) hot_pool_tag(0x%x) response_len(0x%x)",
v_xchg->sid, v_xchg, v_pkg->status, v_status,
v_xchg->did, v_xchg->hot_pool_tag, v_pkg->residus_len);
v_xchg->scsi_cmnd_info.result =
v_status | UNF_SCSI_STATUS(UNF_GET_SCSI_STATUS(v_pkg));
return RETURN_OK;
}
static unsigned int unf_ini_dif_error_handler(struct unf_xchg_s *v_xchg,
struct unf_frame_pkg_s *v_pkg,
unsigned int v_status)
{
struct unf_dif_control_info_s *dif_control = NULL;
unsigned char *sense_data = NULL;
unsigned short sense_code = 0;
UNF_CHECK_VALID(0x1315, TRUE, v_xchg, return UNF_RETURN_ERROR);
UNF_CHECK_VALID(0x1316, TRUE, v_pkg, return UNF_RETURN_ERROR);
UNF_REFERNCE_VAR(v_status);
/*
* According to the DIF scheme the driver sets CHECK CONDITION (0x2)
* when a DIF error occurs, and returns values based on the
* upper-layer verification result.
* Check sequence: CRC, LBA, APP;
* if a CRC error is found, the subsequent checks are not performed.
*/
v_xchg->scsi_cmnd_info.result =
UNF_SCSI_STATUS(SCSI_CHECK_CONDITION);
dif_control = &v_pkg->dif_control;
if (v_pkg->status_sub_code == 0) {
UNF_GET_DIF_ERROR_LEVEL1(v_xchg, dif_control, 0,
sense_code, DRV_DIF_CRC_ERR);
UNF_GET_DIF_ERROR_LEVEL2(v_xchg, dif_control, 0,
sense_code, DRV_DIF_LBA_ERR);
UNF_GET_DIF_ERROR_LEVEL3(v_xchg, dif_control, 0,
sense_code, DRV_DIF_APP_ERR);
if (sense_code == 0) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Unexpected DIF unwonted, operation_code(0x%x) actual DIF(0x%llx) expected DIF(0x%llx)",
v_xchg->dif_control.protect_opcode,
*(unsigned long long *)
&dif_control->actual_dif[0],
*(unsigned long long *)
&dif_control->expected_dif[0]);
}
} else {
sense_code = (unsigned short)v_pkg->status_sub_code;
}
sense_data = (unsigned char *)
v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu;
memset(sense_data, 0, SCSI_SENSE_DATA_LEN);
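/*
 * Build fixed-format sense data (SCSI response code 0x70):
 * byte 0 response code, byte 2 sense key, byte 7 additional length,
 * bytes 12/13 ASC/ASCQ -- filled below with the DIF sense_code.
 */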
sense_data[0] = 0x70; /* response code */
sense_data[2] = ILLEGAL_REQUEST; /* sense key:0x05; */
sense_data[7] = 0x7; /* additional sense length */
sense_data[12] = (unsigned char)(sense_code >> 8);
sense_data[13] = (unsigned char)sense_code;
/* the sense code is carried in sense_data[12..13] */
return RETURN_OK;
}
static unsigned int unf_io_under_flow_handler(struct unf_xchg_s *v_xchg,
struct unf_frame_pkg_s *v_pkg,
unsigned int v_status)
{
/* underflow: residual length > 0 */
UNF_CHECK_VALID(0x1317, TRUE, v_xchg, return UNF_RETURN_ERROR);
UNF_CHECK_VALID(0x1318, TRUE, v_pkg, return UNF_RETURN_ERROR);
if ((v_xchg->fcp_cmnd.cdb[0] != SCSIOPC_REPORT_LUN) &&
(v_xchg->fcp_cmnd.cdb[0] != SCSIOPC_INQUIRY)) {
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_INFO,
"[info]IO under flow: SID(0x%x) exchange(0x%p) com status(0x%x) up_status(0x%x) DID(0x%x) hot_pool_tag(0x%x) response SID(0x%x)",
v_xchg->sid, v_xchg, v_pkg->status, v_status,
v_xchg->did, v_xchg->hot_pool_tag,
v_pkg->residus_len);
}
v_xchg->resid_len = (int)v_pkg->residus_len;
(void)unf_io_success_handler(v_xchg, v_pkg, v_status);
return RETURN_OK;
}
void unf_complete_cmnd(struct unf_scsi_cmd_s *v_scsi_cmnd, unsigned int result)
{
/*
* Exception during process Que_CMND
* 1. L_Port == NULL;
* 2. L_Port == removing;
* 3. R_Port == NULL;
* 4. Xchg == NULL.
*/
UNF_CHECK_VALID(0x1319, TRUE, UNF_GET_CMND_DONE_FUNC(v_scsi_cmnd),
return);
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_INFO,
"[info]Command(0x%p), Result(0x%x).", v_scsi_cmnd, result);
UNF_SET_CMND_RESULT(v_scsi_cmnd, result);
/* struct unf_scsi_cmd_s->pfn_done -->> unf_scsi_done */
UNF_DONE_SCSI_CMND(v_scsi_cmnd);
}
static inline void unf_bind_xchg_scsi_cmd(struct unf_xchg_s *v_xchg,
struct unf_scsi_cmd_s *v_scsi_cmnd)
{
struct unf_scsi_cmd_info_s *scsi_cmnd_info = NULL;
scsi_cmnd_info = &v_xchg->scsi_cmnd_info;
/* UNF_SCSI_CMND_INFO <<-- UNF_SCSI_CMND */
scsi_cmnd_info->err_code_table =
UNF_GET_ERR_CODE_TABLE(v_scsi_cmnd);
scsi_cmnd_info->err_code_table_cout =
UNF_GET_ERR_CODE_TABLE_COUNT(v_scsi_cmnd);
scsi_cmnd_info->pfn_done = UNF_GET_CMND_DONE_FUNC(v_scsi_cmnd);
scsi_cmnd_info->scsi_cmnd = UNF_GET_HOST_CMND(v_scsi_cmnd);
scsi_cmnd_info->sense_buf =
(char *)UNF_GET_SENSE_BUF_ADDR(v_scsi_cmnd);
/* unf_get_frame_entry_buf */
scsi_cmnd_info->pfn_unf_get_sgl_entry_buf =
UNF_GET_SGL_ENTRY_BUF_FUNC(v_scsi_cmnd);
scsi_cmnd_info->sgl = UNF_GET_CMND_SGL(v_scsi_cmnd);
scsi_cmnd_info->time_out = v_scsi_cmnd->time_out;
scsi_cmnd_info->entry_cnt = v_scsi_cmnd->entry_count;
scsi_cmnd_info->port_id = (unsigned int)v_scsi_cmnd->port_id;
scsi_cmnd_info->scsi_id = UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd);
}
unsigned int unf_ini_scsi_completed(void *v_lport,
struct unf_frame_pkg_s *v_pkg)
{
struct unf_lport_s *lport = NULL;
struct unf_xchg_s *xchg = NULL;
struct unf_fcp_cmnd_s *fcp_cmnd = NULL;
unsigned int control;
unsigned short xchg_tag;
unsigned int ret;
unsigned long flag = 0;
UNF_CHECK_VALID(0x1323, TRUE, v_lport, return UNF_RETURN_ERROR);
UNF_CHECK_VALID(0x1324, TRUE, v_pkg, return UNF_RETURN_ERROR);
lport = (struct unf_lport_s *)v_lport;
xchg_tag =
(unsigned short)v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX];
/* 1. Find Exchange Context */
xchg = unf_cm_lookup_xchg_by_tag(v_lport, (unsigned short)xchg_tag);
if (unlikely(!xchg)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x_0x%x) can not find exchange by tag(0x%x)",
lport->port_id, lport->nport_id, xchg_tag);
/* NOTE: return directly */
return UNF_RETURN_ERROR;
}
/* 2. Consistency check */
UNF_CHECK_ALLOCTIME_VALID(lport, xchg_tag, xchg,
v_pkg->private[PKG_PRIVATE_XCHG_ALLOC_TIME],
xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]);
/* 3. Increase ref_cnt for exchange protecting */
ret = unf_xchg_ref_inc(xchg, INI_RESPONSE_DONE); /* hold */
UNF_CHECK_VALID(0x1325, TRUE, (ret == RETURN_OK),
return UNF_RETURN_ERROR);
fcp_cmnd = &xchg->fcp_cmnd;
control = fcp_cmnd->control;
control = UNF_GET_TASK_MGMT_FLAGS(control);
/* 4. Cancel timer if necessary */
if (xchg->scsi_cmnd_info.time_out != 0)
lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer(xchg);
/* 5. process scsi TMF if necessary */
if (control != 0) {
unf_process_scsi_mgmt_result(v_pkg, xchg);
unf_xchg_ref_dec(xchg, INI_RESPONSE_DONE); /* cancel hold */
/* NOTE: return directly */
return RETURN_OK;
}
/* 6. Xchg Abort state check */
spin_lock_irqsave(&xchg->xchg_state_lock, flag);
if (INI_IO_STATE_UPABORT & xchg->io_state) {
spin_unlock_irqrestore(&xchg->xchg_state_lock, flag);
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_WARN,
"[warn]Port(0x%x) find exchange(%p) state(0x%x) has been aborted",
lport->port_id, xchg, xchg->io_state);
/* NOTE: release exchange during SCSI ABORT(ABTS) */
unf_xchg_ref_dec(xchg, INI_RESPONSE_DONE); /* cancel hold */
return ret;
}
spin_unlock_irqrestore(&xchg->xchg_state_lock, flag);
/*
* 7. INI SCSI CMND Status process
* set exchange->result ---to--->>> scsi_result
*/
ret = unf_ini_status_handle(xchg, v_pkg);
/* 8. NOTE: release exchange if necessary */
unf_cm_free_xchg(lport, xchg);
/* 9. dec exch ref_cnt */
/* cancel hold: release resource now */
unf_xchg_ref_dec(xchg, INI_RESPONSE_DONE);
return ret;
}
unsigned int unf_hardware_start_io(struct unf_lport_s *v_lport,
struct unf_frame_pkg_s *v_pkg)
{
if (unlikely(!v_lport->low_level_func.service_op.pfn_unf_cmnd_send)) {
UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]Port(0x%x) low level send scsi function is NULL",
v_lport->port_id);
return UNF_RETURN_ERROR;
}
return v_lport->low_level_func.service_op.pfn_unf_cmnd_send(
v_lport->fc_port,
v_pkg);
}
struct unf_rport_s *unf_find_rport_by_scsi_id(
struct unf_lport_s *v_lport,
struct unf_ini_error_code_s *v_err_code_table,
unsigned int v_err_code_table_cout,
unsigned int v_scsi_id,
unsigned int *v_scsi_result)
{
struct unf_rport_scsi_id_image_s *scsi_image_table = NULL;
struct unf_wwpn_rport_info_s *wwpn_rport_info = NULL;
struct unf_rport_s *rport = NULL;
unsigned long flags = 0;
/* scsi_table -> session_table -> image_table */
scsi_image_table = &v_lport->rport_scsi_table;
/* 1. Scsi_Id validity check */
if (unlikely(v_scsi_id >= scsi_image_table->max_scsi_id)) {
UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]Input scsi_id(0x%x) bigger than max_scsi_id(0x%x).",
v_scsi_id, scsi_image_table->max_scsi_id);
*v_scsi_result = unf_get_uplevel_cmnd_errcode(
v_err_code_table,
v_err_code_table_cout,
UNF_IO_SOFT_ERR); /* did_soft_error */
return NULL;
}
/* 2. Get R_Port info: use the scsi_id to look it up in L_Port's
 * Rport_Scsi_Table (image table)
*/
spin_lock_irqsave(&scsi_image_table->scsi_image_table_lock, flags);
wwpn_rport_info = &scsi_image_table->wwn_rport_info_table[v_scsi_id];
rport = wwpn_rport_info->rport;
spin_unlock_irqrestore(&scsi_image_table->scsi_image_table_lock, flags);
if (unlikely(!rport)) {
*v_scsi_result = unf_get_uplevel_cmnd_errcode(
v_err_code_table,
v_err_code_table_cout,
/* did_not_connect */
UNF_IO_PORT_LOGOUT);
return NULL;
}
return rport;
}
static unsigned int unf_build_xchg_fcp_cmnd(struct unf_fcp_cmnd_s *v_fcp_cmnd,
struct unf_scsi_cmd_s *v_scsi_cmnd)
{
/* SCSI_CMND -->> FCP_CMND */
if (UNF_GET_DATA_DIRECTION(v_scsi_cmnd) == DMA_TO_DEVICE) {
v_fcp_cmnd->control = UNF_FCP_WR_DATA;
} else if (UNF_GET_DATA_DIRECTION(v_scsi_cmnd) == DMA_FROM_DEVICE) {
v_fcp_cmnd->control = UNF_FCP_RD_DATA;
} else {
/* DMA Direction None */
v_fcp_cmnd->control = 0;
}
memcpy(v_fcp_cmnd->cdb, &UNF_GET_FCP_CMND(v_scsi_cmnd),
v_scsi_cmnd->cmnd_len);
if (((v_fcp_cmnd->control == UNF_FCP_WR_DATA) &&
(IS_READ_COMMAND(v_fcp_cmnd->cdb[0]))) ||
((v_fcp_cmnd->control == UNF_FCP_RD_DATA) &&
(IS_WRITE_COMMAND(v_fcp_cmnd->cdb[0])))) {
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MINOR,
"Scsi command direction inconsistent, CDB[0](0x%x), direction(0x%x).",
v_fcp_cmnd->cdb[0], v_fcp_cmnd->control);
return UNF_RETURN_ERROR;
}
memcpy(v_fcp_cmnd->lun, v_scsi_cmnd->pc_lun_id,
sizeof(v_fcp_cmnd->lun));
unf_big_end_to_cpu((void *)v_fcp_cmnd->cdb,
sizeof(v_fcp_cmnd->cdb));
v_fcp_cmnd->data_length = UNF_GET_DATA_LEN(v_scsi_cmnd);
return RETURN_OK;
}
static void unf_adjust_xchg_len(struct unf_xchg_s *v_xchg,
unsigned int v_scsi_cmnd)
{
switch (v_scsi_cmnd) {
case SCSIOPC_REQUEST_SENSE: /* requires different buffer */
v_xchg->data_len = SCSI_SENSE_DATA_LEN;
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MINOR,
"Request Sense new.");
break;
case SCSIOPC_TEST_UNIT_READY:
case SCSIOPC_RESERVE:
case SCSIOPC_RELEASE:
case SCSIOPC_START_STOP_UNIT:
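/* these commands carry no data phase, so force a zero transfer length */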
v_xchg->data_len = 0;
break;
default:
break;
}
}
static void unf_copy_dif_control(struct unf_dif_control_info_s *v_dif_control,
struct unf_scsi_cmd_s *v_scsi_cmnd)
{
v_dif_control->fcp_dl = v_scsi_cmnd->dif_control.fcp_dl;
v_dif_control->protect_opcode =
v_scsi_cmnd->dif_control.protect_opcode;
v_dif_control->start_lba = v_scsi_cmnd->dif_control.start_lba;
v_dif_control->app_tag = v_scsi_cmnd->dif_control.app_tag;
v_dif_control->flags = v_scsi_cmnd->dif_control.flags;
v_dif_control->dif_sge_count =
v_scsi_cmnd->dif_control.dif_sge_count;
v_dif_control->dif_sgl = v_scsi_cmnd->dif_control.dif_sgl;
}
static void unf_adjsut_dif_pci_transfer_len(struct unf_xchg_s *v_xchg,
unsigned int direction)
{
struct unf_dif_control_info_s *dif_control = NULL;
unsigned int sector_size = 512;
dif_control = &v_xchg->dif_control;
if (dif_control->protect_opcode == UNF_DIF_ACTION_NONE)
return;
switch (dif_control->protect_opcode & UNF_DIF_ACTION_MASK) {
case UNF_DIF_ACTION_INSERT:
if (direction == DMA_TO_DEVICE) {
/* Write IO, INSERT: the data transmitted over the
 * link carries DIF.
 */
dif_control->fcp_dl =
v_xchg->data_len +
UNF_CAL_BLOCK_CNT(v_xchg->data_len,
sector_size) *
UNF_DIF_AREA_SIZE;
} else {
/* Read IO, INSERT: DIF is carried internally;
 * the link does not carry DIF.
 */
dif_control->fcp_dl = v_xchg->data_len;
}
break;
case UNF_DIF_ACTION_VERIFY_AND_DELETE:
if (direction == DMA_TO_DEVICE) {
/* Write IO, DELETE: DIF is carried internally;
 * the link does not carry DIF.
 */
dif_control->fcp_dl = v_xchg->data_len;
} else {
/* Read IO, DELETE: the data carried on the link
 * includes DIF; no DIF is kept internally.
 */
dif_control->fcp_dl =
v_xchg->data_len +
UNF_CAL_BLOCK_CNT(v_xchg->data_len,
sector_size) *
UNF_DIF_AREA_SIZE;
}
break;
case UNF_DIF_ACTION_VERIFY_AND_FORWARD:
dif_control->fcp_dl =
v_xchg->data_len +
UNF_CAL_BLOCK_CNT(v_xchg->data_len, sector_size) *
UNF_DIF_AREA_SIZE;
break;
default:
dif_control->fcp_dl = v_xchg->data_len;
break;
}
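/*
 * Worked example (assuming UNF_DIF_AREA_SIZE is the 8-byte T10 DIF
 * tuple): data_len = 4096 and sector_size = 512 give 8 blocks, so a
 * write with INSERT expands fcp_dl to 4096 + 8 * 8 = 4160 bytes.
 */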
v_xchg->fcp_cmnd.data_length = dif_control->fcp_dl;
}
static int unf_save_scsi_cmnd_to_xchg(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport,
struct unf_xchg_s *v_xchg,
struct unf_scsi_cmd_s *v_scsi_cmnd)
{
struct unf_lport_s *lport = v_lport;
struct unf_rport_s *rport = v_rport;
struct unf_xchg_s *xchg = v_xchg;
unsigned int result;
v_scsi_cmnd->driver_scribble = (void *)xchg->start_jif;
xchg->rport = rport;
xchg->rport_bind_jifs = rport->rport_alloc_jifs;
if (lport->low_level_func.xchg_mgr_type ==
UNF_LOW_LEVEL_MGR_TYPE_PASSTIVE)
xchg->ox_id = xchg->hot_pool_tag;
/* Build Xchg SCSI_CMND info */
unf_bind_xchg_scsi_cmd(xchg, v_scsi_cmnd);
xchg->data_len = UNF_GET_DATA_LEN(v_scsi_cmnd);
xchg->data_direction = UNF_GET_DATA_DIRECTION(v_scsi_cmnd);
xchg->sid = lport->nport_id;
xchg->did = rport->nport_id;
xchg->private[PKG_PRIVATE_XCHG_RPORT_INDEX] = rport->rport_index;
xchg->world_id = v_scsi_cmnd->world_id;
xchg->cmnd_sn = v_scsi_cmnd->cmnd_sn;
xchg->scsi_id = v_scsi_cmnd->scsi_id;
/* Build Xchg fcp_cmnd */
result = unf_build_xchg_fcp_cmnd(&xchg->fcp_cmnd, v_scsi_cmnd);
if (unlikely(result != RETURN_OK))
return UNF_RETURN_ERROR;
unf_adjust_xchg_len(xchg, UNF_GET_FCP_CMND(v_scsi_cmnd));
/* Dif (control) info */
unf_copy_dif_control(&xchg->dif_control, v_scsi_cmnd);
memcpy(&xchg->dif_info, &v_scsi_cmnd->dif_info,
sizeof(struct dif_info_s));
unf_adjsut_dif_pci_transfer_len(xchg,
UNF_GET_DATA_DIRECTION(v_scsi_cmnd));
/* single sgl info */
if ((xchg->data_direction != DMA_NONE) &&
(UNF_GET_CMND_SGL(v_scsi_cmnd))) {
xchg->req_sgl_info.sgl = UNF_GET_CMND_SGL(v_scsi_cmnd);
/* Save the sgl header for easy location and printing. */
xchg->req_sgl_info.sgl_start = xchg->req_sgl_info.sgl;
xchg->req_sgl_info.req_index = 0;
xchg->req_sgl_info.entry_index = 0;
}
if (v_scsi_cmnd->dif_control.dif_sgl) {
xchg->dif_sgl_info.sgl = UNF_INI_GET_DIF_SGL(v_scsi_cmnd);
xchg->dif_sgl_info.entry_index = 0;
xchg->dif_sgl_info.req_index = 0;
xchg->dif_sgl_info.sgl_start = xchg->dif_sgl_info.sgl;
}
return RETURN_OK;
}
static int unf_send_fcp_cmnd(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport,
struct unf_xchg_s *v_xchg)
{
struct unf_scsi_cmd_info_s *scsi_cmnd_info = NULL;
struct unf_lport_s *lport = v_lport;
struct unf_rport_s *rport = v_rport;
struct unf_xchg_s *xchg = v_xchg;
struct unf_frame_pkg_s pkg = { 0 };
unsigned int result;
unsigned long flags = 0;
memcpy(&pkg.dif_control, &xchg->dif_control,
sizeof(struct unf_dif_control_info_s));
pkg.dif_control.fcp_dl = xchg->dif_control.fcp_dl;
pkg.transfer_len = xchg->data_len; /* Pcie data transfer length */
pkg.xchg_contex = xchg;
pkg.qos_level = 0;
pkg.entry_count = xchg->scsi_cmnd_info.entry_cnt;
scsi_cmnd_info = &v_xchg->scsi_cmnd_info;
if ((xchg->data_direction == DMA_NONE) || (!scsi_cmnd_info->sgl))
pkg.entry_count = 0;
pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME] =
xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME];
pkg.private[PKG_PRIVATE_XCHG_VP_INDEX] = lport->vp_index;
pkg.private[PKG_PRIVATE_XCHG_RPORT_INDEX] = rport->rport_index;
pkg.private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = xchg->hot_pool_tag;
pkg.fcp_cmnd = &xchg->fcp_cmnd;
pkg.frame_head.csctl_sid = lport->nport_id;
pkg.frame_head.rctl_did = rport->nport_id;
pkg.upper_cmd = xchg->scsi_cmnd_info.scsi_cmnd;
/* exch->fcp_rsp_id --->>> pkg->buffer_ptr */
pkg.unf_rsp_pload_bl.buffer_ptr =
(unsigned char *)
v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu;
pkg.unf_rsp_pload_bl.buf_dma_addr =
v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu_phy_addr;
pkg.unf_rsp_pload_bl.length = PAGE_SIZE;
pkg.frame_head.oxid_rxid =
((unsigned int)xchg->ox_id << 16 | xchg->rx_id);
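/* e.g. ox_id 0x1234 and rx_id 0xffff pack to oxid_rxid 0x1234ffff */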
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_EQUIP_ATT, UNF_INFO,
"[info]LPort (0x%p), Nport ID(0x%x) RPort ID(0x%x) direction(0x%x) magic number(0x%x) send IO to OX_ID(0x%x) entry count(0x%x) tag(0x%x)",
lport, lport->nport_id, rport->nport_id,
v_xchg->data_direction,
pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME],
v_xchg->ox_id, pkg.entry_count, xchg->hot_pool_tag);
atomic_inc(&rport->pending_io_cnt);
if ((rport->tape_support_needed == UNF_TRUE) &&
(atomic_read(&rport->pending_io_cnt) <= 3)) {
spin_lock_irqsave(&xchg->xchg_state_lock, flags);
v_xchg->io_state |= INI_IO_STATE_REC_TIMEOUT_WAIT;
spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
scsi_cmnd_info->abort_timeout = scsi_cmnd_info->time_out;
scsi_cmnd_info->time_out = UNF_REC_TOV;
}
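/*
 * For tape-capable R_Ports the first few I/Os (pending count <= 3)
 * run with the shorter UNF_REC_TOV; the original SCSI timeout is
 * kept in abort_timeout so the REC timeout-wait path flagged above
 * can fire before the full timeout does.
 */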
/* 3. add INI I/O timer if necessary */
if (scsi_cmnd_info->time_out != 0) {
/* inner I/O timer, not used at this time */
lport->xchg_mgr_temp.pfn_unf_xchg_add_timer(
xchg,
scsi_cmnd_info->time_out,
UNF_TIMER_TYPE_REQ_IO);
}
/* 4. R_Port state check */
if (unlikely((rport->lport_ini_state != UNF_PORT_STATE_LINKUP) ||
(rport->rp_state > UNF_RPORT_ST_READY))) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[info]Port(0x%x) RPort(0x%p) NPortId(0x%x) inistate(0x%x): RPort state(0x%x) upper_cmd(0x%p) is not ready",
lport->port_id, rport, rport->nport_id,
rport->lport_ini_state, rport->rp_state,
pkg.upper_cmd);
result = unf_get_uplevel_cmnd_errcode(
scsi_cmnd_info->err_code_table,
scsi_cmnd_info->err_code_table_cout,
UNF_IO_INCOMPLETE);
scsi_cmnd_info->result = result;
if (scsi_cmnd_info->time_out != 0)
lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer(xchg);
unf_cm_free_xchg(lport, xchg);
/* DID_IMM_RETRY */
return RETURN_OK;
} else if (rport->rp_state < UNF_RPORT_ST_READY) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[info]Port(0x%x) RPort(0x%p) NPortId(0x%x) inistate(0x%x): RPort state(0x%x) upper_cmd(0x%p) is not ready",
lport->port_id, rport, rport->nport_id,
rport->lport_ini_state, rport->rp_state,
pkg.upper_cmd);
spin_lock_irqsave(&v_xchg->xchg_state_lock, flags);
xchg->io_state |= INI_IO_STATE_UPSEND_ERR; /* need retry */
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags);
if (unlikely(scsi_cmnd_info->time_out != 0))
lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer(
(void *)xchg);
/* Host busy & need scsi retry */
return UNF_RETURN_ERROR;
}
/* 5. send scsi_cmnd to FC_LL Driver */
if (unf_hardware_start_io(lport, &pkg) != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port (0x%x) upper_cmd(0x%p) Hardware Send IO failed.",
lport->port_id, pkg.upper_cmd);
unf_release_esgls(xchg);
result = unf_get_uplevel_cmnd_errcode(
scsi_cmnd_info->err_code_table,
scsi_cmnd_info->err_code_table_cout,
UNF_IO_INCOMPLETE);
scsi_cmnd_info->result = result;
if (scsi_cmnd_info->time_out != 0)
lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer(xchg);
unf_cm_free_xchg(lport, xchg);
/* SCSI_DONE */
return RETURN_OK;
}
return RETURN_OK;
}
int unf_prefer_to_send_scsi_cmnd(struct unf_xchg_s *v_xchg)
{
/*
* About INI_IO_STATE_DRABORT:
* 1. Set ABORT tag: Clean L_Port/V_Port Link Down I/O
* with: INI_busy_list, delay_list, delay_transfer_list, wait_list
*
* 2. Set ABORT tag: for target session:
* with: INI_busy_list, delay_list, delay_transfer_list, wait_list
* a. R_Port remove
* b. Send PLOGI_ACC callback
* c. RCVD PLOGI
* d. RCVD LOGO
*
* 3. if set ABORT: prevent send scsi_cmnd to target
*/
struct unf_lport_s *lport = NULL;
struct unf_rport_s *rport = NULL;
int ret;
unsigned long flags = 0;
lport = v_xchg->lport;
rport = v_xchg->rport;
if (unlikely(!lport || !rport)) {
UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]Port(0x%p) or RPort(0x%p) is NULL", lport,
rport);
/* if happened (never happen): need retry */
return UNF_RETURN_ERROR;
}
/* 1. inc ref_cnt to protect exchange */
ret = (int)unf_xchg_ref_inc(v_xchg, INI_SEND_CMND);
if (unlikely(ret != RETURN_OK)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) exhg(%p) exception ref(%d) ",
lport->port_id, v_xchg,
atomic_read(&v_xchg->ref_cnt));
/* exchange exception, need retry */
spin_lock_irqsave(&v_xchg->xchg_state_lock, flags);
v_xchg->io_state |= INI_IO_STATE_UPSEND_ERR;
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags);
/* INI_IO_STATE_UPSEND_ERR: Host busy --->>> need retry */
return UNF_RETURN_ERROR;
}
/* 2. Xchg Abort state check: Free EXCH if necessary */
spin_lock_irqsave(&v_xchg->xchg_state_lock, flags);
if (unlikely((v_xchg->io_state & INI_IO_STATE_UPABORT) ||
(v_xchg->io_state & INI_IO_STATE_DRABORT))) {
/* Prevent to send: UP_ABORT/DRV_ABORT */
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags);
v_xchg->scsi_cmnd_info.result = UNF_SCSI_HOST(DID_IMM_RETRY);
unf_xchg_ref_dec(v_xchg, INI_SEND_CMND);
unf_cm_free_xchg(lport, v_xchg);
/*
* Release exchange & return directly:
* 1. FC LLDD rcvd ABTS before scsi_cmnd: do nothing
* 2. INI_IO_STATE_UPABORT/INI_IO_STATE_DRABORT:
* discard this cmnd directly
*/
return RETURN_OK;
}
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags);
/* 3. Send FCP_CMND to FC_LL Driver */
ret = unf_send_fcp_cmnd(lport, rport, v_xchg);
if (unlikely(ret != RETURN_OK)) {
/* exchange exception, need retry */
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) send exhg(%p) OX_ID(0x%x) RX_ID(0x%x) to Rport(%p) NPortID(0x%x) state(0x%x) scsi_id(0x%x) failed",
lport->port_id, v_xchg, v_xchg->ox_id,
v_xchg->rx_id,
rport, rport->nport_id, rport->rp_state,
rport->scsi_id);
spin_lock_irqsave(&v_xchg->xchg_state_lock, flags);
v_xchg->io_state |= INI_IO_STATE_UPSEND_ERR; /* need retry */
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags);
/* INI_IO_STATE_UPSEND_ERR: Host busy --->>> need retry */
unf_cm_free_xchg(lport, v_xchg);
}
/* 4. dec ref_cnt */
unf_xchg_ref_dec(v_xchg, INI_SEND_CMND);
return ret;
}
struct unf_lport_s *unf_find_lport_by_scsi_cmd(
struct unf_scsi_cmd_s *v_scsi_cmnd)
{
struct unf_lport_s *lport = NULL;
/* cmd -->> L_Port */
lport = (struct unf_lport_s *)UNF_GET_HOST_PORT_BY_CMND(v_scsi_cmnd);
if (unlikely(!lport)) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Find Port by scsi_cmnd(0x%p) failed",
v_scsi_cmnd);
/* cmnd -->> scsi_host_id -->> L_Port */
lport = unf_find_lport_by_scsi_host_id(
UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd));
}
return lport;
}
int unf_cm_queue_command(struct unf_scsi_cmd_s *v_scsi_cmnd)
{
/* SCSI Command --->>> FC FCP Command */
struct unf_lport_s *lport = NULL;
struct unf_xchg_s *xchg = NULL;
struct unf_rport_s *rport = NULL;
struct unf_rport_scsi_id_image_s *scsi_image_table = NULL;
unsigned int result = 0;
int ret;
unsigned long flags = 0;
unsigned int scsi_id;
unsigned int exhg_mgr_type = UNF_XCHG_MGR_TYPE_RANDOM;
/* 1. Get L_Port */
lport = unf_find_lport_by_scsi_cmd(v_scsi_cmnd);
/*
* Covers the hot-insertion/removal and card-removal scenarios:
* the LPort is looked up by SCSI_HOST_ID.
* slave_alloc is not invoked before LUNs are scanned, so the
* LPort cannot be taken from the command itself and must be
* obtained from the LPort linked list instead.
*
* After FC link-up the first SCSI command is INQUIRY, and
* SCSI delivers slave_alloc before INQUIRY.
*/
if (!lport) {
UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
"[err]Find Port by scsi cmd(0x%p) failed",
v_scsi_cmnd);
/* find from ini_error_code_table1 */
result = unf_get_uplevel_cmnd_errcode(
v_scsi_cmnd->err_code_table,
v_scsi_cmnd->err_code_table_cout,
UNF_IO_NO_LPORT); /* did_not_connect */
/* DID_NOT_CONNECT & SCSI_DONE & RETURN_OK(0) & I/O error */
unf_complete_cmnd(v_scsi_cmnd, result);
return RETURN_OK;
}
/* Get Local SCSI_Image_table & SCSI_ID */
scsi_image_table = &lport->rport_scsi_table;
scsi_id = v_scsi_cmnd->scsi_id;
/* 2. L_Port State check */
if (unlikely((lport->b_port_removing == UNF_TRUE) ||
(lport->b_pcie_linkdown == UNF_TRUE))) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) is removing(%d) or pcielinkdown(%d) and return with scsi_id(0x%x)",
lport->port_id, lport->b_port_removing,
lport->b_pcie_linkdown,
UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd));
result = unf_get_uplevel_cmnd_errcode(
v_scsi_cmnd->err_code_table,
v_scsi_cmnd->err_code_table_cout,
UNF_IO_NO_LPORT); /* did_not_connect */
UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, (result >> 16));
/* DID_NOT_CONNECT & SCSI_DONE & RETURN_OK(0) & I/O error */
unf_complete_cmnd(v_scsi_cmnd, result);
return RETURN_OK;
}
/* 3. Get R_Port */
rport = unf_find_rport_by_scsi_id(lport,
v_scsi_cmnd->err_code_table,
v_scsi_cmnd->err_code_table_cout,
UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd),
&result);
if (unlikely(!rport)) {
/* never happen: do not care */
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
"[err]Port(0x%x) find RPort by scsi_id(0x%x) failed",
lport->port_id,
UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd));
UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, (result >> 16));
/* DID_NOT_CONNECT/DID_SOFT_ERROR & SCSI_DONE &
* RETURN_OK(0) & I/O error
*/
unf_complete_cmnd(v_scsi_cmnd, result);
return RETURN_OK;
}
/* 4. If no free exchange is available, return host busy; the upper level retries */
xchg = (struct unf_xchg_s *)unf_cm_get_free_xchg(
lport,
exhg_mgr_type << 16 | UNF_XCHG_TYPE_INI);
if (unlikely(!xchg)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[err]Port(0x%x) get free exchange for INI IO(0x%x) failed",
lport->port_id,
UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd));
/* NOTE: need scsi retry */
return UNF_RETURN_ERROR;
}
xchg->scsi_cmnd_info.result = UNF_SCSI_HOST(DID_ERROR);
/* 5. Save the SCSI CMND information in advance. */
ret = unf_save_scsi_cmnd_to_xchg(lport, rport, xchg, v_scsi_cmnd);
if (unlikely(ret != RETURN_OK)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[err]Port(0x%x) save scsi_cmnd info(0x%x) to exchange failed",
lport->port_id,
UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd));
spin_lock_irqsave(&xchg->xchg_state_lock, flags);
xchg->io_state |= INI_IO_STATE_UPSEND_ERR;
spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
/* INI_IO_STATE_UPSEND_ERR: Don't Do SCSI_DONE,
* need retry I/O
*/
unf_cm_free_xchg(lport, xchg);
/* NOTE: need scsi retry */
return UNF_RETURN_ERROR;
}
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_INFO,
"[info]Get exchange(0x%p) OX_ID(0x%x) RX_ID(0x%x) hot_pool_tag(0x%x) for Pcmd:%p,Cmdsn:0x%lx,WorldId:%u",
xchg, xchg->ox_id, xchg->rx_id,
xchg->hot_pool_tag, v_scsi_cmnd->upper_cmnd,
(unsigned long)v_scsi_cmnd->cmnd_sn,
v_scsi_cmnd->world_id);
/* 6. Send SCSI CMND */
ret = unf_prefer_to_send_scsi_cmnd(xchg);
return ret;
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __UNF_IO_H__
#define __UNF_IO_H__
#define UNF_MAX_TARGET_NUMBER 2048
#define UNF_DEFAULT_MAX_LUN 0xFFFF
#define UNF_MAX_DMA_SEGS 0x400
#define UNF_MAX_SCSI_CMND_LEN 16
#define UNF_MAX_SECTORS 0xffff
#define UNF_MAX_BUS_CHANNEL 0
#define UNF_DMA_BOUNDARY 0xffffffffffffffff
#define UNF_MAX_CMND_PER_LUN 64 /* LUN max command */
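/* SCSI sense key codes as defined by SPC */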
#define NO_SENSE 0x00
#define RECOVERED_ERROR 0x01
#define NOT_READY 0x02
#define MEDIUM_ERROR 0x03
#define HARDWARE_ERROR 0x04
#define ILLEGAL_REQUEST 0x05
#define UNIT_ATTENTION 0x06
#define DATA_PROTECT 0x07
#define BLANK_CHECK 0x08
#define COPY_ABORTED 0x0a
#define ABORTED_COMMAND 0x0b
#define VOLUME_OVERFLOW 0x0d
#define MISCOMPARE 0x0e
#define UNF_GET_SCSI_HOST_ID_BY_CMND(pcmd) ((pcmd)->scsi_host_id)
#define UNF_GET_SCSI_ID_BY_CMND(pcmd) ((pcmd)->scsi_id)
#define UNF_GET_HOST_PORT_BY_CMND(pcmd) ((pcmd)->drv_private)
#define UNF_GET_FCP_CMND(pcmd) ((pcmd)->pcmnd[0])
#define UNF_GET_DATA_LEN(pcmd) ((pcmd)->transfer_len)
#define UNF_GET_DATA_DIRECTION(pcmd) ((pcmd)->data_direction)
#define UNF_GET_HOST_CMND(pcmd) ((pcmd)->upper_cmnd)
#define UNF_GET_CMND_DONE_FUNC(pcmd) ((pcmd)->pfn_done)
#define UNF_GET_SGL_ENTRY_BUF_FUNC(pcmd) ((pcmd)->pfn_unf_ini_get_sgl_entry)
#define UNF_GET_SENSE_BUF_ADDR(pcmd) ((pcmd)->sense_buf)
#define UNF_GET_ERR_CODE_TABLE(pcmd) ((pcmd)->err_code_table)
#define UNF_GET_ERR_CODE_TABLE_COUNT(pcmd) ((pcmd)->err_code_table_cout)
#define UNF_SET_HOST_CMND(pcmd, host_cmd) ((pcmd)->upper_cmnd = (host_cmd))
#define UNF_SET_CMND_DONE_FUNC(pcmd, pfn) ((pcmd)->pfn_done = (pfn))
#define UNF_SET_RESID(pcmd, id_len) ((pcmd)->resid = (id_len))
#define UNF_SET_CMND_RESULT(pcmd, uiresult) ((pcmd)->result = (int)(uiresult))
#define UNF_DONE_SCSI_CMND(pcmd) ((pcmd)->pfn_done(pcmd))
#define UNF_GET_CMND_SGL(pcmd) ((pcmd)->sgl)
#define UNF_INI_GET_DIF_SGL(pcmd) ((pcmd)->dif_control.dif_sgl)
unsigned int unf_ini_scsi_completed(void *v_lport,
struct unf_frame_pkg_s *v_pkg);
unsigned int unf_ini_get_sgl_entry(void *v_pkg, char **v_buf,
unsigned int *v_buf_len);
unsigned int unf_ini_get_dif_sgl_entry(void *v_pkg, char **v_buf,
unsigned int *v_buf_len);
void unf_complete_cmnd(struct unf_scsi_cmd_s *v_scsi_cmnd, unsigned int result);
void unf_done_ini_xchg(struct unf_xchg_s *v_xchg);
unsigned int unf_tmf_timeout_recovery_special(void *v_rport, void *v_xchg);
void unf_abts_timeout_recovery_default(void *v_rport, void *v_xchg);
int unf_cm_queue_command(struct unf_scsi_cmd_s *v_scsi_cmnd);
int unf_cm_eh_abort_handler(struct unf_scsi_cmd_s *v_scsi_cmnd);
int unf_cm_eh_device_reset_handler(struct unf_scsi_cmd_s *v_scsi_cmnd);
int unf_cm_target_reset_handler(struct unf_scsi_cmd_s *v_scsi_cmnd);
int unf_cm_bus_reset_handler(struct unf_scsi_cmd_s *v_scsi_cmnd);
struct unf_rport_s *unf_find_rport_by_scsi_id(
struct unf_lport_s *v_lport,
struct unf_ini_error_code_s *v_err_code_table,
unsigned int v_err_code_table_cout,
unsigned int v_scsi_id,
unsigned int *v_scsi_result);
struct unf_lport_s *unf_find_lport_by_scsi_cmd(
struct unf_scsi_cmd_s *v_scsi_cmnd);
void unf_tmf_abnormal_recovery(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport,
struct unf_xchg_s *v_xchg);
unsigned int unf_get_uplevel_cmnd_errcode(
struct unf_ini_error_code_s *v_err_table,
unsigned int v_err_table_count,
unsigned int v_drv_err_code);
#endif
// SPDX-License-Identifier: GPL-2.0
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#include "unf_log.h"
#include "unf_exchg.h"
#include "unf_rport.h"
#include "unf_io.h"
#include "unf_portman.h"
#include "unf_service.h"
#include "unf_io_abnormal.h"
static int unf_send_abts_success(struct unf_lport_s *v_lport,
struct unf_xchg_s *v_xchg,
struct unf_scsi_cmd_s *v_scsi_cmnd,
unsigned int time_out_value)
{
int wait_marker = UNF_TRUE;
struct unf_rport_scsi_id_image_s *scsi_image_table = NULL;
unsigned int scsi_id;
unsigned int ret;
unsigned long flag = 0;
spin_lock_irqsave(&v_xchg->xchg_state_lock, flag);
wait_marker = (v_xchg->abts_state & MARKER_STS_RECEIVED) ?
UNF_FALSE : UNF_TRUE;
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag);
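/*
 * Wait for the uCode ABTS marker: down_timeout() returns 0 once the
 * marker handler posts task_sema, and -ETIME if it does not arrive
 * within time_out_value.
 */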
if (wait_marker) {
if (down_timeout(
&v_xchg->task_sema,
(long long)msecs_to_jiffies(time_out_value))) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT,
UNF_WARN,
"[warn]Port(0x%x) recv abts marker timeout,Exch(0x%p) OX_ID(0x%x 0x%x) RX_ID(0x%x)",
v_lport->port_id, v_xchg,
v_xchg->ox_id, v_xchg->hot_pool_tag,
v_xchg->rx_id);
/* Cancel abts rsp timer when sema timeout */
v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer(
(void *)v_xchg);
/* Cancel the INI_IO_STATE_UPABORT flag and
 * process the I/O in TMF
*/
spin_lock_irqsave(&v_xchg->xchg_state_lock, flag);
v_xchg->io_state &= ~INI_IO_STATE_UPABORT;
v_xchg->io_state |= INI_IO_STATE_TMF_ABORT;
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag);
return UNF_SCSI_ABORT_FAIL;
}
} else {
v_xchg->ucode_abts_state = UNF_IO_SUCCESS;
}
scsi_image_table = &v_lport->rport_scsi_table;
scsi_id = v_scsi_cmnd->scsi_id;
spin_lock_irqsave(&v_xchg->xchg_state_lock, flag);
if ((v_xchg->ucode_abts_state == UNF_IO_SUCCESS) ||
(v_xchg->scsi_cmnd_info.result == UNF_IO_ABORT_PORT_REMOVING)) {
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag);
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"[info]Port(0x%x) Send ABTS succeed and recv marker Exch(0x%p) OX_ID(0x%x) RX_ID(0x%x) marker status(0x%x)",
v_lport->port_id, v_xchg,
v_xchg->ox_id, v_xchg->rx_id,
v_xchg->ucode_abts_state);
ret = DID_RESET;
UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, ret);
unf_complete_cmnd(v_scsi_cmnd, DID_RESET << 16);
return UNF_SCSI_ABORT_SUCCESS;
}
v_xchg->io_state &= ~INI_IO_STATE_UPABORT;
v_xchg->io_state |= INI_IO_STATE_TMF_ABORT;
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag);
/* Cancel abts rsp timer when sema timeout */
v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)v_xchg);
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
"[warn]Port(0x%x) send ABTS failed. Exch(0x%p) oxid(0x%x) hot_tag(0x%x) ret(0x%x) v_xchg->io_state (0x%x)",
v_lport->port_id, v_xchg, v_xchg->ox_id,
v_xchg->hot_pool_tag,
v_xchg->scsi_cmnd_info.result, v_xchg->io_state);
/* return fail and then enter TMF */
return UNF_SCSI_ABORT_FAIL;
}
static int unf_ini_abort_cmnd(struct unf_lport_s *v_lport,
struct unf_xchg_s *v_xchg,
struct unf_scsi_cmd_s *v_scsi_cmnd)
{
/*
* About INI_IO_STATE_UPABORT:
*
* 1. Check: AC power down
* 2. Check: L_Port destroy
* 3. Check: I/O XCHG timeout
* 4. Set ABORT: send ABTS
* 5. Set ABORT: LUN reset
* 6. Set ABORT: Target reset
* 7. Check: Prevent to send I/O to target (UNF_PreferToSendScsiCmnd)
* 8. Check: Done INI XCHG --->>> do not call scsi_done, return directly
* 9. Check: INI SCSI Complete --->>>
* do not call scsi_done, return directly
*/
#define UNF_RPORT_NOTREADY_WAIT_SEM_TIMEOUT (2000) /* 2s */
struct unf_lport_s *lport = NULL;
struct unf_rport_s *rport = NULL;
unsigned long flag = 0;
struct unf_rport_scsi_id_image_s *scsi_image_table = NULL;
unsigned int scsi_id;
unsigned int ret;
unsigned int time_out_value = (unsigned int)UNF_WAIT_SEM_TIMEOUT;
UNF_CHECK_VALID(0x1335, TRUE, v_lport, return UNF_SCSI_ABORT_FAIL);
lport = v_lport;
/* 1. Xchg State Set: INI_IO_STATE_UPABORT */
spin_lock_irqsave(&v_xchg->xchg_state_lock, flag);
v_xchg->io_state |= INI_IO_STATE_UPABORT;
rport = v_xchg->rport;
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag);
/* 2. R_Port check */
if (unlikely(!rport)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) send ABTS but no RPort, OX_ID(0x%x) RX_ID(0x%x)",
lport->port_id, v_xchg->ox_id, v_xchg->rx_id);
return UNF_SCSI_ABORT_SUCCESS;
}
spin_lock_irqsave(&rport->rport_state_lock, flag);
if (unlikely(rport->rp_state != UNF_RPORT_ST_READY)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT,
UNF_WARN,
"[warn]Port(0x%x) find RPort's state(0x%x) is not ready but send ABTS also, exchange(0x%p) tag(0x%x)",
lport->port_id, rport->rp_state,
v_xchg, v_xchg->hot_pool_tag);
/*
* Important: still send ABTS and update the timer.
* Purpose: only used to release chip (uCode) resources;
* continue
*/
time_out_value = UNF_RPORT_NOTREADY_WAIT_SEM_TIMEOUT;
}
spin_unlock_irqrestore(&rport->rport_state_lock, flag);
/* 3. L_Port State check */
if (unlikely(lport->b_port_removing == UNF_TRUE)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) is removing", lport->port_id);
v_xchg->io_state &= ~INI_IO_STATE_UPABORT;
return UNF_SCSI_ABORT_FAIL;
}
scsi_image_table = &lport->rport_scsi_table;
scsi_id = v_scsi_cmnd->scsi_id;
/* If pcie linkdown, complete this io and flush all io */
if (unlikely(lport->b_pcie_linkdown == UNF_TRUE)) {
ret = DID_RESET;
UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, ret);
unf_complete_cmnd(v_scsi_cmnd, DID_RESET << 16);
unf_free_lport_all_xchg(v_lport);
return UNF_SCSI_ABORT_SUCCESS;
}
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_KEVENT,
"[abort]Port(0x%x) Exchg(0x%p) delay(%llu) SID(0x%x) DID(0x%x) wwpn(0x%llx) OxID(0x%x 0x%x) scsi_id(0x%x) lun_id(0x%x) cmdsn(0x%llx)",
lport->port_id, v_xchg,
(unsigned long long)jiffies_to_msecs(jiffies) -
(unsigned long long)jiffies_to_msecs(v_xchg->alloc_jif),
v_xchg->sid, v_xchg->did, rport->port_name,
v_xchg->ox_id, v_xchg->hot_pool_tag, v_scsi_cmnd->scsi_id,
(unsigned int)v_scsi_cmnd->lun_id, v_scsi_cmnd->cmnd_sn);
/* Init abts marker semaphore */
sema_init(&v_xchg->task_sema, 0);
if (v_xchg->scsi_cmnd_info.time_out != 0)
lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer(v_xchg);
/* Add timer for sending ABTS */
v_lport->xchg_mgr_temp.pfn_unf_xchg_add_timer(
(void *)v_xchg,
(unsigned long)UNF_WAIT_ABTS_RSP_TIMEOUT,
UNF_TIMER_TYPE_INI_ABTS);
/* 4. Send INI ABTS CMND */
if (unf_send_abts(lport, v_xchg) != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) Send ABTS failed. Exch(0x%p) OX_ID(0x%x 0x%x) RX_ID(0x%x)",
lport->port_id, v_xchg,
v_xchg->ox_id, v_xchg->hot_pool_tag,
v_xchg->rx_id);
/* Cancel timer when sending ABTS failed */
v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer(
(void *)v_xchg);
/* Cancel the INI_IO_STATE_UPABORT flag
 * and process the I/O in TMF
*/
spin_lock_irqsave(&v_xchg->xchg_state_lock, flag);
v_xchg->io_state &= ~INI_IO_STATE_UPABORT;
v_xchg->io_state |= INI_IO_STATE_TMF_ABORT;
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag);
return UNF_SCSI_ABORT_FAIL;
}
return unf_send_abts_success(lport, v_xchg, v_scsi_cmnd,
time_out_value);
}
static void unf_flush_ini_resp_que(struct unf_lport_s *v_lport)
{
UNF_CHECK_VALID(0x1335, TRUE, v_lport, return);
if (v_lport->low_level_func.service_op.pfn_unf_flush_ini_resp_que)
(void)v_lport->low_level_func.service_op.pfn_unf_flush_ini_resp_que(v_lport->fc_port);
}
int unf_cm_eh_abort_handler(struct unf_scsi_cmd_s *v_scsi_cmnd)
{
/*
* SCSI ABORT Command --->>> FC ABTS Command
* If return ABORT_FAIL then enter TMF process
*/
struct unf_lport_s *lport = NULL;
struct unf_xchg_s *xchg = NULL;
struct unf_rport_s *rport = NULL;
struct unf_lport_s *xchg_lport = NULL;
int ret;
unsigned long flag = 0;
/* 1. Get L_Port: Point to Scsi_host */
lport = unf_find_lport_by_scsi_cmd(v_scsi_cmnd);
if (!lport) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Can't find port by scsi host id(0x%x)",
UNF_GET_SCSI_HOST_ID_BY_CMND(v_scsi_cmnd));
return UNF_SCSI_ABORT_FAIL;
}
/* 2. find target Xchg for INI Abort CMND */
xchg = unf_cm_lookup_xchg_by_cmnd_sn(lport, v_scsi_cmnd->cmnd_sn,
v_scsi_cmnd->world_id);
if (unlikely(!xchg)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_ABNORMAL,
UNF_WARN,
"[warn]Port(0x%x) can't find exchange by Cmdsn(0x%lx)",
lport->port_id,
(unsigned long)v_scsi_cmnd->cmnd_sn);
unf_flush_ini_resp_que(lport);
return UNF_SCSI_ABORT_SUCCESS;
}
/* 3. increase ref_cnt to protect exchange */
ret = (int)unf_xchg_ref_inc(xchg, INI_EH_ABORT);
if (unlikely(ret != RETURN_OK)) {
unf_flush_ini_resp_que(lport);
return UNF_SCSI_ABORT_SUCCESS;
}
v_scsi_cmnd->upper_cmnd = xchg->scsi_cmnd_info.scsi_cmnd;
xchg->debug_hook = UNF_TRUE;
/* 4. Get & check the exchange's L_Port/R_Port */
spin_lock_irqsave(&xchg->xchg_state_lock, flag);
xchg_lport = xchg->lport;
rport = xchg->rport;
spin_unlock_irqrestore(&xchg->xchg_state_lock, flag);
if (unlikely(!xchg_lport || !rport)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Exchange(0x%p)'s L_Port or R_Port is NULL, state(0x%x)",
xchg, xchg->io_state);
unf_xchg_ref_dec(xchg, INI_EH_ABORT);
if (!xchg_lport)
return UNF_SCSI_ABORT_FAIL; /* for L_Port */
return UNF_SCSI_ABORT_SUCCESS; /* for R_Port */
}
/* 5. Send INI Abort Cmnd */
ret = unf_ini_abort_cmnd(xchg_lport, xchg, v_scsi_cmnd);
/* 6. decrease exchange ref_cnt */
unf_xchg_ref_dec(xchg, INI_EH_ABORT);
return ret;
}
static unsigned int unf_tmf_timeout_recovery_default(void *v_rport,
void *v_xchg)
{
struct unf_lport_s *lport = NULL;
unsigned long flag = 0;
struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_xchg;
struct unf_rport_s *rport = (struct unf_rport_s *)v_rport;
lport = xchg->lport;
UNF_CHECK_VALID(0x4614, UNF_TRUE, lport, return UNF_RETURN_ERROR);
spin_lock_irqsave(&rport->rport_state_lock, flag);
unf_rport_state_ma(rport, UNF_EVENT_RPORT_LOGO);
spin_unlock_irqrestore(&rport->rport_state_lock, flag);
unf_rport_enter_logo(lport, rport);
return RETURN_OK;
}
void unf_abts_timeout_recovery_default(void *v_rport, void *v_xchg)
{
struct unf_lport_s *lport = NULL;
unsigned long flag = 0;
struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_xchg;
struct unf_rport_s *rport = (struct unf_rport_s *)v_rport;
lport = xchg->lport;
UNF_CHECK_VALID(0x4614, UNF_TRUE, lport, return);
spin_lock_irqsave(&xchg->xchg_state_lock, flag);
if (INI_IO_STATE_DONE & xchg->io_state) {
spin_unlock_irqrestore(&xchg->xchg_state_lock, flag);
return;
}
spin_unlock_irqrestore(&xchg->xchg_state_lock, flag);
if (xchg->rport_bind_jifs != rport->rport_alloc_jifs)
return;
spin_lock_irqsave(&rport->rport_state_lock, flag);
unf_rport_state_ma(rport, UNF_EVENT_RPORT_LOGO);
spin_unlock_irqrestore(&rport->rport_state_lock, flag);
unf_rport_enter_logo(lport, rport);
}
unsigned int unf_tmf_timeout_recovery_special(void *v_rport, void *v_xchg)
{
/* Do port reset or R_Port LOGO */
int ret = UNF_RETURN_ERROR;
struct unf_lport_s *lport = NULL;
struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_xchg;
struct unf_rport_s *rport = (struct unf_rport_s *)v_rport;
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_rport,
return UNF_RETURN_ERROR);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_xchg,
return UNF_RETURN_ERROR);
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, xchg->lport,
return UNF_RETURN_ERROR);
lport = xchg->lport->root_lport;
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, lport,
return UNF_RETURN_ERROR);
/* 1. TMF response timeout & Marker STS timeout */
if (!(xchg->tmf_state &
(MARKER_STS_RECEIVED | TMF_RESPONSE_RECEIVED))) {
/* TMF timeout & marker timeout */
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
"[warn]Port(0x%x) receive marker status timeout and do recovery",
lport->port_id);
/* Do port reset */
ret = unf_cm_reset_port(lport->port_id);
if (ret != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT,
UNF_WARN,
"[warn]Port(0x%x) do reset failed",
lport->port_id);
return UNF_RETURN_ERROR;
}
return RETURN_OK;
}
/* 2. default case: Do LOGO process */
unf_tmf_timeout_recovery_default(rport, xchg);
return RETURN_OK;
}
void unf_tmf_abnormal_recovery(struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport,
struct unf_xchg_s *v_xchg)
{
/*
* for device(lun)/target(session) reset:
* Do port reset or R_Port LOGO
*/
if (v_lport->pfn_unf_tmf_abnormal_recovery)
v_lport->pfn_unf_tmf_abnormal_recovery((void *)v_rport,
(void *)v_xchg);
}
static void unf_build_task_mgmt_fcp_cmnd(struct unf_fcp_cmnd_s *v_fcp_cmnd,
struct unf_scsi_cmd_s *v_scsi_cmnd,
enum unf_task_mgmt_cmnd_e v_task_mgmt)
{
UNF_CHECK_VALID(0x1339, UNF_TRUE, v_fcp_cmnd, return);
UNF_CHECK_VALID(0x1340, UNF_TRUE, v_scsi_cmnd, return);
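/*
 * The SCSI layer hands the 8-byte LUN down in big-endian order:
 * convert it to CPU byte order in place and shift the 64-bit value
 * right by one byte so it matches the FCP_CMND LUN field layout
 * this driver uses.
 */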
unf_big_end_to_cpu((void *)v_scsi_cmnd->pc_lun_id, UNF_FCP_LUNID_LEN_8);
(*(unsigned long long *)(v_scsi_cmnd->pc_lun_id)) >>= 8;
memcpy(v_fcp_cmnd->lun, v_scsi_cmnd->pc_lun_id,
sizeof(v_fcp_cmnd->lun));
/*
* If the TASK MANAGEMENT FLAGS field is set to a nonzero value,
* the FCP_CDB field, the FCP_DL field, the TASK ATTRIBUTE field,
* the RDDATA bit, and the WRDATA bit shall be ignored and the
* FCP_BIDIRECTIONAL_READ_DL field shall not be
* included in the FCP_CMND IU payload
*/
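/*
 * UNF_SET_TASK_MGMT_FLAGS() packs the requested task management code
 * into the TASK MANAGEMENT FLAGS portion of the FCP_CMND CONTROL
 * field; all other CONTROL bits remain zero.
 */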
v_fcp_cmnd->control = UNF_SET_TASK_MGMT_FLAGS(v_task_mgmt);
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
"SCSI cmnd(0x%x) is task mgmt cmnd. ntrl Flag(LITTLE END) is 0x%x.",
v_task_mgmt, v_fcp_cmnd->control);
}
int unf_send_scsi_mgmt_cmnd(struct unf_xchg_s *v_xchg,
struct unf_lport_s *v_lport,
struct unf_rport_s *v_rport,
struct unf_scsi_cmd_s *v_scsi_cmnd,
enum unf_task_mgmt_cmnd_e v_task_mgnt_cmd_type)
{
/*
* 1. Device/LUN reset
* 2. Target/Session reset
*/
struct unf_xchg_s *xchg = NULL;
int ret = SUCCESS;
struct unf_frame_pkg_s pkg = { 0 };
unsigned long flag = 0;
UNF_CHECK_VALID(0x1341, UNF_TRUE, v_xchg, return FAILED);
UNF_CHECK_VALID(0x1342, UNF_TRUE, v_lport, return FAILED);
UNF_CHECK_VALID(0x1343, UNF_TRUE, v_rport, return FAILED);
UNF_CHECK_VALID(0x1344, UNF_TRUE, v_scsi_cmnd, return FAILED);
UNF_CHECK_VALID(0x1345, UNF_TRUE,
((v_task_mgnt_cmd_type <= UNF_FCP_TM_TERMINATE_TASK) &&
(v_task_mgnt_cmd_type >= UNF_FCP_TM_QUERY_TASK_SET)),
return FAILED);
xchg = v_xchg;
xchg->lport = v_lport;
xchg->rport = v_rport;
/* 1. State: Up_Task */
spin_lock_irqsave(&xchg->xchg_state_lock, flag);
xchg->io_state |= INI_IO_STATE_UPTASK;
spin_unlock_irqrestore(&xchg->xchg_state_lock, flag);
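/*
 * With a passive exchange manager, reuse the exchange's hot pool tag
 * as its OX_ID and pack OX_ID into the upper 16 bits of the frame
 * header's oxid_rxid word: ((u32)ox_id << 16) | rx_id.
 */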
if (v_lport->low_level_func.xchg_mgr_type ==
UNF_LOW_LEVEL_MGR_TYPE_PASSTIVE) {
xchg->ox_id = xchg->hot_pool_tag;
pkg.frame_head.oxid_rxid =
((unsigned int)xchg->ox_id << 16) | xchg->rx_id;
}
/* 2. Set TASK MANAGEMENT FLAGS of FCP_CMND to
* the corresponding task management command
*/
unf_build_task_mgmt_fcp_cmnd(&xchg->fcp_cmnd, v_scsi_cmnd,
v_task_mgnt_cmd_type);
pkg.xchg_contex = xchg;
pkg.private[PKG_PRIVATE_XCHG_RPORT_INDEX] = v_rport->rport_index;
pkg.fcp_cmnd = &xchg->fcp_cmnd;
pkg.private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = xchg->hot_pool_tag;
pkg.frame_head.csctl_sid = v_lport->nport_id;
pkg.frame_head.rctl_did = v_rport->nport_id;
pkg.unf_rsp_pload_bl.buffer_ptr =
(unsigned char *)xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu;
pkg.unf_rsp_pload_bl.buf_dma_addr =
v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu_phy_addr;
pkg.unf_rsp_pload_bl.length = PAGE_SIZE;
pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME] =
v_xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME];
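/*
 * If the PCIe link is already down the command can never reach the
 * chip: flush all outstanding exchanges on this L_Port and report
 * SUCCESS to let the error handler continue.
 */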
if (unlikely(v_lport->b_pcie_linkdown == UNF_TRUE)) {
unf_free_lport_all_xchg(v_lport);
return SUCCESS;
}
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT,
"[event]Port(0x%x) send task_cmnd(0x%x) to RPort(0x%x) Hottag(0x%x) lunid(0x%llx)",
v_lport->port_id, v_task_mgnt_cmd_type,
v_rport->nport_id, xchg->hot_pool_tag,
*((unsigned long long *)v_scsi_cmnd->pc_lun_id));
/* 3. Init exchange task semaphore */
sema_init(&xchg->task_sema, 0);
/* 4. Send Mgmt Task to low-level */
if (unf_hardware_start_io(v_lport, &pkg) != RETURN_OK) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) send task_cmnd(0x%x) to RPort(0x%x) failed",
v_lport->port_id, v_task_mgnt_cmd_type,
v_rport->nport_id);
return FAILED;
}
/*
 * 5. Wait for the response on the exchange task semaphore.
 *
 * The semaphore is initialized to zero before the Mgmt Task
 * message is sent and is woken up (up()) when the RSP message
 * is received. If no RSP arrives within the timeout period,
 * down_timeout() returns non-zero instead.
 * NOTE: down_timeout() takes its timeout in jiffies, hence the
 * msecs_to_jiffies() conversion below.
 */
if (down_timeout(&xchg->task_sema,
(long)msecs_to_jiffies((unsigned int)UNF_WAIT_SEM_TIMEOUT))) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) send task_cmnd(0x%x) to RPort(0x%x) timeout scsi id(0x%x) lun id(0x%x)",
v_lport->nport_id, v_task_mgnt_cmd_type,
v_rport->nport_id,
v_scsi_cmnd->scsi_id,
(unsigned int)v_scsi_cmnd->lun_id);
/* semaphore timeout */
ret = FAILED;
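/*
 * Special case: if the L_Port is already in RESET state, the
 * outstanding TMF is expected to be cleaned up by the reset
 * path, so treat the timeout as SUCCESS.
 */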
spin_lock_irqsave(&v_lport->lport_state_lock, flag);
if (v_lport->en_states == UNF_LPORT_ST_RESET)
ret = SUCCESS;
spin_unlock_irqrestore(&v_lport->lport_state_lock, flag);
return ret;
}
/*
 * 6. NOTE: no timeout (the semaphore has been woken up)
 * Check the Scsi_Cmnd (Mgmt Task) result:
 *
 * FAILED: completed with an error code or a bad RSP
 * SUCCESS: everything else
 */
if (xchg->scsi_cmnd_info.result == UNF_IO_SUCCESS) {
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"[info]Port(0x%x) send task_cmnd(0x%x) to RPort(0x%x) and receive rsp succeed",
v_lport->nport_id, v_task_mgnt_cmd_type,
v_rport->nport_id);
ret = SUCCESS;
} else {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) send task_cmnd(0x%x) to RPort(0x%x) and receive rsp failed scsi id(0x%x) lun id(0x%x)",
v_lport->nport_id, v_task_mgnt_cmd_type,
v_rport->nport_id,
v_scsi_cmnd->scsi_id,
(unsigned int)v_scsi_cmnd->lun_id);
ret = FAILED;
}
return ret;
}
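/*
 * SCSI error-handler entry points below map the mid-layer resets to
 * FC operations: device/LUN reset -> FCP LUN reset, target reset ->
 * FCP target reset, bus reset -> port reset.
 */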
int unf_cm_eh_device_reset_handler(struct unf_scsi_cmd_s *v_scsi_cmnd)
{
/* SCSI Device/LUN Reset Command --->>> FC LUN/Device Reset Command */
struct unf_lport_s *lport = NULL;
struct unf_rport_s *rport = NULL;
struct unf_xchg_s *xchg = NULL;
unsigned int cmnd_result = 0;
int ret = SUCCESS;
UNF_CHECK_VALID(0x1349, UNF_TRUE, v_scsi_cmnd, return FAILED);
UNF_CHECK_VALID(0x1350, UNF_TRUE, v_scsi_cmnd->pc_lun_id,
return FAILED);
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"[event]Enter device/LUN reset handler");
/* 1. Get L_Port */
lport = unf_find_lport_by_scsi_cmd(v_scsi_cmnd);
if (!lport) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Can't find port by scsi_host_id(0x%x)",
UNF_GET_SCSI_HOST_ID_BY_CMND(v_scsi_cmnd));
return FAILED;
}
/* 2. L_Port State checking */
if (unlikely(lport->b_port_removing == UNF_TRUE)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%p) is removing", lport);
return FAILED;
}
/*
 * 3. Get R_Port: if no R_Port is found or the R_Port is not ready,
 * return OK
 * from: L_Port -->> rport_scsi_table (image table)
 * -->> rport_info_table
 */
rport = unf_find_rport_by_scsi_id(lport,
v_scsi_cmnd->err_code_table,
v_scsi_cmnd->err_code_table_cout,
UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd),
&cmnd_result);
if (unlikely(!rport)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) Can't find rport by scsi_id(0x%x)",
lport->port_id,
UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd));
return SUCCESS;
}
/*
* 4. Set the I/O of the corresponding LUN to abort.
*
* LUN Reset: set UP_ABORT tag, with:
* INI_Busy_list, IO_Wait_list,
* IO_Delay_list, IO_Delay_transfer_list
*/
unf_cm_xchg_abort_by_lun(
lport, rport,
*((unsigned long long *)v_scsi_cmnd->pc_lun_id),
NULL, UNF_FALSE);
/* 5. R_Port state check */
if (unlikely(rport->rp_state != UNF_RPORT_ST_READY)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) RPort(0x%x) state(0x%x) SCSI Command(0x%p), rport is not ready",
lport->port_id, rport->nport_id,
rport->rp_state, v_scsi_cmnd);
return SUCCESS;
}
/* 6. Get a free exchange for device reset & increase its ref_cnt */
xchg = (struct unf_xchg_s *)unf_cm_get_free_xchg(lport,
UNF_XCHG_TYPE_INI);
if (unlikely(!xchg)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%p) can't get free exchange", lport);
return FAILED;
}
/* increase ref_cnt for protecting exchange */
ret = (int)unf_xchg_ref_inc(xchg, INI_EH_DEVICE_RESET);
UNF_CHECK_VALID(0x1351, UNF_TRUE, (ret == RETURN_OK), return FAILED);
/* 7. Send Device/LUN Reset to Low level */
ret = unf_send_scsi_mgmt_cmnd(xchg, lport, rport,
v_scsi_cmnd,
UNF_FCP_TM_LOGICAL_UNIT_RESET);
if (unlikely(ret == FAILED)) {
/*
* Do port reset or R_Port LOGO:
* 1. FAILED: send failed
* 2. FAILED: semaphore timeout
* 3. SUCCESS: rcvd rsp & semaphore has been woken up
*/
unf_tmf_abnormal_recovery(lport, rport, xchg);
}
/*
 * 8. Release the resource immediately if necessary
 * NOTE: at this point the semaphore either timed out or was
 * woken up by the received RSP
 */
if (likely((lport->b_port_removing != UNF_TRUE) ||
(lport->root_lport != lport)))
unf_cm_free_xchg(xchg->lport, xchg);
/* decrease ref_cnt */
unf_xchg_ref_dec(xchg, INI_EH_DEVICE_RESET);
return SUCCESS;
}
int unf_cm_target_reset_handler(struct unf_scsi_cmd_s *v_scsi_cmnd)
{
/* SCSI Target Reset Command --->>> FC Session Reset/Delete Command */
struct unf_lport_s *lport = NULL;
struct unf_rport_s *rport = NULL;
struct unf_xchg_s *xchg = NULL;
unsigned int cmnd_result = 0;
int ret;
UNF_CHECK_VALID(0x1355, UNF_TRUE, v_scsi_cmnd, return FAILED);
UNF_CHECK_VALID(0x1356, UNF_TRUE, v_scsi_cmnd->pc_lun_id,
return FAILED);
/* 1. Get L_Port */
lport = unf_find_lport_by_scsi_cmd(v_scsi_cmnd);
if (!lport) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Can't find port by scsi_host_id(0x%x)",
UNF_GET_SCSI_HOST_ID_BY_CMND(v_scsi_cmnd));
return FAILED;
}
/* 2. L_Port State check */
if (unlikely(lport->b_port_removing == UNF_TRUE)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%p) is removing", lport);
return FAILED;
}
/*
 * 3. Get R_Port: if no R_Port is found or the R_Port is not ready,
 * return OK
 * from: L_Port -->> rport_scsi_table (image table) -->>
 * rport_info_table
 */
rport = unf_find_rport_by_scsi_id(lport,
v_scsi_cmnd->err_code_table,
v_scsi_cmnd->err_code_table_cout,
UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd),
&cmnd_result);
if (unlikely(!rport)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Can't find rport by scsi_id(0x%x)",
UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd));
return SUCCESS;
}
/*
 * 4. Set UP_ABORT on all target I/O and session I/O
 *
 * Target Reset: set UP_ABORT tag on:
 * INI_Busy_list, IO_Wait_list,
 * IO_Delay_list, IO_Delay_transfer_list
 */
unf_cm_xchg_abort_by_session(lport, rport);
/* 5. R_Port state check */
if (unlikely(rport->rp_state != UNF_RPORT_ST_READY)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%x) RPort(0x%x) state(0x%x) is not ready, SCSI Command(0x%p)",
lport->port_id, rport->nport_id,
rport->rp_state, v_scsi_cmnd);
return SUCCESS;
}
/* 6. Get free Xchg for Target Reset CMND */
xchg = (struct unf_xchg_s *)unf_cm_get_free_xchg(lport,
UNF_XCHG_TYPE_INI);
if (unlikely(!xchg)) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Port(0x%p) can't get free exchange", lport);
return FAILED;
}
/* increase ref_cnt to protect exchange */
ret = (int)unf_xchg_ref_inc(xchg, INI_EH_DEVICE_RESET);
UNF_CHECK_VALID(0x1357, UNF_TRUE, (ret == RETURN_OK), return FAILED);
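/* NOTE: the INI_EH_DEVICE_RESET ref tag is reused for target reset;
 * the unf_xchg_ref_dec() at the end of this function matches it.
 */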
/* 7. Send Target Reset Cmnd to low-level */
ret = unf_send_scsi_mgmt_cmnd(xchg, lport, rport, v_scsi_cmnd,
UNF_FCP_TM_TARGET_RESET);
if (unlikely(ret == FAILED)) {
/*
* Do port reset or R_Port LOGO:
* 1. FAILED: send failed
* 2. FAILED: semaphore timeout
* 3. SUCCESS: rcvd rsp & semaphore has been woken up
*/
unf_tmf_abnormal_recovery(lport, rport, xchg);
}
/*
 * 8. Release the resource immediately if necessary
 * NOTE: at this point the semaphore either timed out or was
 * woken up by the received RSP
 */
if (likely((lport->b_port_removing != UNF_TRUE) ||
(lport->root_lport != lport)))
unf_cm_free_xchg(xchg->lport, xchg);
/* decrease exchange ref_cnt */
unf_xchg_ref_dec(xchg, INI_EH_DEVICE_RESET);
return SUCCESS;
}
int unf_cm_bus_reset_handler(struct unf_scsi_cmd_s *v_scsi_cmnd)
{
/* SCSI BUS Reset Command --->>> FC Port Reset Command */
struct unf_lport_s *lport = NULL;
unsigned int cmnd_result = 0;
/* 1. Get L_Port */
lport = unf_find_lport_by_scsi_cmd(v_scsi_cmnd);
if (!lport) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Can't find port by scsi_host_id(0x%x)",
UNF_GET_SCSI_HOST_ID_BY_CMND(v_scsi_cmnd));
return FAILED;
}
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_KEVENT,
"[event]Do port reset with scsi_bus_reset");
cmnd_result = unf_cm_reset_port(lport->port_id);
if (unlikely(cmnd_result == UNF_RETURN_ERROR))
return FAILED;
else
return SUCCESS;
}
void unf_process_scsi_mgmt_result(struct unf_frame_pkg_s *v_pkg,
struct unf_xchg_s *v_xchg)
{
unsigned char *rsp_info = NULL;
unsigned char rsp_code = 0;
unsigned int code_index = 0;
/*
 * LLT found that RSP_CODE is the third byte of FCP_RSP_INFO;
 * after conversion to little endian it is byte 0 here (see
 * FCP-4, Table 26, FCP_RSP_INFO field format).
 *
 * 1. state setting
 * 2. wake up the semaphore
 */
UNF_CHECK_VALID(0x1321, TRUE, v_pkg, return);
UNF_CHECK_VALID(0x1322, TRUE, v_xchg, return);
v_xchg->tmf_state |= TMF_RESPONSE_RECEIVED;
if (UNF_GET_LL_ERR(v_pkg) != UNF_IO_SUCCESS) {
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
"[warn]Send scsi manage command failed with error code(0x%x)",
UNF_GET_LL_ERR(v_pkg));
v_xchg->scsi_cmnd_info.result = UNF_IO_FAILED;
/* wakeup semaphore & return */
up(&v_xchg->task_sema);
return;
}
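/*
 * The low level may report a payload length without filling in the
 * payload pointer; fall back to the FCP_RSP IU buffer kept in the
 * exchange and byte-swap it when the packet was delivered big-endian
 * (UNF_BIT_3 set in byte_orders).
 */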
rsp_info = v_pkg->unf_rsp_pload_bl.buffer_ptr;
if (!rsp_info && (v_pkg->unf_rsp_pload_bl.length != 0)) {
rsp_info =
(unsigned char *)
v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu;
/* convert to little endian if necessary */
if (rsp_info && (v_pkg->byte_orders & UNF_BIT_3))
unf_big_end_to_cpu(
rsp_info,
v_pkg->unf_rsp_pload_bl.length);
}
if (!rsp_info) {
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"[info]FCP response data pointer is NULL with Xchg TAG(0x%x)",
v_xchg->hot_pool_tag);
v_xchg->scsi_cmnd_info.result = UNF_IO_SUCCESS;
/* wakeup semaphore & return */
up(&v_xchg->task_sema);
return;
}
UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
"[info]FCP response data length(0x%x), RSP_CODE(0x%x:%x:%x:%x:%x:%x:%x:%x)",
v_pkg->unf_rsp_pload_bl.length,
rsp_info[0],
rsp_info[1],
rsp_info[2],
rsp_info[3],
rsp_info[4],
rsp_info[5],
rsp_info[6],
rsp_info[7]);
rsp_code = rsp_info[code_index];
if ((rsp_code == UNF_FCP_TM_RSP_COMPLETE) ||
(rsp_code == UNF_FCP_TM_RSP_SUCCEED))
v_xchg->scsi_cmnd_info.result = UNF_IO_SUCCESS;
else
v_xchg->scsi_cmnd_info.result = UNF_IO_FAILED;
/* wakeup semaphore & return */
up(&v_xchg->task_sema);
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __UNF_IO__ABNORMAL_H__
#define __UNF_IO__ABNORMAL_H__
#define UNF_GET_LL_ERR(v_pkg) (((v_pkg)->status) >> 16)
void unf_process_scsi_mgmt_result(struct unf_frame_pkg_s *v_pkg,
struct unf_xchg_s *v_xchg);
unsigned int unf_hardware_start_io(struct unf_lport_s *v_lport,
struct unf_frame_pkg_s *v_pkg);
#endif