提交 dff67aa5 编写于 作者: Y Yanling Song 提交者: Zheng Zengkai

scsi: spfc: initial commit the spfc module

Ramaxel inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4DBD7
CVE: NA

Initial commit of the spfc module for the Ramaxel Super FC adapter
Signed-off-by: NYanling Song <songyl@ramaxel.com>
Reviewed-by: NZhen Lei <thunder.leizhen@huawei.com>
Signed-off-by: NZheng Zengkai <zhengzengkai@huawei.com>
上级 7a99cdfb
......@@ -7135,3 +7135,4 @@ CONFIG_ETMEM_SCAN=m
CONFIG_ETMEM_SWAP=m
CONFIG_NET_VENDOR_RAMAXEL=y
CONFIG_SPNIC=m
CONFIG_SPFC=m
......@@ -8514,3 +8514,4 @@ CONFIG_ETMEM_SWAP=m
CONFIG_USERSWAP=y
CONFIG_NET_VENDOR_RAMAXEL=y
CONFIG_SPNIC=m
CONFIG_SPFC=m
......@@ -1151,6 +1151,7 @@ source "drivers/scsi/qla2xxx/Kconfig"
source "drivers/scsi/qla4xxx/Kconfig"
source "drivers/scsi/qedi/Kconfig"
source "drivers/scsi/qedf/Kconfig"
source "drivers/scsi/spfc/Kconfig"
source "drivers/scsi/huawei/Kconfig"
config SCSI_LPFC
......
......@@ -85,6 +85,7 @@ obj-$(CONFIG_PCMCIA_QLOGIC) += qlogicfas408.o
obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/
obj-$(CONFIG_SCSI_QLA_ISCSI) += libiscsi.o qla4xxx/
obj-$(CONFIG_SPFC) += spfc/
obj-$(CONFIG_SCSI_LPFC) += lpfc/
obj-$(CONFIG_SCSI_HUAWEI_FC) += huawei/
obj-$(CONFIG_SCSI_BFA_FC) += bfa/
......
# SPDX-License-Identifier: GPL-2.0-only
#
# Ramaxel SPFC driver configuration
#
# Kconfig entry for the Ramaxel Super FC host adapter driver (drivers/scsi/spfc).
config SPFC
tristate "Ramaxel Fabric Channel Host Adapter Support"
default m
depends on PCI && SCSI
depends on SCSI_FC_ATTRS
depends on ARM64 || X86_64
help
This driver supports Ramaxel Fabric Channel PCIe host adapter.
To compile this driver as part of the kernel, choose Y here.
If unsure, choose N.
The default is M.
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_SPFC) += spfc.o
# Header search paths: shared spnic hardware layer plus local hw/common dirs
subdir-ccflags-y += -I$(src)/../../net/ethernet/ramaxel/spnic/hw
subdir-ccflags-y += -I$(src)/hw
subdir-ccflags-y += -I$(src)/common
# Protocol-independent FC stack (common/) and SPFC hardware glue (hw/)
spfc-objs := common/unf_init.o \
common/unf_event.o \
common/unf_exchg.o \
common/unf_exchg_abort.o \
common/unf_io.o \
common/unf_io_abnormal.o \
common/unf_lport.o \
common/unf_npiv.o \
common/unf_npiv_portman.o \
common/unf_disc.o \
common/unf_rport.o \
common/unf_service.o \
common/unf_ls.o \
common/unf_gs.o \
common/unf_portman.o \
common/unf_scsi.o \
hw/spfc_utils.o \
hw/spfc_lld.o \
hw/spfc_io.o \
hw/spfc_wqe.o \
hw/spfc_service.o \
hw/spfc_chipitf.o \
hw/spfc_queue.o \
hw/spfc_hba.o \
hw/spfc_cqm_bat_cla.o \
hw/spfc_cqm_bitmap_table.o \
hw/spfc_cqm_main.o \
hw/spfc_cqm_object.o \
../../net/ethernet/ramaxel/spnic/hw/sphw_hwdev.o \
../../net/ethernet/ramaxel/spnic/hw/sphw_hw_cfg.o \
../../net/ethernet/ramaxel/spnic/hw/sphw_hw_comm.o \
../../net/ethernet/ramaxel/spnic/hw/sphw_prof_adap.o \
../../net/ethernet/ramaxel/spnic/hw/sphw_common.o \
../../net/ethernet/ramaxel/spnic/hw/sphw_hwif.o \
../../net/ethernet/ramaxel/spnic/hw/sphw_wq.o \
../../net/ethernet/ramaxel/spnic/hw/sphw_cmdq.o \
../../net/ethernet/ramaxel/spnic/hw/sphw_eqs.o \
../../net/ethernet/ramaxel/spnic/hw/sphw_mbox.o \
../../net/ethernet/ramaxel/spnic/hw/sphw_mgmt.o \
../../net/ethernet/ramaxel/spnic/hw/sphw_api_cmd.o
此差异已折叠。
此差异已折叠。
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
#ifndef UNF_DISC_H
#define UNF_DISC_H
#include "unf_type.h"
/* How many times a failed discovery step is retried */
#define UNF_DISC_RETRY_TIMES 3
/* Discovery topology mode of the local port */
#define UNF_DISC_NONE 0
#define UNF_DISC_FABRIC 1
#define UNF_DISC_LOOP 2
/* States of the discovery state machine */
enum unf_disc_state {
UNF_DISC_ST_START = 0x3000,
UNF_DISC_ST_GIDPT_WAIT,
UNF_DISC_ST_GIDFT_WAIT,
UNF_DISC_ST_END
};
/* Events driving transitions of the discovery state machine */
enum unf_disc_event {
UNF_EVENT_DISC_NORMAL_ENTER = 0x8000,
UNF_EVENT_DISC_FAILED = 0x8001,
UNF_EVENT_DISC_SUCCESS = 0x8002,
UNF_EVENT_DISC_RETRY_TIMEOUT = 0x8003,
UNF_EVENT_DISC_LINKDOWN = 0x8004
};
/* Which name-server attribute a queued GS query asks for */
enum unf_disc_type {
UNF_DISC_GET_PORT_NAME = 0,
UNF_DISC_GET_NODE_NAME,
UNF_DISC_GET_FEATURE
};
/* One queued GS (generic services) discovery request */
struct unf_disc_gs_event_info {
void *lport;
void *rport;
u32 rport_id;
enum unf_disc_type type;
struct list_head list_entry;
};
u32 unf_get_and_post_disc_event(void *lport, void *sns_port, u32 nport_id,
enum unf_disc_type type);
void unf_flush_disc_event(void *disc, void *vport);
void unf_disc_ctrl_size_inc(void *lport, u32 cmnd);
void unf_disc_error_recovery(void *lport);
void unf_disc_mgr_destroy(void *lport);
#endif
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
#include "unf_event.h"
#include "unf_log.h"
#include "unf_common.h"
#include "unf_lport.h"
/* Global (port-less) event list; serviced by event_task_thread */
struct unf_event_list fc_event_list;
/* Pool of pre-allocated global event nodes */
struct unf_global_event_queue global_event_queue;
/* Max global event node */
#define UNF_MAX_GLOBAL_ENENT_NODE 24
/*
 * unf_init_event_msg() - allocate the per-port event nodes and put them
 * on the manager's free list.
 * @lport: local port whose event_mgr.free_event_count has already been set.
 *
 * Return: RETURN_OK on success, UNF_RETURN_ERROR when allocation fails.
 */
u32 unf_init_event_msg(struct unf_lport *lport)
{
	struct unf_event_mgr *event_mgr = NULL;
	struct unf_cm_event_report *event_node = NULL;
	u32 index;
	ulong flag = 0;

	FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR);
	event_mgr = &lport->event_mgr;

	/* Get and Initial Event Node resource.
	 * vzalloc() replaces the original vmalloc()+memset(0) pair.
	 */
	event_mgr->mem_add = vzalloc((size_t)event_mgr->free_event_count *
				     sizeof(struct unf_cm_event_report));
	if (!event_mgr->mem_add) {
		FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN,
			     "[warn]Port(0x%x) allocate event manager failed",
			     lport->port_id);
		return UNF_RETURN_ERROR;
	}

	event_node = (struct unf_cm_event_report *)(event_mgr->mem_add);

	/* Chain every node onto the free list under the manager lock */
	spin_lock_irqsave(&event_mgr->port_event_lock, flag);
	for (index = 0; index < event_mgr->free_event_count; index++) {
		INIT_LIST_HEAD(&event_node->list_entry);
		list_add_tail(&event_node->list_entry, &event_mgr->list_free_event);
		event_node++;
	}
	spin_unlock_irqrestore(&event_mgr->port_event_lock, flag);

	return RETURN_OK;
}
/* Detach the event-manager callbacks from @lport during teardown. */
static void unf_del_event_center_fun_op(struct unf_lport *lport)
{
	struct unf_event_mgr *mgr;

	FC_CHECK_RETURN_VOID(lport);

	mgr = &lport->event_mgr;
	mgr->unf_post_event_func = NULL;
	mgr->unf_release_event = NULL;
	mgr->unf_get_free_event_func = NULL;
}
/* Reset @event_node to its idle defaults before (re)use. */
void unf_init_event_node(struct unf_cm_event_report *event_node)
{
	FC_CHECK_RETURN_VOID(event_node);

	event_node->event = UNF_EVENT_TYPE_REQUIRE;
	event_node->event_asy_flag = UNF_EVENT_ASYN;
	event_node->delay_times = 0;
	event_node->result = 0;
	event_node->lport = NULL;
	event_node->para_in = NULL;
	event_node->para_out = NULL;
	event_node->unf_event_task = NULL;
}
/*
 * Take one event node off the root port's free pool.
 * Returns NULL when the port is in NOP state or the pool is exhausted.
 */
struct unf_cm_event_report *unf_get_free_event_node(void *lport)
{
	struct unf_lport *root_lport;
	struct unf_event_mgr *mgr;
	struct unf_cm_event_report *node;
	struct list_head *entry;
	ulong irq_flags = 0;

	FC_CHECK_RETURN_VALUE(lport, NULL);

	root_lport = ((struct unf_lport *)lport)->root_lport;
	if (unlikely(atomic_read(&root_lport->lport_no_operate_flag) == UNF_LPORT_NOP))
		return NULL;

	mgr = &root_lport->event_mgr;
	spin_lock_irqsave(&mgr->port_event_lock, irq_flags);
	if (list_empty(&mgr->list_free_event)) {
		FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN,
			     "[warn]Port(0x%x) have no event node anymore",
			     root_lport->port_id);
		spin_unlock_irqrestore(&mgr->port_event_lock, irq_flags);
		return NULL;
	}

	entry = UNF_OS_LIST_NEXT(&mgr->list_free_event);
	list_del(entry);
	mgr->free_event_count--;
	node = list_entry(entry, struct unf_cm_event_report, list_entry);
	unf_init_event_node(node);
	spin_unlock_irqrestore(&mgr->port_event_lock, irq_flags);

	return node;
}
/* Enqueue @node on the global event list and wake the global worker. */
static void unf_post_event_to_global(struct unf_cm_event_report *node)
{
	ulong flags = 0;

	spin_lock_irqsave(&fc_event_list.fc_event_list_lock, flags);
	fc_event_list.list_num++;
	list_add_tail(&node->list_entry, &fc_event_list.list_head);
	spin_unlock_irqrestore(&fc_event_list.fc_event_list_lock, flags);
	wake_up_process(event_task_thread);
}

/*
 * unf_post_event() - queue an event node for processing.
 * @lport: originating local port, or NULL to target the global center.
 * @event_node: node previously taken from a free pool.
 *
 * Events go to the per-card worker thread when the root port has
 * chip_info; otherwise they fall back to the global event center.
 * (The two identical global-post sequences of the original are now one
 * helper, unf_post_event_to_global().)
 */
void unf_post_event(void *lport, void *event_node)
{
	struct unf_cm_event_report *cm_event_node;
	struct unf_chip_manage_info *card_thread_info;
	struct unf_lport *unf_lport;
	ulong flags = 0;

	FC_CHECK_RETURN_VOID(event_node);
	cm_event_node = (struct unf_cm_event_report *)event_node;

	/* If null, post to global event center */
	if (!lport) {
		unf_post_event_to_global(cm_event_node);
		return;
	}

	unf_lport = ((struct unf_lport *)lport)->root_lport;
	card_thread_info = unf_lport->chip_info;

	/* No per-card thread: post to global event center */
	if (!card_thread_info) {
		FC_DRV_PRINT(UNF_LOG_EVENT, UNF_WARN,
			     "[warn]Port(0x%x) has strange event with type(0x%x)",
			     unf_lport->nport_id, cm_event_node->event);
		unf_post_event_to_global(cm_event_node);
		return;
	}

	spin_lock_irqsave(&card_thread_info->chip_event_list_lock, flags);
	card_thread_info->list_num++;
	list_add_tail(&cm_event_node->list_entry, &card_thread_info->list_head);
	spin_unlock_irqrestore(&card_thread_info->chip_event_list_lock, flags);
	wake_up_process(card_thread_info->thread);
}
void unf_check_event_mgr_status(struct unf_event_mgr *event_mgr)
{
ulong flag = 0;
FC_CHECK_RETURN_VOID(event_mgr);
spin_lock_irqsave(&event_mgr->port_event_lock, flag);
if (event_mgr->emg_completion && event_mgr->free_event_count == UNF_MAX_EVENT_NODE)
complete(event_mgr->emg_completion);
spin_unlock_irqrestore(&event_mgr->port_event_lock, flag);
}
/*
 * Return @event_node to the root port's free pool and notify anyone
 * waiting for the pool to refill.
 */
void unf_release_event(void *lport, void *event_node)
{
	struct unf_lport *root_lport;
	struct unf_event_mgr *mgr;
	struct unf_cm_event_report *node;
	ulong irq_flags = 0;

	FC_CHECK_RETURN_VOID(lport);
	FC_CHECK_RETURN_VOID(event_node);

	node = (struct unf_cm_event_report *)event_node;
	root_lport = ((struct unf_lport *)lport)->root_lport;
	mgr = &root_lport->event_mgr;

	spin_lock_irqsave(&mgr->port_event_lock, irq_flags);
	mgr->free_event_count++;
	unf_init_event_node(node);
	list_add_tail(&node->list_entry, &mgr->list_free_event);
	spin_unlock_irqrestore(&mgr->port_event_lock, irq_flags);

	unf_check_event_mgr_status(mgr);
}
/* Return @event_node to the global event node pool. */
void unf_release_global_event(void *event_node)
{
	struct unf_cm_event_report *node;
	ulong irq_flag = 0;

	FC_CHECK_RETURN_VOID(event_node);

	node = (struct unf_cm_event_report *)event_node;
	unf_init_event_node(node);

	spin_lock_irqsave(&global_event_queue.global_event_list_lock, irq_flag);
	global_event_queue.list_number++;
	list_add_tail(&node->list_entry, &global_event_queue.global_event_list);
	spin_unlock_irqrestore(&global_event_queue.global_event_list_lock, irq_flag);
}
/*
 * Set up the per-port event manager: install callbacks, init the free
 * list and lock, then allocate the event nodes.
 */
u32 unf_init_event_center(void *lport)
{
	struct unf_lport *unf_lport;
	struct unf_event_mgr *mgr;

	FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR);
	unf_lport = (struct unf_lport *)lport;

	/* Initial Disc manager */
	mgr = &unf_lport->event_mgr;
	mgr->free_event_count = UNF_MAX_EVENT_NODE;
	mgr->unf_get_free_event_func = unf_get_free_event_node;
	mgr->unf_release_event = unf_release_event;
	mgr->unf_post_event_func = unf_post_event;
	INIT_LIST_HEAD(&mgr->list_free_event);
	spin_lock_init(&mgr->port_event_lock);
	mgr->emg_completion = NULL;

	return unf_init_event_msg(unf_lport);
}
/*
 * Block until every event node owned by @event_mgr is back in the free
 * pool (free_event_count == UNF_MAX_EVENT_NODE).  The completion is
 * signalled by unf_check_event_mgr_status() when the last node is
 * released.
 */
void unf_wait_event_mgr_complete(struct unf_event_mgr *event_mgr)
{
struct unf_event_mgr *event_mgr_temp = NULL;
bool wait = false;
ulong mg_flag = 0;
/* On-stack completion published via event_mgr->emg_completion */
struct completion fc_event_completion;
init_completion(&fc_event_completion);
FC_CHECK_RETURN_VOID(event_mgr);
event_mgr_temp = event_mgr;
spin_lock_irqsave(&event_mgr_temp->port_event_lock, mg_flag);
if (event_mgr_temp->free_event_count != UNF_MAX_EVENT_NODE) {
event_mgr_temp->emg_completion = &fc_event_completion;
wait = true;
}
spin_unlock_irqrestore(&event_mgr_temp->port_event_lock, mg_flag);
if (wait)
/* NOTE(review): waits via the published pointer rather than the
 * local &fc_event_completion; equivalent as long as only this
 * thread clears emg_completion — confirm no other writer exists.
 */
wait_for_completion(event_mgr_temp->emg_completion);
spin_lock_irqsave(&event_mgr_temp->port_event_lock, mg_flag);
event_mgr_temp->emg_completion = NULL;
spin_unlock_irqrestore(&event_mgr_temp->port_event_lock, mg_flag);
}
/*
 * unf_event_center_destroy() - tear down a port's event manager.
 * @lport: local port being destroyed.
 *
 * Reclaims any of this port's events still queued on the global list
 * (failing synchronous waiters), waits for all nodes to return to the
 * free pool, then releases callbacks and the node memory.
 *
 * Return: RETURN_OK (always; kept u32 for API symmetry).
 */
u32 unf_event_center_destroy(void *lport)
{
struct unf_event_mgr *event_mgr = NULL;
struct list_head *list = NULL;
struct list_head *list_tmp = NULL;
struct unf_cm_event_report *event_node = NULL;
u32 ret = RETURN_OK;
ulong flag = 0;
ulong list_lock_flag = 0;
struct unf_lport *unf_lport = NULL;
FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR);
unf_lport = (struct unf_lport *)lport;
event_mgr = &unf_lport->event_mgr;
/* Pull this port's pending events off the global list */
spin_lock_irqsave(&fc_event_list.fc_event_list_lock, list_lock_flag);
if (!list_empty(&fc_event_list.list_head)) {
list_for_each_safe(list, list_tmp, &fc_event_list.list_head) {
event_node = list_entry(list, struct unf_cm_event_report, list_entry);
if (event_node->lport == unf_lport) {
list_del_init(&event_node->list_entry);
/* Fail any synchronous waiter so it does not hang */
if (event_node->event_asy_flag == UNF_EVENT_SYN) {
event_node->result = UNF_RETURN_ERROR;
complete(&event_node->event_comp);
}
spin_lock_irqsave(&event_mgr->port_event_lock, flag);
event_mgr->free_event_count++;
list_add_tail(&event_node->list_entry,
&event_mgr->list_free_event);
spin_unlock_irqrestore(&event_mgr->port_event_lock, flag);
}
}
}
spin_unlock_irqrestore(&fc_event_list.fc_event_list_lock, list_lock_flag);
FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Port(0x%x) begin to wait event",
unf_lport->port_id);
/* Block until in-flight events have been released back to the pool */
unf_wait_event_mgr_complete(event_mgr);
FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR,
"[info]Port(0x%x) wait event process end",
unf_lport->port_id);
unf_del_event_center_fun_op(unf_lport);
vfree(event_mgr->mem_add);
event_mgr->mem_add = NULL;
unf_lport->destroy_step = UNF_LPORT_DESTROY_STEP_3_DESTROY_EVENT_CENTER;
return ret;
}
/*
 * Run an asynchronous event's callback, then return the node to its
 * owning port's free pool.
 *
 * Fix: event_node is recycled by unf_release_event() (re-initialized and
 * put back on the free list, where another thread may grab it), so its
 * event id is captured *before* the release — the original read
 * event_node->event afterwards.
 */
static void unf_procee_asyn_event(struct unf_cm_event_report *event_node)
{
	struct unf_lport *lport;
	u32 event;
	u32 ret = UNF_RETURN_ERROR;

	lport = (struct unf_lport *)event_node->lport;
	FC_CHECK_RETURN_VOID(lport);

	event = event_node->event;
	if (event_node->unf_event_task) {
		ret = (u32)event_node->unf_event_task(event_node->para_in,
						      event_node->para_out);
	}

	if (lport->event_mgr.unf_release_event)
		lport->event_mgr.unf_release_event(lport, event_node);

	if (ret != RETURN_OK) {
		FC_DRV_PRINT(UNF_LOG_EVENT, UNF_WARN,
			     "[warn]Port(0x%x) handle event(0x%x) failed",
			     lport->port_id, event);
	}
}
/*
 * unf_handle_event() - dispatch one dequeued event node.
 * @event_node: node taken off a worker thread's list.
 *
 * Synchronous events complete the embedded completion so the scheduler
 * can read ->result; async events are run and released back to their
 * pool (port pool or global pool depending on the flag).
 */
void unf_handle_event(struct unf_cm_event_report *event_node)
{
u32 ret = UNF_RETURN_ERROR;
u32 event = 0;
u32 event_asy_flag = UNF_EVENT_ASYN;
FC_CHECK_RETURN_VOID(event_node);
/* Cache fields before the node can be released/recycled below */
event = event_node->event;
event_asy_flag = event_node->event_asy_flag;
switch (event_asy_flag) {
case UNF_EVENT_SYN: /* synchronous event node */
case UNF_GLOBAL_EVENT_SYN:
if (event_node->unf_event_task)
ret = (u32)event_node->unf_event_task(event_node->para_in,
event_node->para_out);
/* Waiter in unf_schedule_global_event() reads result and frees */
event_node->result = ret;
complete(&event_node->event_comp);
break;
case UNF_EVENT_ASYN: /* asynchronous event node */
unf_procee_asyn_event(event_node);
break;
case UNF_GLOBAL_EVENT_ASYN:
if (event_node->unf_event_task) {
ret = (u32)event_node->unf_event_task(event_node->para_in,
event_node->para_out);
}
/* No waiter: node goes straight back to the global pool */
unf_release_global_event(event_node);
if (ret != RETURN_OK) {
FC_DRV_PRINT(UNF_LOG_EVENT, UNF_WARN,
"[warn]handle global event(0x%x) failed", event);
}
break;
default:
FC_DRV_PRINT(UNF_LOG_EVENT, UNF_WARN,
"[warn]Unknown event(0x%x)", event);
break;
}
}
/*
 * unf_init_global_event_msg() - allocate the global event node pool and
 * chain all UNF_MAX_GLOBAL_ENENT_NODE nodes onto the global free list.
 *
 * Return: RETURN_OK on success, UNF_RETURN_ERROR when allocation fails.
 */
u32 unf_init_global_event_msg(void)
{
	struct unf_cm_event_report *event_node = NULL;
	u32 index;
	ulong flag = 0;

	INIT_LIST_HEAD(&global_event_queue.global_event_list);
	spin_lock_init(&global_event_queue.global_event_list_lock);
	global_event_queue.list_number = 0;

	/* vzalloc() replaces the original vmalloc()+memset(0) pair */
	global_event_queue.global_event_add = vzalloc(UNF_MAX_GLOBAL_ENENT_NODE *
						      sizeof(struct unf_cm_event_report));
	if (!global_event_queue.global_event_add) {
		FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR,
			     "[err]Can't allocate global event queue");
		return UNF_RETURN_ERROR;
	}

	event_node = (struct unf_cm_event_report *)(global_event_queue.global_event_add);

	spin_lock_irqsave(&global_event_queue.global_event_list_lock, flag);
	for (index = 0; index < UNF_MAX_GLOBAL_ENENT_NODE; index++) {
		INIT_LIST_HEAD(&event_node->list_entry);
		list_add_tail(&event_node->list_entry, &global_event_queue.global_event_list);
		global_event_queue.list_number++;
		event_node++;
	}
	spin_unlock_irqrestore(&global_event_queue.global_event_list_lock, flag);

	return RETURN_OK;
}
void unf_destroy_global_event_msg(void)
{
if (global_event_queue.list_number != UNF_MAX_GLOBAL_ENENT_NODE) {
FC_DRV_PRINT(UNF_LOG_EVENT, UNF_CRITICAL,
"[warn]Global event release not complete with remain nodes(0x%x)",
global_event_queue.list_number);
}
vfree(global_event_queue.global_event_add);
}
/*
 * unf_schedule_global_event() - run @unf_event_task on the global event
 * thread.
 * @para_in: opaque argument forwarded to the task as arg_in.
 * @event_asy_flag: UNF_GLOBAL_EVENT_SYN (wait for the result) or
 *                  UNF_GLOBAL_EVENT_ASYN (fire and forget).
 * @unf_event_task: callback executed by unf_handle_event().
 *
 * Return: for SYN, the task's result; for ASYN, RETURN_OK once queued;
 * UNF_RETURN_ERROR on bad flag or empty node pool.
 */
u32 unf_schedule_global_event(void *para_in, u32 event_asy_flag,
int (*unf_event_task)(void *arg_in, void *arg_out))
{
struct list_head *list_node = NULL;
struct unf_cm_event_report *event_node = NULL;
ulong flag = 0;
u32 ret = UNF_RETURN_ERROR;
spinlock_t *event_list_lock = NULL;
FC_CHECK_RETURN_VALUE(unf_event_task, UNF_RETURN_ERROR);
if (event_asy_flag != UNF_GLOBAL_EVENT_ASYN && event_asy_flag != UNF_GLOBAL_EVENT_SYN) {
FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN,
"[warn]Event async flag(0x%x) abnormity",
event_asy_flag);
return UNF_RETURN_ERROR;
}
event_list_lock = &global_event_queue.global_event_list_lock;
spin_lock_irqsave(event_list_lock, flag);
/* Pool exhausted: caller must cope with UNF_RETURN_ERROR */
if (list_empty(&global_event_queue.global_event_list)) {
spin_unlock_irqrestore(event_list_lock, flag);
return UNF_RETURN_ERROR;
}
list_node = UNF_OS_LIST_NEXT(&global_event_queue.global_event_list);
list_del_init(list_node);
global_event_queue.list_number--;
event_node = list_entry(list_node, struct unf_cm_event_report, list_entry);
spin_unlock_irqrestore(event_list_lock, flag);
/* Initial global event */
unf_init_event_node(event_node);
init_completion(&event_node->event_comp);
event_node->event_asy_flag = event_asy_flag;
event_node->unf_event_task = unf_event_task;
event_node->para_in = (void *)para_in;
event_node->para_out = NULL;
/* NULL lport routes the node to the global event center */
unf_post_event(NULL, event_node);
if (event_asy_flag == UNF_GLOBAL_EVENT_SYN) {
/* must wait for complete */
wait_for_completion(&event_node->event_comp);
ret = event_node->result;
/* SYN nodes are released by the scheduler, ASYN by the handler */
unf_release_global_event(event_node);
} else {
ret = RETURN_OK;
}
return ret;
}
/* Fetch a free event node via the port's installed callback. */
struct unf_cm_event_report *unf_get_one_event_node(void *lport)
{
	struct unf_lport *port = (struct unf_lport *)lport;

	FC_CHECK_RETURN_VALUE(lport, NULL);
	FC_CHECK_RETURN_VALUE(port->event_mgr.unf_get_free_event_func, NULL);

	return port->event_mgr.unf_get_free_event_func((void *)port);
}
/*
 * Queue @event on @lport via the port's installed post callback.
 * (Removed the duplicated FC_CHECK_RETURN_VOID(event) of the original.)
 */
void unf_post_one_event_node(void *lport, struct unf_cm_event_report *event)
{
	struct unf_lport *unf_lport = (struct unf_lport *)lport;

	FC_CHECK_RETURN_VOID(lport);
	FC_CHECK_RETURN_VOID(event);
	FC_CHECK_RETURN_VOID(unf_lport->event_mgr.unf_post_event_func);

	unf_lport->event_mgr.unf_post_event_func((void *)unf_lport, event);
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
#ifndef UNF_EVENT_H
#define UNF_EVENT_H
#include "unf_type.h"
/* Event nodes pre-allocated per port (see unf_init_event_center()) */
#define UNF_MAX_EVENT_NODE 256
enum unf_event_type {
UNF_EVENT_TYPE_ALARM = 0, /* Alarm */
UNF_EVENT_TYPE_REQUIRE, /* Require */
UNF_EVENT_TYPE_RECOVERY, /* Recovery */
UNF_EVENT_TYPE_BUTT
};
/* One schedulable event; lives in a per-port or global node pool */
struct unf_cm_event_report {
/* event type */
u32 event;
/* ASY flag */
u32 event_asy_flag;
/* Delay times,must be async event */
u32 delay_times;
struct list_head list_entry;
void *lport;
/* parameter */
void *para_in;
void *para_out;
u32 result;
/* recovery strategy */
int (*unf_event_task)(void *arg_in, void *arg_out);
/* signalled by the handler for synchronous events */
struct completion event_comp;
};
/* Per-port event pool plus the callbacks used to operate on it */
struct unf_event_mgr {
spinlock_t port_event_lock;
u32 free_event_count;
struct list_head list_free_event;
/* set while a destroyer waits for the pool to refill */
struct completion *emg_completion;
void *mem_add;
struct unf_cm_event_report *(*unf_get_free_event_func)(void *lport);
void (*unf_release_event)(void *lport, void *event_node);
void (*unf_post_event_func)(void *lport, void *event_node);
};
/* Pool of global (port-less) event nodes */
struct unf_global_event_queue {
void *global_event_add;
u32 list_number;
struct list_head global_event_list;
spinlock_t global_event_list_lock;
};
/* Pending-event list serviced by a worker thread */
struct unf_event_list {
struct list_head list_head;
spinlock_t fc_event_list_lock;
u32 list_num; /* list node number */
};
void unf_handle_event(struct unf_cm_event_report *event_node);
u32 unf_init_global_event_msg(void);
void unf_destroy_global_event_msg(void);
u32 unf_schedule_global_event(void *para_in, u32 event_asy_flag,
int (*unf_event_task)(void *arg_in, void *arg_out));
struct unf_cm_event_report *unf_get_one_event_node(void *lport);
void unf_post_one_event_node(void *lport, struct unf_cm_event_report *event);
u32 unf_event_center_destroy(void *lport);
u32 unf_init_event_center(void *lport);
extern struct task_struct *event_task_thread;
extern struct unf_global_event_queue global_event_queue;
extern struct unf_event_list fc_event_list;
#endif
此差异已折叠。
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
#ifndef UNF_EXCHG_H
#define UNF_EXCHG_H
#include "unf_type.h"
#include "unf_fcstruct.h"
#include "unf_lport.h"
#include "unf_scsi_common.h"
/* IO-flow stages used for exchange reference tracking */
enum unf_ioflow_id {
XCHG_ALLOC = 0,
TGT_RECEIVE_ABTS,
TGT_ABTS_DONE,
TGT_IO_SRR,
SFS_RESPONSE,
SFS_TIMEOUT,
INI_SEND_CMND,
INI_RESPONSE_DONE,
INI_EH_ABORT,
INI_EH_DEVICE_RESET,
INI_EH_BLS_DONE,
INI_IO_TIMEOUT,
INI_REQ_TIMEOUT,
XCHG_CANCEL_TIMER,
XCHG_FREE_XCHG,
SEND_ELS,
IO_XCHG_WAIT,
XCHG_BUTT
};
/* Exchange category: initiator IO vs. SFS (link/fabric services) */
enum unf_xchg_type {
UNF_XCHG_TYPE_INI = 0, /* INI IO */
UNF_XCHG_TYPE_SFS = 1,
UNF_XCHG_TYPE_INVALID
};
enum unf_xchg_mgr_type {
UNF_XCHG_MGR_TYPE_RANDOM = 0,
UNF_XCHG_MGR_TYPE_FIXED = 1,
UNF_XCHG_MGR_TYPE_INVALID
};
/* Target-mode xfer/rsp enqueue progress */
enum tgt_io_send_stage {
TGT_IO_SEND_STAGE_NONE = 0,
TGT_IO_SEND_STAGE_DOING = 1, /* xfer/rsp into queue */
TGT_IO_SEND_STAGE_DONE = 2, /* xfer/rsp into queue complete */
TGT_IO_SEND_STAGE_ECHO = 3, /* driver handled TSTS */
TGT_IO_SEND_STAGE_INVALID
};
enum tgt_io_send_result {
TGT_IO_SEND_RESULT_OK = 0, /* xfer/rsp enqueue succeed */
TGT_IO_SEND_RESULT_FAIL = 1, /* xfer/rsp enqueue fail */
TGT_IO_SEND_RESULT_INVALID
};
struct unf_io_flow_id {
char *stage;
};
/* True when (ox_id, oid) identify @xchg and it still holds a reference */
#define unf_check_oxid_matched(ox_id, oid, xchg) \
(((ox_id) == (xchg)->oxid) && ((oid) == (xchg)->oid) && \
(atomic_read(&(xchg)->ref_cnt) > 0))
/*
 * Bail out of the calling function (returns UNF_RETURN_ERROR!) when the
 * packet's alloc-time stamp does not match the exchange's; a zero
 * packet stamp is only logged.
 * NOTE(review): the stray `;` after each if-block is harmless here but
 * would be flagged by checkpatch.
 */
#define UNF_CHECK_ALLOCTIME_VALID(lport, xchg_tag, exchg, pkg_alloc_time, \
xchg_alloc_time) \
do { \
if (unlikely(((pkg_alloc_time) != 0) && \
((pkg_alloc_time) != (xchg_alloc_time)))) { \
FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_ERR, \
"Lport(0x%x_0x%x_0x%x_0x%p) AllocTime is not " \
"equal,PKG " \
"AllocTime:0x%x,Exhg AllocTime:0x%x", \
(lport)->port_id, (lport)->nport_id, xchg_tag, \
exchg, pkg_alloc_time, xchg_alloc_time); \
return UNF_RETURN_ERROR; \
}; \
if (unlikely((pkg_alloc_time) == 0)) { \
FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_MAJOR, \
"Lport(0x%x_0x%x_0x%x_0x%p) pkgtime err,PKG " \
"AllocTime:0x%x,Exhg AllocTime:0x%x", \
(lport)->port_id, (lport)->nport_id, xchg_tag, \
exchg, pkg_alloc_time, xchg_alloc_time); \
}; \
} while (0)
#define UNF_SET_SCSI_CMND_RESULT(xchg, cmnd_result) \
((xchg)->scsi_cmnd_info.result = (cmnd_result))
/* Exchange timer values derived from the port's RA_TOV */
#define UNF_GET_GS_SFS_XCHG_TIMER(lport) (3 * (ulong)(lport)->ra_tov)
#define UNF_GET_BLS_SFS_XCHG_TIMER(lport) (2 * (ulong)(lport)->ra_tov)
#define UNF_GET_ELS_SFS_XCHG_TIMER(lport) (2 * (ulong)(lport)->ra_tov)
#define UNF_ELS_ECHO_RESULT_OK 0
#define UNF_ELS_ECHO_RESULT_FAIL 1
struct unf_xchg;
/* Xchg hot pool, busy IO lookup Xchg */
struct unf_xchg_hot_pool {
/* Xchg sum, in hot pool */
u16 total_xchges;
bool wait_state;
/* pool lock */
spinlock_t xchg_hotpool_lock;
/* Xchg posiontion list */
struct list_head sfs_busylist;
struct list_head ini_busylist;
struct list_head list_destroy_xchg;
/* Next free hot point */
u16 slab_next_index;
u16 slab_total_sum;
u16 base;
struct unf_lport *lport;
/* flexible tag->xchg lookup slab, sized at allocation time */
struct unf_xchg *xchg_slab[ARRAY_INDEX_0];
};
/* Xchg's FREE POOL */
struct unf_xchg_free_pool {
spinlock_t xchg_freepool_lock;
u32 fcp_xchg_sum;
/* IO used Xchg */
struct list_head list_free_xchg_list;
u32 total_fcp_xchg;
/* SFS used Xchg */
struct list_head list_sfs_xchg_list;
u32 total_sfs_xchg;
u32 sfs_xchg_sum;
/* signalled when all exchanges have been returned */
struct completion *xchg_mgr_completion;
};
/* One large SFS buffer tracked by the big-SFS pool */
struct unf_big_sfs {
struct list_head entry_bigsfs;
void *addr;
u32 size;
};
struct unf_big_sfs_pool {
void *big_sfs_pool;
u32 free_count;
struct list_head list_freepool;
struct list_head list_busypool;
spinlock_t big_sfs_pool_lock;
};
/* Xchg Manager for vport Xchg */
struct unf_xchg_mgr {
/* MG type */
u32 mgr_type;
/* MG entry */
struct list_head xchg_mgr_entry;
/* MG attribution */
u32 mem_szie;
/* MG alloced resource */
void *fcp_mm_start;
u32 sfs_mem_size;
void *sfs_mm_start;
dma_addr_t sfs_phy_addr;
struct unf_xchg_free_pool free_pool;
struct unf_xchg_hot_pool *hot_pool;
struct unf_big_sfs_pool big_sfs_pool;
struct buf_describe big_sfs_buf_list;
};
/* FC sequence bookkeeping for an exchange */
struct unf_seq {
/* Seq ID */
u8 seq_id;
/* Seq Cnt */
u16 seq_cnt;
/* Seq state and len,maybe used for fcoe */
u16 seq_stat;
u32 rec_data_len;
};
/* Payload area of an exchange: SFS frame or FCP response IU */
union unf_xchg_fcp_sfs {
struct unf_sfs_entry sfs_entry;
struct unf_fcp_rsp_iu_entry fcp_rsp_entry;
};
/* io_state bit flags (stored in struct unf_xchg.io_state) */
#define UNF_IO_STATE_NEW 0
#define TGT_IO_STATE_SEND_XFERRDY (1 << 2) /* succeed to send XFer rdy */
#define TGT_IO_STATE_RSP (1 << 5) /* chip send rsp */
#define TGT_IO_STATE_ABORT (1 << 7)
#define INI_IO_STATE_UPTASK \
(1 << 15) /* INI Upper-layer Task Management Commands */
#define INI_IO_STATE_UPABORT \
(1 << 16) /* INI Upper-layer timeout Abort flag \
*/
#define INI_IO_STATE_DRABORT (1 << 17) /* INI driver Abort flag */
#define INI_IO_STATE_DONE (1 << 18) /* INI complete flag */
#define INI_IO_STATE_WAIT_RRQ (1 << 19) /* INI wait send rrq */
#define INI_IO_STATE_UPSEND_ERR (1 << 20) /* INI send fail flag */
/* INI only clear firmware resource flag */
#define INI_IO_STATE_ABORT_RESOURCE (1 << 21)
/* ioc abort:INI send ABTS ,5S timeout Semaphore,than set 1 */
#define INI_IO_STATE_ABORT_TIMEOUT (1 << 22)
#define INI_IO_STATE_RRQSEND_ERR (1 << 23) /* INI send RRQ fail flag */
#define INI_IO_STATE_LOGO (1 << 24) /* INI busy IO session logo status */
#define INI_IO_STATE_TMF_ABORT (1 << 25) /* INI TMF ABORT IO flag */
#define INI_IO_STATE_REC_TIMEOUT_WAIT (1 << 26) /* INI REC TIMEOUT WAIT */
#define INI_IO_STATE_REC_TIMEOUT (1 << 27) /* INI REC TIMEOUT */
/* tmf_state bit flags */
#define TMF_RESPONSE_RECEIVED (1 << 0)
#define MARKER_STS_RECEIVED (1 << 1)
#define ABTS_RESPONSE_RECEIVED (1 << 2)
/* Upper-layer SCSI command context attached to an exchange */
struct unf_scsi_cmd_info {
ulong time_out;
ulong abort_time_out;
void *scsi_cmnd;
void (*done)(struct unf_scsi_cmnd *scsi_cmd);
ini_get_sgl_entry_buf unf_get_sgl_entry_buf;
struct unf_ini_error_code *err_code_table; /* error code table */
char *sense_buf;
u32 err_code_table_cout; /* Size of the error code table */
u32 buf_len;
u32 entry_cnt;
u32 result; /* Stores command execution results */
u32 port_id;
/* Re-search for rport based on scsiid during retry. Otherwise,
*data inconsistency will occur
*/
u32 scsi_id;
void *sgl;
uplevel_cmd_done uplevel_done;
};
/* Cursor into an SGL while walking a request's buffers */
struct unf_req_sgl_info {
void *sgl;
void *sgl_start;
u32 req_index;
u32 entry_index;
};
/* Synchronous ELS ECHO bookkeeping */
struct unf_els_echo_info {
u64 response_time;
struct semaphore echo_sync_sema;
u32 echo_result;
};
/*
 * One FC exchange: the central per-IO object tying together the SCSI
 * command, FCP frames, timers, and completion callbacks.
 */
struct unf_xchg {
/* Mg resource relative */
/* list delete from HotPool */
struct unf_xchg_hot_pool *hot_pool;
/* attach to FreePool */
struct unf_xchg_free_pool *free_pool;
struct unf_xchg_mgr *xchg_mgr;
struct unf_lport *lport; /* Local LPort/VLPort */
struct unf_rport *rport; /* Rmote Port */
struct unf_rport *disc_rport; /* Discover Rmote Port */
struct list_head list_xchg_entry;
struct list_head list_abort_xchg_entry;
spinlock_t xchg_state_lock;
/* Xchg reference */
atomic_t ref_cnt;
atomic_t esgl_cnt;
bool debug_hook;
/* Xchg attribution */
u16 hotpooltag;
u16 abort_oxid;
u32 xchg_type; /* LS,TGT CMND ,REQ,or SCSI Cmnd */
u16 oxid;
u16 rxid;
u32 sid;
u32 did;
u32 oid; /* ID of the exchange initiator */
u32 disc_portid; /* Send GNN_ID/GFF_ID NPortId */
u8 seq_id;
u8 byte_orders; /* Byte order */
struct unf_seq seq;
u32 cmnd_code;
u32 world_id;
/* Dif control */
struct unf_dif_control_info dif_control;
struct dif_info dif_info;
/* IO status Abort,timer out */
u32 io_state; /* TGT_IO_STATE_E */
u32 tmf_state; /* TMF STATE */
u32 ucode_abts_state;
u32 abts_state;
/* IO Enqueuing */
enum tgt_io_send_stage io_send_stage; /* tgt_io_send_stage */
/* IO Enqueuing result, success or failure */
enum tgt_io_send_result io_send_result; /* tgt_io_send_result */
u8 io_send_abort; /* is or not send io abort */
/*result of io abort cmd(succ:true; fail:false)*/
u8 io_abort_result;
/* for INI,Indicates the length of the data transmitted over the PCI
* link
*/
u32 data_len;
/* ResidLen,greater than 0 UnderFlow or Less than Overflow */
int resid_len;
/* +++++++++++++++++IO Special++++++++++++++++++++ */
/* point to tgt cmnd/req/scsi cmnd */
/* Fcp cmnd */
struct unf_fcp_cmnd fcp_cmnd;
struct unf_scsi_cmd_info scsi_cmnd_info;
struct unf_req_sgl_info req_sgl_info;
struct unf_req_sgl_info dif_sgl_info;
u64 cmnd_sn;
void *pinitiator;
/* timestamp */
u64 start_jif;
u64 alloc_jif;
u64 io_front_jif;
u32 may_consume_res_cnt;
u32 fast_consume_res_cnt;
/* scsi req info */
u32 data_direction;
struct unf_big_sfs *big_sfs_buf;
/* scsi cmnd sense_buffer pointer */
union unf_xchg_fcp_sfs fcp_sfs_union;
/* One exchange may use several External Sgls */
struct list_head list_esgls;
struct unf_els_echo_info echo_info;
struct semaphore task_sema;
/* for RRQ ,IO Xchg add to SFS Xchg */
void *io_xchg;
/* Xchg delay work */
struct delayed_work timeout_work;
void (*xfer_or_rsp_echo)(struct unf_xchg *xchg, u32 status);
/* wait list XCHG send function */
int (*scsi_or_tgt_cmnd_func)(struct unf_xchg *xchg);
/* send result callback */
void (*ob_callback)(struct unf_xchg *xchg);
/* Response IO callback */
void (*callback)(void *lport, void *rport, void *xchg);
/* Xchg release function */
void (*free_xchg)(struct unf_xchg *xchg);
/* +++++++++++++++++low level Special++++++++++++++++++++ */
/* private data,provide for low level */
u32 private_data[PKG_MAX_PRIVATE_DATA_SIZE];
u64 rport_bind_jifs;
/* sfs exchg ob callback status */
u32 ob_callback_sts;
u32 scsi_id;
u32 qos_level;
void *ls_rsp_addr;
void *ls_req;
u32 status;
atomic_t delay_flag;
void *upper_ct;
};
/* Exchange-manager API: allocation, lookup, abort and teardown helpers */
struct unf_esgl_page *
unf_get_and_add_one_free_esgl_page(struct unf_lport *lport,
struct unf_xchg *xchg);
void unf_release_xchg_mgr_temp(struct unf_lport *lport);
u32 unf_init_xchg_mgr_temp(struct unf_lport *lport);
u32 unf_alloc_xchg_resource(struct unf_lport *lport);
void unf_free_all_xchg_mgr(struct unf_lport *lport);
void unf_xchg_mgr_destroy(struct unf_lport *lport);
u32 unf_xchg_ref_inc(struct unf_xchg *xchg, enum unf_ioflow_id io_stage);
void unf_xchg_ref_dec(struct unf_xchg *xchg, enum unf_ioflow_id io_stage);
struct unf_xchg_mgr *unf_get_xchg_mgr_by_lport(struct unf_lport *lport,
u32 mgr_idx);
struct unf_xchg_hot_pool *unf_get_hot_pool_by_lport(struct unf_lport *lport,
u32 mgr_idx);
void unf_free_lport_ini_xchg(struct unf_xchg_mgr *xchg_mgr, bool done_ini_flag);
struct unf_xchg *unf_cm_lookup_xchg_by_cmnd_sn(void *lport, u64 command_sn,
u32 world_id, void *pinitiator);
void *unf_cm_lookup_xchg_by_id(void *lport, u16 ox_id, u32 oid);
void unf_cm_xchg_abort_by_lun(struct unf_lport *lport, struct unf_rport *rport,
u64 lun_id, void *tm_xchg,
bool abort_all_lun_flag);
void unf_cm_xchg_abort_by_session(struct unf_lport *lport,
struct unf_rport *rport);
void unf_cm_xchg_mgr_abort_io_by_id(struct unf_lport *lport,
struct unf_rport *rport, u32 sid, u32 did,
u32 extra_io_stat);
void unf_cm_xchg_mgr_abort_sfs_by_id(struct unf_lport *lport,
struct unf_rport *rport, u32 sid, u32 did);
void unf_cm_free_xchg(void *lport, void *xchg);
void *unf_cm_get_free_xchg(void *lport, u32 xchg_type);
void *unf_cm_lookup_xchg_by_tag(void *lport, u16 hot_pool_tag);
void unf_release_esgls(struct unf_xchg *xchg);
void unf_show_all_xchg(struct unf_lport *lport, struct unf_xchg_mgr *xchg_mgr);
void unf_destroy_dirty_xchg(struct unf_lport *lport, bool show_only);
void unf_wake_up_scsi_task_cmnd(struct unf_lport *lport);
void unf_set_hot_pool_wait_state(struct unf_lport *lport, bool wait_state);
void unf_free_lport_all_xchg(struct unf_lport *lport);
extern u32 unf_get_up_level_cmnd_errcode(struct unf_ini_error_code *err_table,
u32 err_table_count, u32 drv_err_code);
bool unf_busy_io_completed(struct unf_lport *lport);
#endif
此差异已折叠。
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
/* Exchange abort paths: by LUN, by session, by S_ID/D_ID, or by type. */
#ifndef UNF_EXCHG_ABORT_H
#define UNF_EXCHG_ABORT_H
#include "unf_type.h"
#include "unf_exchg.h"
/* Mask selecting the low 16 bits of a raw 64-bit LUN id. */
#define UNF_RAW_LUN_ID_MASK 0x000000000000ffff
void unf_xchg_abort_by_lun(void *lport, void *rport, u64 lun_id, void *tm_xchg,
			   bool abort_all_lun_flag);
void unf_xchg_abort_by_session(void *lport, void *rport);
void unf_xchg_mgr_io_xchg_abort(void *lport, void *rport, u32 sid, u32 did,
				u32 extra_io_state);
void unf_xchg_mgr_sfs_xchg_abort(void *lport, void *rport, u32 sid, u32 did);
void unf_xchg_abort_all_xchg(void *lport, u32 xchg_type, bool clean);
/* Timeout handling for commands and exchanges (work-queue callbacks). */
void unf_fc_abort_time_out_cmnd(struct unf_lport *lport, struct unf_xchg *xchg);
void unf_fc_ini_io_xchg_time_out(struct work_struct *work);
void unf_sfs_xchg_time_out(struct work_struct *work);
void unf_xchg_up_abort_io_by_scsi_id(void *lport, u32 scsi_id);
#endif
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
/*
 * On-the-wire FC frame layouts (FC header plus payload) for the basic
 * link services and extended link services used by this driver.
 */
#ifndef UNF_FCSTRUCT_H
#define UNF_FCSTRUCT_H
#include "unf_type.h"
#include "unf_scsi_common.h"
/* R_CTL value marking a Basic Link Service frame. */
#define FC_RCTL_BLS 0x80000000
/* R_CTL Basic Link Data defines */
#define FC_RCTL_BLS_ACC (FC_RCTL_BLS | 0x04000000)
#define FC_RCTL_BLS_RJT (FC_RCTL_BLS | 0x05000000)
/* BA_RJT reason code defines */
#define FCXLS_BA_RJT_LOGICAL_ERROR 0x00030000
/* BA_RJT code explanation */
#define FCXLS_LS_RJT_INVALID_OXID_RXID 0x00001700
/* ELS ACC: FC header followed by the ACC command word. */
struct unf_els_acc {
	struct unf_fc_head frame_hdr;
	u32 cmnd;
};
/* ELS RJT: FC header, LS_RJT command word and reason code. */
struct unf_els_rjt {
	struct unf_fc_head frame_hdr;
	u32 cmnd;
	u32 reason_code;
};
/*
 * FLOGI payload,
 * FC-LS-2 FLOGI, PLOGI, FDISC or LS_ACC Payload
 */
struct unf_flogi_fdisc_payload {
	u32 cmnd;
	struct unf_fabric_parm fabric_parms;
};
/* Flogi and Flogi accept frames. They are the same structure. */
struct unf_flogi_fdisc_acc {
	struct unf_fc_head frame_hdr;
	struct unf_flogi_fdisc_payload flogi_payload;
};
/* Fdisc and Fdisc accept frames. They are the same structure. */
struct unf_fdisc_acc {
	struct unf_fc_head frame_hdr;
	struct unf_flogi_fdisc_payload fdisc_payload;
};
/* PLOGI payload: command word plus login service parameters. */
struct unf_plogi_payload {
	u32 cmnd;
	struct unf_lgn_parm stparms;
};
/*
 * Plogi, Plogi accept, Pdisc and Pdisc accept frames. They are all the same
 * structure.
 */
struct unf_plogi_pdisc {
	struct unf_fc_head frame_hdr;
	struct unf_plogi_payload payload;
};
/*
 * LOGO logout link service requests invalidation of service parameters and
 * port name. See FC-PH 4.3 Section 21.4.8.
 */
struct unf_logo_payload {
	u32 cmnd;
	u32 nport_id;
	u32 high_port_name;
	u32 low_port_name;
};
/* Frame that carries the LOGO payload. */
struct unf_logo {
	struct unf_fc_head frame_hdr;
	struct unf_logo_payload payload;
};
/* Payload for the ECHO command, refer to FC-LS-2 4.2.4. */
struct unf_echo_payload {
	u32 cmnd;
#define UNF_FC_ECHO_PAYLOAD_LENGTH 255 /* Length in words */
	u32 data[UNF_FC_ECHO_PAYLOAD_LENGTH];
};
/* ECHO frame; payload is a separate buffer tracked by virtual + bus address. */
struct unf_echo {
	struct unf_fc_head frame_hdr;
	struct unf_echo_payload *echo_pld;
	dma_addr_t phy_echo_addr;
};
#define UNF_PRLI_SIRT_EXTRA_SIZE 12
/* Payload for PRLI and PRLO. */
struct unf_prli_payload {
	u32 cmnd;
#define UNF_FC_PRLI_PAYLOAD_LENGTH 7 /* Length in words */
	u32 parms[UNF_FC_PRLI_PAYLOAD_LENGTH];
};
/* FC header with PRLI/PRLO payload. */
struct unf_prli_prlo {
	struct unf_fc_head frame_hdr;
	struct unf_prli_payload payload;
};
/* ADISC payload: hard address, port/node names and N_Port ID. */
struct unf_adisc_payload {
	u32 cmnd;
	u32 hard_address;
	u32 high_port_name;
	u32 low_port_name;
	u32 high_node_name;
	u32 low_node_name;
	u32 nport_id;
};
/* FC header with ADISC payload. */
struct unf_adisc {
	struct unf_fc_head frame_hdr; /* FCHS structure */
	struct unf_adisc_payload
	    adisc_payl; /* Payload data containing ADISC info */
};
/* RLS payload. */
struct unf_rls_payload {
	u32 cmnd;
	u32 nport_id; /* in little endian format */
};
/* RLS request frame. */
struct unf_rls {
	struct unf_fc_head frame_hdr; /* FCHS structure */
	struct unf_rls_payload rls; /* payload data containing the RLS info */
};
/* RLS accept payload: link error status counters. */
struct unf_rls_acc_payload {
	u32 cmnd;
	u32 link_failure_count;
	u32 loss_of_sync_count;
	u32 loss_of_signal_count;
	u32 primitive_seq_count;
	u32 invalid_trans_word_count;
	u32 invalid_crc_count;
};
/* RLS accept frame. */
struct unf_rls_acc {
	struct unf_fc_head frame_hdr; /* FCHS structure */
	struct unf_rls_acc_payload
	    rls; /* payload data containing the RLS ACC info */
};
/* RRQ frame: command word, originator S_ID and the OX_ID/RX_ID pair. */
struct unf_rrq {
	struct unf_fc_head frame_hdr;
	u32 cmnd;
	u32 sid;
	u32 oxid_rxid;
};
#define UNF_SCR_PAYLOAD_CNT 2
/* SCR (state change registration) frame: two payload words. */
struct unf_scr {
	struct unf_fc_head frame_hdr;
	u32 payload[UNF_SCR_PAYLOAD_CNT];
};
/* CT IU preamble shared by the name-server (GS) requests below. */
struct unf_ctiu_prem {
	u32 rev_inid;
	u32 gstype_gssub_options;
	u32 cmnd_rsp_size;
	u32 frag_reason_exp_vend;
};
#define UNF_FC4TYPE_CNT 8
/* RFT_ID request: register FC-4 types for an N_Port ID. */
struct unf_rftid {
	struct unf_fc_head frame_hdr;
	struct unf_ctiu_prem ctiu_pream;
	u32 nport_id;
	u32 fc4_types[UNF_FC4TYPE_CNT];
};
/* RFF_ID request: register FC-4 features for an N_Port ID. */
struct unf_rffid {
	struct unf_fc_head frame_hdr;
	struct unf_ctiu_prem ctiu_pream;
	u32 nport_id;
	u32 fc4_feature;
};
/* RFF_ID response: CT IU preamble only. */
struct unf_rffid_rsp {
	struct unf_fc_head frame_hdr;
	struct unf_ctiu_prem ctiu_pream;
};
/* GFF_ID request: query FC-4 features of an N_Port ID. */
struct unf_gffid {
	struct unf_fc_head frame_hdr;
	struct unf_ctiu_prem ctiu_pream;
	u32 nport_id;
};
/* GFF_ID response: 32-word (128-byte) FC-4 feature area. */
struct unf_gffid_rsp {
	struct unf_fc_head frame_hdr;
	struct unf_ctiu_prem ctiu_pream;
	u32 fc4_feature[32];
};
/* GNN_ID request: query the node name of an N_Port ID. */
struct unf_gnnid {
	struct unf_fc_head frame_hdr;
	struct unf_ctiu_prem ctiu_pream;
	u32 nport_id;
};
/* GNN_ID response: 64-bit node name as two words. */
struct unf_gnnid_rsp {
	struct unf_fc_head frame_hdr;
	struct unf_ctiu_prem ctiu_pream;
	u32 node_name[2];
};
/* GPN_ID request: query the port name of an N_Port ID. */
struct unf_gpnid {
	struct unf_fc_head frame_hdr;
	struct unf_ctiu_prem ctiu_pream;
	u32 nport_id;
};
/* GPN_ID response: 64-bit port name as two words. */
struct unf_gpnid_rsp {
	struct unf_fc_head frame_hdr;
	struct unf_ctiu_prem ctiu_pream;
	u32 port_name[2];
};
/* Generic RFT-style response: CT IU preamble only. */
struct unf_rft_rsp {
	struct unf_fc_head frame_hdr;
	struct unf_ctiu_prem ctiu_pream;
};
/* LS_RJT payload.
 * NOTE(review): "vandor" looks like a typo for "vendor"; the field name is
 * kept because other translation units may reference it.
 */
struct unf_ls_rjt_pld {
	u32 srr_op; /* 01000000h */
	u8 vandor;
	u8 reason_exp;
	u8 reason;
	u8 reserved;
};
/* LS_RJT frame. */
struct unf_ls_rjt {
	struct unf_fc_head frame_hdr;
	struct unf_ls_rjt_pld pld;
};
/* REC payload: originator S_ID (low 24 bits) plus RX_ID/OX_ID. */
struct unf_rec_pld {
	u32 rec_cmnd;
	u32 xchg_org_sid; /* bit0-bit23 */
	u16 rx_id;
	u16 ox_id;
};
/* REC request frame. */
struct unf_rec {
	struct unf_fc_head frame_hdr;
	struct unf_rec_pld rec_pld;
};
/* REC accept payload. */
struct unf_rec_acc_pld {
	u32 cmnd;
	u16 rx_id;
	u16 ox_id;
	u32 org_addr_id; /* bit0-bit23 */
	u32 rsp_addr_id; /* bit0-bit23 */
};
/* REC accept frame. */
struct unf_rec_acc {
	struct unf_fc_head frame_hdr;
	struct unf_rec_acc_pld payload;
};
/* GID request body: CT IU preamble plus scope/type word. */
struct unf_gid {
	struct unf_ctiu_prem ctiu_pream;
	u32 scope_type;
};
/* GID accept header. */
struct unf_gid_acc {
	struct unf_fc_head frame_hdr;
	struct unf_ctiu_prem ctiu_pream;
};
#define UNF_LOOPMAP_COUNT 128
/* Loop initialization frame carrying the AL_PA bit map. */
struct unf_loop_init {
	struct unf_fc_head frame_hdr;
	u32 cmnd;
#define UNF_FC_ALPA_BIT_MAP_SIZE 4
	u32 alpha_bit_map[UNF_FC_ALPA_BIT_MAP_SIZE];
};
/* Loop position map frame. */
struct unf_loop_map {
	struct unf_fc_head frame_hdr;
	u32 cmnd;
	u32 loop_map[32];
};
/* CT IU reject: preamble carries the reject reason. */
struct unf_ctiu_rjt {
	struct unf_fc_head frame_hdr;
	struct unf_ctiu_prem ctiu_pream;
};
/* GID accept payload holding the returned port-ID list. */
struct unf_gid_acc_pld {
	struct unf_ctiu_prem ctiu_pream;
	u32 gid_port_id[UNF_GID_PORT_CNT];
};
/* GID response holder: points at a separately allocated accept payload. */
struct unf_gid_rsp {
	struct unf_gid_acc_pld *gid_acc_pld;
};
/* Combined GID request/response exchange buffer. */
struct unf_gid_req_rsp {
	struct unf_fc_head frame_hdr;
	struct unf_gid gid_req;
	struct unf_gid_rsp gid_rsp;
};
/* FC-LS-2 Table 31 RSCN Payload */
struct unf_rscn_port_id_page {
	u8 port_id_port;
	u8 port_id_area;
	u8 port_id_domain;
	u8 addr_format : 2;
	u8 event_qualifier : 4;
	u8 reserved : 2;
};
/* RSCN payload: command word plus the affected port-ID pages. */
struct unf_rscn_pld {
	u32 cmnd;
	struct unf_rscn_port_id_page port_id_page[UNF_RSCN_PAGE_SUM];
};
/* RSCN frame; the payload is allocated separately. */
struct unf_rscn {
	struct unf_fc_head frame_hdr;
	struct unf_rscn_pld *rscn_pld;
};
/*
 * Union of every SFS frame layout above, so one buffer can hold any of the
 * requests or responses.
 */
union unf_sfs_u {
	struct {
		struct unf_fc_head frame_head;
		u8 data[0]; /* NOTE(review): legacy zero-length array kept;
			     * a C99 flexible array member is not allowed in
			     * a struct nested inside a union.
			     */
	} sfs_common;
	struct unf_els_acc els_acc;
	struct unf_els_rjt els_rjt;
	struct unf_plogi_pdisc plogi;
	struct unf_logo logo;
	struct unf_echo echo;
	struct unf_echo echo_acc;
	struct unf_prli_prlo prli;
	struct unf_prli_prlo prlo;
	struct unf_rls rls;
	struct unf_rls_acc rls_acc;
	struct unf_plogi_pdisc pdisc;
	struct unf_adisc adisc;
	struct unf_rrq rrq;
	struct unf_flogi_fdisc_acc flogi;
	struct unf_fdisc_acc fdisc;
	struct unf_scr scr;
	struct unf_rec rec;
	struct unf_rec_acc rec_acc;
	struct unf_ls_rjt ls_rjt;
	struct unf_rscn rscn;
	struct unf_gid_req_rsp get_id;
	struct unf_rftid rft_id;
	struct unf_rft_rsp rft_id_rsp;
	struct unf_rffid rff_id;
	struct unf_rffid_rsp rff_id_rsp;
	struct unf_gffid gff_id;
	struct unf_gffid_rsp gff_id_rsp;
	struct unf_gnnid gnn_id;
	struct unf_gnnid_rsp gnn_id_rsp;
	struct unf_gpnid gpn_id;
	struct unf_gpnid_rsp gpn_id_rsp;
	struct unf_plogi_pdisc plogi_acc;
	struct unf_plogi_pdisc pdisc_acc;
	struct unf_adisc adisc_acc;
	struct unf_prli_prlo prli_acc;
	struct unf_prli_prlo prlo_acc;
	struct unf_flogi_fdisc_acc flogi_acc;
	struct unf_fdisc_acc fdisc_acc;
	struct unf_loop_init lpi;
	struct unf_loop_map loop_map;
	struct unf_ctiu_rjt ctiu_rjt;
};
/* One SFS buffer: virtual address, bus address, length and fill offset. */
struct unf_sfs_entry {
	union unf_sfs_u *fc_sfs_entry_ptr; /* Virtual addr of SFS buffer */
	u64 sfs_buff_phy_addr; /* Physical addr of SFS buffer */
	u32 sfs_buff_len; /* Length of bytes in SFS buffer */
	u32 cur_offset;
};
/* FCP_RSP IU pointer plus its sense-data length. */
struct unf_fcp_rsp_iu_entry {
	u8 *fcp_rsp_iu;
	u32 fcp_sense_len;
};
/* Parameters used to build an LS_RJT / BA_RJT reply. */
struct unf_rjt_info {
	u32 els_cmnd_code;
	u32 reason_code;
	u32 reason_explanation;
	u8 class_mode;
	u8 ucrsvd[3];
};
#endif
此差异已折叠。
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
/* Fabric generic services (name server) request senders and response hooks. */
#ifndef UNF_GS_H
#define UNF_GS_H
#include "unf_type.h"
#include "unf_lport.h"
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
/* SCR registration and CT pass-through senders. */
u32 unf_send_scr(struct unf_lport *lport,
		 struct unf_rport *rport);
u32 unf_send_ctpass_thru(struct unf_lport *lport,
			 void *buffer, u32 bufflen);
/* Name-server queries: GID_FT/GID_PT port lists, GPN/GNN/GFF by N_Port ID. */
u32 unf_send_gid_ft(struct unf_lport *lport,
		    struct unf_rport *rport);
u32 unf_send_gid_pt(struct unf_lport *lport,
		    struct unf_rport *rport);
u32 unf_send_gpn_id(struct unf_lport *lport,
		    struct unf_rport *sns_port, u32 nport_id);
u32 unf_send_gnn_id(struct unf_lport *lport,
		    struct unf_rport *sns_port, u32 nport_id);
u32 unf_send_gff_id(struct unf_lport *lport,
		    struct unf_rport *sns_port, u32 nport_id);
/* Name-server registrations: RFF_ID (FC-4 features), RFT_ID (FC-4 types). */
u32 unf_send_rff_id(struct unf_lport *lport,
		    struct unf_rport *rport, u32 fc4_type);
u32 unf_send_rft_id(struct unf_lport *lport,
		    struct unf_rport *rport);
/* Fallback handlers for responses arriving in an unknown state. */
void unf_rcv_gnn_id_rsp_unknown(struct unf_lport *lport,
				struct unf_rport *sns_port, u32 nport_id);
void unf_rcv_gpn_id_rsp_unknown(struct unf_lport *lport, u32 nport_id);
void unf_rcv_gff_id_rsp_unknown(struct unf_lport *lport, u32 nport_id);
void unf_check_rport_need_delay_plogi(struct unf_lport *lport,
				      struct unf_rport *rport, u32 port_feature);
/* Input block for a CT pass-through transaction.
 * NOTE(review): req_buffer[ARRAY_INDEX_1] is a legacy one-element trailing
 * array; actual length presumably follows req_buffer_count — confirm at
 * the call sites.
 */
struct send_com_trans_in {
	unsigned char port_wwn[8];
	u32 req_buffer_count;
	unsigned char req_buffer[ARRAY_INDEX_1];
};
/* Output block for a CT pass-through transaction. */
struct send_com_trans_out {
	u32 hba_status;
	u32 total_resp_buffer_cnt;
	u32 actual_resp_buffer_cnt;
	unsigned char resp_buffer[ARRAY_INDEX_1];
};
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif
此差异已折叠。
此差异已折叠。
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
/* Initiator SCSI I/O path: queueing, completion, and error-handler entries. */
#ifndef UNF_IO_H
#define UNF_IO_H
#include "unf_type.h"
#include "unf_scsi_common.h"
#include "unf_exchg.h"
#include "unf_rport.h"
/* SCSI host limits advertised by this driver. */
#define UNF_MAX_TARGET_NUMBER 2048
#define UNF_DEFAULT_MAX_LUN 0xFFFF
#define UNF_MAX_DMA_SEGS 0x400
#define UNF_MAX_SCSI_CMND_LEN 16
#define UNF_MAX_BUS_CHANNEL 0
#define UNF_DMA_BOUNDARY 0xffffffffffffffff
#define UNF_MAX_CMND_PER_LUN 64 /* LUN max command */
/* True when an exchange belongs to (lun_id, scsi_id); a lun_id of
 * INVALID_VALUE64 matches any LUN.
 */
#define UNF_CHECK_LUN_ID_MATCH(lun_id, raw_lun_id, scsi_id, xchg) \
	(((lun_id) == (raw_lun_id) || (lun_id) == INVALID_VALUE64) && \
	 ((scsi_id) == (xchg)->scsi_id))
/* SCSI sense keys.
 * NOTE(review): these mirror the standard sense-key values and may collide
 * with <scsi/scsi_proto.h> if both headers end up in one translation unit.
 */
#define NO_SENSE 0x00
#define RECOVERED_ERROR 0x01
#define NOT_READY 0x02
#define MEDIUM_ERROR 0x03
#define HARDWARE_ERROR 0x04
#define ILLEGAL_REQUEST 0x05
#define UNIT_ATTENTION 0x06
#define DATA_PROTECT 0x07
#define BLANK_CHECK 0x08
#define COPY_ABORTED 0x0a
#define ABORTED_COMMAND 0x0b
#define VOLUME_OVERFLOW 0x0d
#define MISCOMPARE 0x0e
/* Fixed-format sense data: response code 0x70, additional length 7.
 * NOTE(review): "ADDITINONAL" is a typo for "ADDITIONAL"; the macro name is
 * kept because other translation units may reference it.
 */
#define SENSE_DATA_RESPONSE_CODE 0x70
#define ADDITINONAL_SENSE_LEN 0x7
extern u32 sector_size_flag;
/* Accessor macros over struct unf_scsi_cmnd fields. */
#define UNF_GET_SCSI_HOST_ID_BY_CMND(cmd) ((cmd)->scsi_host_id)
#define UNF_GET_SCSI_ID_BY_CMND(cmd) ((cmd)->scsi_id)
#define UNF_GET_HOST_PORT_BY_CMND(cmd) ((cmd)->drv_private)
#define UNF_GET_FCP_CMND(cmd) ((cmd)->pcmnd[ARRAY_INDEX_0])
#define UNF_GET_DATA_LEN(cmd) ((cmd)->transfer_len)
#define UNF_GET_DATA_DIRECTION(cmd) ((cmd)->data_direction)
#define UNF_GET_HOST_CMND(cmd) ((cmd)->upper_cmnd)
#define UNF_GET_CMND_DONE_FUNC(cmd) ((cmd)->done)
#define UNF_GET_UP_LEVEL_CMND_DONE(cmd) ((cmd)->uplevel_done)
#define UNF_GET_SGL_ENTRY_BUF_FUNC(cmd) ((cmd)->unf_ini_get_sgl_entry)
#define UNF_GET_SENSE_BUF_ADDR(cmd) ((cmd)->sense_buf)
#define UNF_GET_ERR_CODE_TABLE(cmd) ((cmd)->err_code_table)
#define UNF_GET_ERR_CODE_TABLE_COUNT(cmd) ((cmd)->err_code_table_cout)
#define UNF_SET_HOST_CMND(cmd, host_cmd) ((cmd)->upper_cmnd = (host_cmd))
/* NOTE(review): "SER" looks like a typo for "SET"; name kept for callers. */
#define UNF_SER_CMND_DONE_FUNC(cmd, pfn) ((cmd)->done = (pfn))
#define UNF_SET_UP_LEVEL_CMND_DONE_FUNC(cmd, pfn) ((cmd)->uplevel_done = (pfn))
#define UNF_SET_RESID(cmd, uiresid) ((cmd)->resid = (uiresid))
#define UNF_SET_CMND_RESULT(cmd, uiresult) ((cmd)->result = ((int)(uiresult)))
#define UNF_DONE_SCSI_CMND(cmd) ((cmd)->done(cmd))
#define UNF_GET_CMND_SGL(cmd) ((cmd)->sgl)
#define UNF_INI_GET_DIF_SGL(cmd) ((cmd)->dif_control.dif_sgl)
/* Completion and SGL helpers invoked from the low-level driver. */
u32 unf_ini_scsi_completed(void *lport, struct unf_frame_pkg *pkg);
u32 unf_ini_get_sgl_entry(void *pkg, char **buf, u32 *buf_len);
u32 unf_ini_get_dif_sgl_entry(void *pkg, char **buf, u32 *buf_len);
void unf_complete_cmnd(struct unf_scsi_cmnd *scsi_cmnd, u32 result_size);
void unf_done_ini_xchg(struct unf_xchg *xchg);
/* Timeout recovery hooks for task management and ABTS. */
u32 unf_tmf_timeout_recovery_special(void *rport, void *xchg);
u32 unf_tmf_timeout_recovery_default(void *rport, void *xchg);
void unf_abts_timeout_recovery_default(void *rport, void *xchg);
/* SCSI midlayer entry points: queuecommand and error-handler callbacks. */
int unf_cm_queue_command(struct unf_scsi_cmnd *scsi_cmnd);
int unf_cm_eh_abort_handler(struct unf_scsi_cmnd *scsi_cmnd);
int unf_cm_eh_device_reset_handler(struct unf_scsi_cmnd *scsi_cmnd);
int unf_cm_target_reset_handler(struct unf_scsi_cmnd *scsi_cmnd);
int unf_cm_bus_reset_handler(struct unf_scsi_cmnd *scsi_cmnd);
int unf_cm_virtual_reset_handler(struct unf_scsi_cmnd *scsi_cmnd);
struct unf_rport *unf_find_rport_by_scsi_id(struct unf_lport *lport,
					    struct unf_ini_error_code *errcode_table,
					    u32 errcode_table_count,
					    u32 scsi_id, u32 *scsi_result);
/* NOTE(review): CamelCase name is inconsistent with the rest of this API. */
u32 UNF_IOExchgDelayProcess(struct unf_lport *lport, struct unf_xchg *xchg);
struct unf_lport *unf_find_lport_by_scsi_cmd(struct unf_scsi_cmnd *scsi_cmnd);
int unf_send_scsi_mgmt_cmnd(struct unf_xchg *xchg, struct unf_lport *lport,
			    struct unf_rport *rport,
			    struct unf_scsi_cmnd *scsi_cmnd,
			    enum unf_task_mgmt_cmd task_mgnt_cmd_type);
void unf_tmf_abnormal_recovery(struct unf_lport *lport, struct unf_rport *rport,
			       struct unf_xchg *xchg);
#endif
此差异已折叠。
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
/* Abnormal I/O handling: task-management results and abort/TMF markers. */
#ifndef UNF_IO_ABNORMAL_H
#define UNF_IO_ABNORMAL_H
#include "unf_type.h"
#include "unf_lport.h"
#include "unf_exchg.h"
/* Low-level driver error lives in the upper 16 bits of pkg->status. */
#define UNF_GET_LL_ERR(pkg) (((pkg)->status) >> 16)
void unf_process_scsi_mgmt_result(struct unf_frame_pkg *pkg,
				  struct unf_xchg *xchg);
u32 unf_hardware_start_io(struct unf_lport *lport, struct unf_frame_pkg *pkg);
/* Marker-status callbacks from the hardware for ABTS and TMF. */
u32 unf_recv_abts_marker_status(void *lport, struct unf_frame_pkg *pkg);
u32 unf_recv_tmf_marker_status(void *lport, struct unf_frame_pkg *pkg);
#endif
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
/* Extended link service (ELS) senders, callbacks and inbound handlers. */
#ifndef UNF_LS_H
#define UNF_LS_H
#include "unf_type.h"
#include "unf_exchg.h"
#include "unf_rport.h"
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
/* ELS request senders (initiator side). */
u32 unf_send_adisc(struct unf_lport *lport, struct unf_rport *rport);
u32 unf_send_pdisc(struct unf_lport *lport, struct unf_rport *rport);
u32 unf_send_flogi(struct unf_lport *lport, struct unf_rport *rport);
u32 unf_send_fdisc(struct unf_lport *lport, struct unf_rport *rport);
u32 unf_send_plogi(struct unf_lport *lport, struct unf_rport *rport);
u32 unf_send_prli(struct unf_lport *lport, struct unf_rport *rport,
		  u32 cmnd_code);
u32 unf_send_prlo(struct unf_lport *lport, struct unf_rport *rport);
u32 unf_send_logo(struct unf_lport *lport, struct unf_rport *rport);
u32 unf_send_logo_by_did(struct unf_lport *lport, u32 did);
u32 unf_send_echo(struct unf_lport *lport, struct unf_rport *rport, u32 *time);
u32 unf_send_plogi_rjt_by_did(struct unf_lport *lport, u32 did);
u32 unf_send_rrq(struct unf_lport *lport, struct unf_rport *rport,
		 struct unf_xchg *xchg);
/* Exchange completion callbacks per ELS type.
 * NOTE(review): the *_ob_callback/*_callback split presumably distinguishes
 * outbound-failure from response handling — confirm in the .c file.
 */
void unf_flogi_ob_callback(struct unf_xchg *xchg);
void unf_flogi_callback(void *lport, void *rport, void *xchg);
void unf_fdisc_ob_callback(struct unf_xchg *xchg);
void unf_fdisc_callback(void *lport, void *rport, void *xchg);
void unf_plogi_ob_callback(struct unf_xchg *xchg);
void unf_plogi_callback(void *lport, void *rport, void *xchg);
void unf_prli_ob_callback(struct unf_xchg *xchg);
void unf_prli_callback(void *lport, void *rport, void *xchg);
/* Inbound ELS handlers, called with the sender's S_ID and the exchange. */
u32 unf_flogi_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg);
u32 unf_plogi_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg);
u32 unf_rec_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg);
u32 unf_prli_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg);
u32 unf_prlo_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg);
u32 unf_rscn_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg);
u32 unf_logo_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg);
u32 unf_echo_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg);
u32 unf_pdisc_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg);
u32 unf_send_pdisc_rjt(struct unf_lport *lport, struct unf_rport *rport,
		       struct unf_xchg *xchg);
u32 unf_adisc_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg);
u32 unf_rrq_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg);
u32 unf_send_rec(struct unf_lport *lport, struct unf_rport *rport,
		 struct unf_xchg *io_xchg);
u32 unf_low_level_bb_scn(struct unf_lport *lport);
/* Generic event task callback type. */
typedef int (*unf_event_task)(void *arg_in, void *arg_out);
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* UNF_LS_H */
此差异已折叠。
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
/* NPIV virtual port (vport) creation, pooling, link state and teardown. */
#ifndef UNF_NPIV_H
#define UNF_NPIV_H
#include "unf_type.h"
#include "unf_common.h"
#include "unf_lport.h"
/* Product VPORT configuration. */
struct vport_config {
	u64 node_name;
	u64 port_name;
	u32 port_mode; /* INI, TGT or both */
};
/* The vport index is packed into the top byte of the port id.
 * NOTE(review): "SHIT" is a typo for "SHIFT"; the macro name is kept
 * because other translation units may reference it.
 */
#define PORTID_VPINDEX_MASK 0xff000000
#define PORTID_VPINDEX_SHIT 24
/* Create/delete a vport by WWPN or by (port_id, vp_index). */
u32 unf_npiv_conf(u32 port_id, u64 wwpn, enum unf_rport_qos_level qos_level);
struct unf_lport *unf_creat_vport(struct unf_lport *lport,
				  struct vport_config *vport_config);
u32 unf_delete_vport(u32 port_id, u32 vp_index);
/* Vport pool create and release functions. */
u32 unf_init_vport_pool(struct unf_lport *lport);
void unf_free_vport_pool(struct unf_lport *lport);
/* Lport register stLPortMgTemp function. */
void unf_vport_remove(void *vport);
void unf_vport_ref_dec(struct unf_lport *vport);
/* Link down all vports after receiving a linkdown event. */
void unf_linkdown_all_vports(void *lport);
/* Lport received FLOGI ACC: link up all vports. */
void unf_linkup_all_vports(struct unf_lport *lport);
/* Lport removal deletes all vports. */
void unf_destroy_all_vports(struct unf_lport *lport);
void unf_vport_fabric_logo(struct unf_lport *vport);
u32 unf_destroy_one_vport(struct unf_lport *vport);
u32 unf_drop_vport(struct unf_lport *vport);
u32 unf_init_vport_mgr_temp(struct unf_lport *lport);
void unf_release_vport_mgr_temp(struct unf_lport *lport);
struct unf_lport *unf_get_vport_by_slab_index(struct unf_vport_pool *vport_pool,
					      u16 slab_index);
#endif
此差异已折叠。
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
/* NPIV port manager: vport lookup by index, port id, D_ID or WWPN. */
#ifndef UNF_NPIV_PORTMAN_H
#define UNF_NPIV_PORTMAN_H
#include "unf_type.h"
#include "unf_lport.h"
/* Lport register stLPortMgTemp functions (returns struct unf_lport *). */
void *unf_lookup_vport_by_index(void *lport, u16 vp_index);
void *unf_lookup_vport_by_portid(void *lport, u32 port_id);
void *unf_lookup_vport_by_did(void *lport, u32 did);
void *unf_lookup_vport_by_wwpn(void *lport, u64 wwpn);
void unf_linkdown_one_vport(struct unf_lport *vport);
#endif
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册