Commit a8f304a8 authored by Yanling Song, committed by Zheng Zengkai

scsi: spfc: Remove redundant mask and spinlock

Ramaxel inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4UA67
CVE: NA

----------------------------------------------

Fix:
1. Remove UNF_ORIGIN_HOTTAG_MASK and UNF_HOTTAG_FLAG.
2. Update some output strings.
3. Remove the spinlock protection in free_parent_sq(), since the caller
   free_parent_queue_info() already holds the lock (see the sketch below).
Signed-off-by: Yanling Song <songyl@ramaxel.com>
Reviewed-by: Yun Xu <xuyun@ramaxel.com>
Acked-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent ddb0a54d
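A note on item 3 of the fix list: the patch follows the usual kernel pattern of
letting one caller own the lock across a teardown helper, so the helper itself
must not take the same (non-recursive) spinlock again. Below is a minimal,
hypothetical sketch of that pattern; the simplified names and signatures
(queue_state_lock, struct sqe_entry, and the argument lists) are illustrative
stand-ins, not the actual spfc code.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(queue_state_lock);

struct sqe_entry {
	struct list_head node;
};

/* Helper: relies on the caller already holding queue_state_lock,
 * so it walks and frees the list without locking again. */
static void free_parent_sq(struct list_head *suspend_sqe_list)
{
	struct sqe_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, suspend_sqe_list, node) {
		list_del(&entry->node);
		kfree(entry);	/* kfree() is safe in atomic context */
	}
}

/* Caller: owns the lock for the whole teardown, including the helper
 * call, so there is no unlock/relock window around it. */
static void free_parent_queue_info(struct list_head *suspend_sqe_list)
{
	unsigned long flags;

	spin_lock_irqsave(&queue_state_lock, flags);
	free_parent_sq(suspend_sqe_list);	/* lock stays held */
	spin_unlock_irqrestore(&queue_state_lock, flags);
}

This mirrors the change in the diff below: spfc_free_parent_queue_info() now
keeps prtq_state_lock held across spfc_free_parent_sq(), and the lock/unlock
inside the helper is removed.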
......@@ -12,8 +12,6 @@
#define SPFC_DRV_DESC "Ramaxel Memory Technology Fibre Channel Driver"
#define UNF_MAX_SECTORS 0xffff
-#define UNF_ORIGIN_HOTTAG_MASK 0x7fff
-#define UNF_HOTTAG_FLAG (1 << 15)
#define UNF_PKG_FREE_OXID 0x0
#define UNF_PKG_FREE_RXID 0x1
......
......@@ -890,8 +890,7 @@ static int unf_send_fcpcmnd(struct unf_lport *lport, struct unf_rport *rport,
unf_xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME];
pkg.private_data[PKG_PRIVATE_XCHG_VP_INDEX] = unf_lport->vp_index;
pkg.private_data[PKG_PRIVATE_XCHG_RPORT_INDEX] = unf_rport->rport_index;
-pkg.private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] =
-unf_xchg->hotpooltag | UNF_HOTTAG_FLAG;
+pkg.private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = unf_xchg->hotpooltag;
unf_select_sq(unf_xchg, &pkg);
pkg.fcp_cmnd = &unf_xchg->fcp_cmnd;
......
......@@ -763,7 +763,7 @@ int unf_send_scsi_mgmt_cmnd(struct unf_xchg *xchg, struct unf_lport *lport,
pkg.xchg_contex = unf_xchg;
pkg.private_data[PKG_PRIVATE_XCHG_RPORT_INDEX] = rport->rport_index;
pkg.fcp_cmnd = &unf_xchg->fcp_cmnd;
-pkg.private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = unf_xchg->hotpooltag | UNF_HOTTAG_FLAG;
+pkg.private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = unf_xchg->hotpooltag;
pkg.frame_head.csctl_sid = lport->nport_id;
pkg.frame_head.rctl_did = rport->nport_id;
......
......@@ -352,7 +352,7 @@ struct unf_rport *unf_find_valid_rport(struct unf_lport *lport, u64 wwpn, u32 si
spin_unlock_irqrestore(rport_state_lock, flags);
FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO,
"[err]Port(0x%x) RPort(0x%p) find by WWPN(0x%llx) is invalid",
"[info]Port(0x%x) RPort(0x%p) find by WWPN(0x%llx) is invalid",
lport->port_id, rport_by_wwpn, wwpn);
rport_by_wwpn = NULL;
......
......@@ -130,7 +130,7 @@ void unf_fill_package(struct unf_frame_pkg *pkg, struct unf_xchg *xchg,
pkg->private_data[PKG_PRIVATE_RPORT_RX_SIZE] = rport->max_frame_size;
}
-pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = xchg->hotpooltag | UNF_HOTTAG_FLAG;
+pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = xchg->hotpooltag;
pkg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] =
xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME];
pkg->private_data[PKG_PRIVATE_LOWLEVEL_XCHG_ADD] =
......@@ -250,7 +250,7 @@ u32 unf_send_abts(struct unf_lport *lport, struct unf_xchg *xchg)
pkg.unf_cmnd_pload_bl.buffer_ptr = (u8 *)xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr;
pkg.unf_cmnd_pload_bl.buf_dma_addr = xchg->fcp_sfs_union.sfs_entry.sfs_buff_phy_addr;
-pkg.private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = xchg->hotpooltag | UNF_HOTTAG_FLAG;
+pkg.private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = xchg->hotpooltag;
UNF_SET_XCHG_ALLOC_TIME(&pkg, xchg);
UNF_SET_ABORT_INFO_IOTYPE(&pkg, xchg);
......@@ -407,19 +407,10 @@ static u32 unf_els_cmnd_default_handler(struct unf_lport *lport, struct unf_xchg
rjt_info.reason_code = UNF_LS_RJT_NOT_SUPPORTED;
unf_rport = unf_get_rport_by_nport_id(lport, sid);
-if (unf_rport) {
-if (unf_rport->rport_index !=
-xchg->private_data[PKG_PRIVATE_XCHG_RPORT_INDEX]) {
-FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-"[warn]Port(0x%x_0x%x) NPort handle(0x%x) from low level is not equal to RPort index(0x%x)",
-lport->port_id, lport->nport_id,
-xchg->private_data[PKG_PRIVATE_XCHG_RPORT_INDEX],
-unf_rport->rport_index);
-}
+if (unf_rport)
ret = unf_send_els_rjt_by_rport(lport, xchg, unf_rport, &rjt_info);
-} else {
+else
ret = unf_send_els_rjt_by_did(lport, xchg, sid, &rjt_info);
-}
return ret;
}
......@@ -1389,7 +1380,7 @@ static void unf_fill_free_xid_pkg(struct unf_xchg *xchg, struct unf_frame_pkg *p
pkg->frame_head.csctl_sid = xchg->sid;
pkg->frame_head.rctl_did = xchg->did;
pkg->frame_head.oxid_rxid = (u32)(((u32)xchg->oxid << UNF_SHIFT_16) | xchg->rxid);
-pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = xchg->hotpooltag | UNF_HOTTAG_FLAG;
+pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = xchg->hotpooltag;
UNF_SET_XCHG_ALLOC_TIME(pkg, xchg);
if (xchg->xchg_type == UNF_XCHG_TYPE_SFS) {
......
......@@ -56,7 +56,7 @@ static struct unf_cfg_item spfc_port_cfg_parm[] = {
{"port_topology", 0, 0xf, 0x20},
{"port_alpa", 0, 0xdead, 0xffff}, /* alpa address of port */
/* queue depth of originator registered to SCSI midlayer */
{"max_queue_depth", 0, 128, 128},
{"max_queue_depth", 0, 512, 512},
{"sest_num", 0, 2048, 2048},
{"max_login", 0, 2048, 2048},
/* nodename from 32 bit to 64 bit */
......
......@@ -1138,7 +1138,7 @@ u32 spfc_scq_recv_iresp(struct spfc_hba_info *hba, union spfc_scqe *wqe)
pkg.private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = iresp->magic_num;
pkg.frame_head.oxid_rxid = (((iresp->wd0.ox_id) << UNF_SHIFT_16) | (iresp->wd0.rx_id));
-hot_tag = (u16)iresp->wd2.hotpooltag & UNF_ORIGIN_HOTTAG_MASK;
+hot_tag = (u16)iresp->wd2.hotpooltag;
/* 2. HotTag validity check */
if (likely(hot_tag >= hba->exi_base && (hot_tag < hba->exi_base + hba->exi_count))) {
pkg.status = UNF_IO_SUCCESS;
......
......@@ -2138,11 +2138,9 @@ static void spfc_free_parent_sq(struct spfc_hba_info *hba,
u32 uidelaycnt = 0;
struct list_head *list = NULL;
struct spfc_suspend_sqe_info *suspend_sqe = NULL;
-ulong flag = 0;
sq_info = &parq_info->parent_sq_info;
-spin_lock_irqsave(&parq_info->parent_queue_state_lock, flag);
while (!list_empty(&sq_info->suspend_sqe_list)) {
list = UNF_OS_LIST_NEXT(&sq_info->suspend_sqe_list);
list_del(list);
......@@ -2156,7 +2154,6 @@ static void spfc_free_parent_sq(struct spfc_hba_info *hba,
kfree(suspend_sqe);
}
}
-spin_unlock_irqrestore(&parq_info->parent_queue_state_lock, flag);
/* Free data cos */
spfc_update_cos_rport_cnt(hba, parq_info->queue_data_cos);
......@@ -4475,9 +4472,7 @@ void spfc_free_parent_queue_info(void *handle, struct spfc_parent_queue_info *pa
* with the sq in the queue of the parent
*/
-spin_unlock_irqrestore(prtq_state_lock, flag);
spfc_free_parent_sq(hba, parent_queue_info);
-spin_lock_irqsave(prtq_state_lock, flag);
/* The initialization of all queue id is invalid */
parent_queue_info->parent_cmd_scq_info.cqm_queue_id = INVALID_VALUE32;
......
......@@ -742,7 +742,7 @@ u32 spfc_scq_recv_abts_rsp(struct spfc_hba_info *hba, union spfc_scqe *scqe)
ox_id = (u32)(abts_rsp->wd0.ox_id);
-hot_tag = abts_rsp->wd1.hotpooltag & UNF_ORIGIN_HOTTAG_MASK;
+hot_tag = abts_rsp->wd1.hotpooltag;
if (unlikely(hot_tag < (u32)hba->exi_base ||
hot_tag >= (u32)(hba->exi_base + hba->exi_count))) {
FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR,
......@@ -1210,7 +1210,7 @@ u32 spfc_scq_recv_ls_gs_rsp(struct spfc_hba_info *hba, union spfc_scqe *scqe)
spfc_swap_16_in_32((u32 *)ls_gs_rsp_scqe->user_id, SPFC_LS_GS_USERID_LEN);
ox_id = ls_gs_rsp_scqe->wd1.ox_id;
-hot_tag = ((u16)(ls_gs_rsp_scqe->wd5.hotpooltag) & UNF_ORIGIN_HOTTAG_MASK) - hba->exi_base;
+hot_tag = ((u16)ls_gs_rsp_scqe->wd5.hotpooltag) - hba->exi_base;
pkg.frame_head.oxid_rxid = (u32)(ls_gs_rsp_scqe->wd1.rx_id) | ox_id << UNF_SHIFT_16;
pkg.private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = ls_gs_rsp_scqe->magic_num;
pkg.private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = hot_tag;
......@@ -1317,8 +1317,7 @@ u32 spfc_scq_recv_els_rsp_sts(struct spfc_hba_info *hba, union spfc_scqe *scqe)
pkg.private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] =
els_rsp_sts_scqe->magic_num;
pkg.frame_head.oxid_rxid = rx_id | (u32)(els_rsp_sts_scqe->wd0.ox_id) << UNF_SHIFT_16;
-hot_tag = (u32)((els_rsp_sts_scqe->wd1.hotpooltag & UNF_ORIGIN_HOTTAG_MASK) -
-hba->exi_base);
+hot_tag = (u32)(els_rsp_sts_scqe->wd1.hotpooltag - hba->exi_base);
if (unlikely(SPFC_SCQE_HAS_ERRCODE(scqe)))
pkg.status = UNF_IO_FAILED;
......@@ -1759,7 +1758,7 @@ u32 spfc_scq_recv_marker_sts(struct spfc_hba_info *hba, union spfc_scqe *scqe)
tmf_marker_sts_scqe = &scqe->itmf_marker_sts;
ox_id = (u32)tmf_marker_sts_scqe->wd1.ox_id;
rx_id = (u32)tmf_marker_sts_scqe->wd1.rx_id;
-hot_tag = (tmf_marker_sts_scqe->wd4.hotpooltag & UNF_ORIGIN_HOTTAG_MASK) - hba->exi_base;
+hot_tag = tmf_marker_sts_scqe->wd4.hotpooltag - hba->exi_base;
pkg.frame_head.oxid_rxid = rx_id | (u32)(ox_id) << UNF_SHIFT_16;
pkg.private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = tmf_marker_sts_scqe->magic_num;
pkg.frame_head.csctl_sid = tmf_marker_sts_scqe->wd3.sid;
......@@ -1800,7 +1799,7 @@ u32 spfc_scq_recv_abts_marker_sts(struct spfc_hba_info *hba, union spfc_scqe *sc
ox_id = (u32)abts_marker_sts_scqe->wd1.ox_id;
rx_id = (u32)abts_marker_sts_scqe->wd1.rx_id;
-hot_tag = (abts_marker_sts_scqe->wd4.hotpooltag & UNF_ORIGIN_HOTTAG_MASK) - hba->exi_base;
+hot_tag = abts_marker_sts_scqe->wd4.hotpooltag - hba->exi_base;
pkg.frame_head.oxid_rxid = rx_id | (u32)(ox_id) << UNF_SHIFT_16;
pkg.frame_head.csctl_sid = abts_marker_sts_scqe->wd3.sid;
pkg.frame_head.rctl_did = abts_marker_sts_scqe->wd2.did;
......@@ -1972,8 +1971,7 @@ u32 spfc_scq_free_xid_sts(struct spfc_hba_info *hba, union spfc_scqe *scqe)
rx_id = (u32)free_xid_sts_scqe->wd0.rx_id;
if (free_xid_sts_scqe->wd1.hotpooltag != INVALID_VALUE16) {
-hot_tag = (free_xid_sts_scqe->wd1.hotpooltag &
-UNF_ORIGIN_HOTTAG_MASK) - hba->exi_base;
+hot_tag = free_xid_sts_scqe->wd1.hotpooltag - hba->exi_base;
}
FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_INFO,
......@@ -1998,7 +1996,7 @@ u32 spfc_scq_exchg_timeout_sts(struct spfc_hba_info *hba, union spfc_scqe *scqe)
rx_id = (u32)time_out_scqe->wd0.rx_id;
if (time_out_scqe->wd1.hotpooltag != INVALID_VALUE16)
-hot_tag = (time_out_scqe->wd1.hotpooltag & UNF_ORIGIN_HOTTAG_MASK) - hba->exi_base;
+hot_tag = time_out_scqe->wd1.hotpooltag - hba->exi_base;
FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_INFO,
"Port(0x%x) recv timer time out sts hotpooltag(0x%x) magicnum(0x%x) ox_id(0x%x) rxid(0x%x) sts(%d)",
......@@ -2054,7 +2052,7 @@ u32 spfc_scq_rcv_sq_nop_sts(struct spfc_hba_info *hba, union spfc_scqe *scqe)
FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO,
"[info]Port(0x%x) rport_index(0x%x) find suspend sqe.",
hba->port_cfg.port_id, rport_index);
-if (sqn < sqn_max) {
+if ((sqn < sqn_max) && (sqn >= sqn_base)) {
ret = spfc_send_nop_cmd(hba, parent_sq_info, magic_num, sqn + 1);
} else if (sqn == sqn_max) {
if (!cancel_delayed_work(&suspend_sqe->timeout_work)) {
......@@ -2065,6 +2063,10 @@ u32 spfc_scq_rcv_sq_nop_sts(struct spfc_hba_info *hba, union spfc_scqe *scqe)
parent_sq_info->need_offloaded = suspend_sqe->old_offload_sts;
ret = spfc_pop_suspend_sqe(hba, prt_qinfo, suspend_sqe);
kfree(suspend_sqe);
+} else {
+FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
+"[warn]Port(0x%x) rport(0x%x) rcv error sqn(0x%x)",
+hba->port_cfg.port_id, rport_index, sqn);
}
} else {
FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
......