Commit 08c048e9 authored by Chenguangli, committed by Yang Yingliang

scsi/hifc: add hifc driver chip resource module

driver inclusion
category: feature
bugzilla: NA

-----------------------------------------------------------------------

This module includes cfg, cqm, hwdev, hwif, mgmt and sml. It is mainly used to
initialize chip capabilities and the resources used for communication between
the driver and the chip.
Signed-off-by: Chenguangli <chenguangli2@huawei.com>
Acked-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent a8d21c00
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef HIFC_API_CMD_H_
#define HIFC_API_CMD_H_
#define HIFC_API_CMD_CELL_CTRL_CELL_LEN_SHIFT 0
#define HIFC_API_CMD_CELL_CTRL_RD_DMA_ATTR_OFF_SHIFT 16
#define HIFC_API_CMD_CELL_CTRL_WR_DMA_ATTR_OFF_SHIFT 24
#define HIFC_API_CMD_CELL_CTRL_XOR_CHKSUM_SHIFT 56
#define HIFC_API_CMD_CELL_CTRL_CELL_LEN_MASK 0x3FU
#define HIFC_API_CMD_CELL_CTRL_RD_DMA_ATTR_OFF_MASK 0x3FU
#define HIFC_API_CMD_CELL_CTRL_WR_DMA_ATTR_OFF_MASK 0x3FU
#define HIFC_API_CMD_CELL_CTRL_XOR_CHKSUM_MASK 0xFFU
#define HIFC_API_CMD_CELL_CTRL_SET(val, member) \
((((u64)val) & HIFC_API_CMD_CELL_CTRL_##member##_MASK) << \
HIFC_API_CMD_CELL_CTRL_##member##_SHIFT)
#define HIFC_API_CMD_DESC_API_TYPE_SHIFT 0
#define HIFC_API_CMD_DESC_RD_WR_SHIFT 1
#define HIFC_API_CMD_DESC_MGMT_BYPASS_SHIFT 2
#define HIFC_API_CMD_DESC_RESP_AEQE_EN_SHIFT 3
#define HIFC_API_CMD_DESC_PRIV_DATA_SHIFT 8
#define HIFC_API_CMD_DESC_DEST_SHIFT 32
#define HIFC_API_CMD_DESC_SIZE_SHIFT 40
#define HIFC_API_CMD_DESC_XOR_CHKSUM_SHIFT 56
#define HIFC_API_CMD_DESC_API_TYPE_MASK 0x1U
#define HIFC_API_CMD_DESC_RD_WR_MASK 0x1U
#define HIFC_API_CMD_DESC_MGMT_BYPASS_MASK 0x1U
#define HIFC_API_CMD_DESC_RESP_AEQE_EN_MASK 0x1U
#define HIFC_API_CMD_DESC_DEST_MASK 0x1FU
#define HIFC_API_CMD_DESC_SIZE_MASK 0x7FFU
#define HIFC_API_CMD_DESC_XOR_CHKSUM_MASK 0xFFU
#define HIFC_API_CMD_DESC_PRIV_DATA_MASK 0xFFFFFFU
#define HIFC_API_CMD_DESC_SET(val, member) \
((((u64)val) & HIFC_API_CMD_DESC_##member##_MASK) << \
HIFC_API_CMD_DESC_##member##_SHIFT)
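/* Illustrative sketch, not part of the original patch: how the SET helper
 * composes the 64-bit cell descriptor. The field values here are
 * hypothetical; the real encoding is owned by the chain send path.
 */
static inline u64 hifc_api_cmd_desc_example(u8 dest, u16 size)
{
	return HIFC_API_CMD_DESC_SET(1, API_TYPE) |
	       HIFC_API_CMD_DESC_SET(0, RD_WR) |
	       HIFC_API_CMD_DESC_SET(1, RESP_AEQE_EN) |
	       HIFC_API_CMD_DESC_SET(dest, DEST) |
	       HIFC_API_CMD_DESC_SET(size, SIZE);
}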
#define HIFC_API_CMD_STATUS_HEADER_VALID_SHIFT 0
#define HIFC_API_CMD_STATUS_HEADER_CHAIN_ID_SHIFT 16
#define HIFC_API_CMD_STATUS_HEADER_VALID_MASK 0xFFU
#define HIFC_API_CMD_STATUS_HEADER_CHAIN_ID_MASK 0xFFU
#define HIFC_API_CMD_STATUS_HEADER_GET(val, member) \
(((val) >> HIFC_API_CMD_STATUS_HEADER_##member##_SHIFT) & \
HIFC_API_CMD_STATUS_HEADER_##member##_MASK)
#define HIFC_API_CMD_CHAIN_REQ_RESTART_SHIFT 1
#define HIFC_API_CMD_CHAIN_REQ_WB_TRIGGER_SHIFT 2
#define HIFC_API_CMD_CHAIN_REQ_RESTART_MASK 0x1U
#define HIFC_API_CMD_CHAIN_REQ_WB_TRIGGER_MASK 0x1U
#define HIFC_API_CMD_CHAIN_REQ_SET(val, member) \
(((val) & HIFC_API_CMD_CHAIN_REQ_##member##_MASK) << \
HIFC_API_CMD_CHAIN_REQ_##member##_SHIFT)
#define HIFC_API_CMD_CHAIN_REQ_GET(val, member) \
(((val) >> HIFC_API_CMD_CHAIN_REQ_##member##_SHIFT) & \
HIFC_API_CMD_CHAIN_REQ_##member##_MASK)
#define HIFC_API_CMD_CHAIN_REQ_CLEAR(val, member) \
((val) & (~(HIFC_API_CMD_CHAIN_REQ_##member##_MASK \
<< HIFC_API_CMD_CHAIN_REQ_##member##_SHIFT)))
#define HIFC_API_CMD_CHAIN_CTRL_RESTART_EN_SHIFT 1
#define HIFC_API_CMD_CHAIN_CTRL_XOR_ERR_SHIFT 2
#define HIFC_API_CMD_CHAIN_CTRL_AEQE_EN_SHIFT 4
#define HIFC_API_CMD_CHAIN_CTRL_AEQ_ID_SHIFT 8
#define HIFC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_SHIFT 28
#define HIFC_API_CMD_CHAIN_CTRL_CELL_SIZE_SHIFT 30
#define HIFC_API_CMD_CHAIN_CTRL_RESTART_EN_MASK 0x1U
#define HIFC_API_CMD_CHAIN_CTRL_XOR_ERR_MASK 0x1U
#define HIFC_API_CMD_CHAIN_CTRL_AEQE_EN_MASK 0x1U
#define HIFC_API_CMD_CHAIN_CTRL_AEQ_ID_MASK 0x3U
#define HIFC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_MASK 0x3U
#define HIFC_API_CMD_CHAIN_CTRL_CELL_SIZE_MASK 0x3U
#define HIFC_API_CMD_CHAIN_CTRL_SET(val, member) \
(((val) & HIFC_API_CMD_CHAIN_CTRL_##member##_MASK) << \
HIFC_API_CMD_CHAIN_CTRL_##member##_SHIFT)
#define HIFC_API_CMD_CHAIN_CTRL_CLEAR(val, member) \
((val) & (~(HIFC_API_CMD_CHAIN_CTRL_##member##_MASK \
<< HIFC_API_CMD_CHAIN_CTRL_##member##_SHIFT)))
#define HIFC_API_CMD_RESP_HEAD_VALID_MASK 0xFF
#define HIFC_API_CMD_RESP_HEAD_VALID_CODE 0xFF
#define HIFC_API_CMD_RESP_HEADER_VALID(val) \
(((val) & HIFC_API_CMD_RESP_HEAD_VALID_MASK) == \
HIFC_API_CMD_RESP_HEAD_VALID_CODE)
#define HIFC_API_CMD_STATUS_CONS_IDX_MASK 0xFFFFFFU
#define HIFC_API_CMD_STATUS_CONS_IDX_SHIFT 0
#define HIFC_API_CMD_STATUS_FSM_MASK 0xFU
#define HIFC_API_CMD_STATUS_FSM_SHIFT 24
#define HIFC_API_CMD_STATUS_CHKSUM_ERR_MASK 0x3U
#define HIFC_API_CMD_STATUS_CHKSUM_ERR_SHIFT 28
#define HIFC_API_CMD_STATUS_CPLD_ERR_MASK 0x1U
#define HIFC_API_CMD_STATUS_CPLD_ERR_SHIFT 30
#define HIFC_API_CMD_STATUS_GET(val, member) \
(((val) >> HIFC_API_CMD_STATUS_##member##_SHIFT) & \
HIFC_API_CMD_STATUS_##member##_MASK)
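/* Illustrative sketch, not part of the original patch: decoding the
 * write-back status word with the GET helper. 'status' stands for the
 * 32-bit value the hardware DMAs back (hypothetical usage).
 */
static inline u32 hifc_api_cmd_status_cons_idx_example(u32 status)
{
	if (HIFC_API_CMD_STATUS_GET(status, CHKSUM_ERR))
		return 0;	/* checksum error reported by hardware */
	return HIFC_API_CMD_STATUS_GET(status, CONS_IDX);
}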
/* API CMD registers */
#define HIFC_CSR_API_CMD_BASE 0xF000
#define HIFC_CSR_API_CMD_STRIDE 0x100
#define HIFC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(idx) \
(HIFC_CSR_API_CMD_BASE + 0x0 + (idx) * HIFC_CSR_API_CMD_STRIDE)
#define HIFC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(idx) \
(HIFC_CSR_API_CMD_BASE + 0x4 + (idx) * HIFC_CSR_API_CMD_STRIDE)
#define HIFC_CSR_API_CMD_STATUS_HI_ADDR(idx) \
(HIFC_CSR_API_CMD_BASE + 0x8 + (idx) * HIFC_CSR_API_CMD_STRIDE)
#define HIFC_CSR_API_CMD_STATUS_LO_ADDR(idx) \
(HIFC_CSR_API_CMD_BASE + 0xC + (idx) * HIFC_CSR_API_CMD_STRIDE)
#define HIFC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(idx) \
(HIFC_CSR_API_CMD_BASE + 0x10 + (idx) * HIFC_CSR_API_CMD_STRIDE)
#define HIFC_CSR_API_CMD_CHAIN_CTRL_ADDR(idx) \
(HIFC_CSR_API_CMD_BASE + 0x14 + (idx) * HIFC_CSR_API_CMD_STRIDE)
#define HIFC_CSR_API_CMD_CHAIN_PI_ADDR(idx) \
(HIFC_CSR_API_CMD_BASE + 0x1C + (idx) * HIFC_CSR_API_CMD_STRIDE)
#define HIFC_CSR_API_CMD_CHAIN_REQ_ADDR(idx) \
(HIFC_CSR_API_CMD_BASE + 0x20 + (idx) * HIFC_CSR_API_CMD_STRIDE)
#define HIFC_CSR_API_CMD_STATUS_0_ADDR(idx) \
(HIFC_CSR_API_CMD_BASE + 0x30 + (idx) * HIFC_CSR_API_CMD_STRIDE)
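/* Worked example (not part of the original patch): each chain owns a
 * 0x100-byte CSR window above the 0xF000 base, so for chain 2 the
 * producer-index register sits at 0xF000 + 0x1C + 2 * 0x100 = 0xF21C.
 */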
enum hifc_api_cmd_chain_type {
/* write command with completion notification */
HIFC_API_CMD_WRITE = 0,
/* read command with completion notification */
HIFC_API_CMD_READ = 1,
/* write to mgmt cpu command with completion */
HIFC_API_CMD_WRITE_TO_MGMT_CPU = 2,
/* multi read command with completion notification - not used */
HIFC_API_CMD_MULTI_READ = 3,
/* write command without completion notification */
HIFC_API_CMD_POLL_WRITE = 4,
/* read command without completion notification */
HIFC_API_CMD_POLL_READ = 5,
/* write to mgmt cpu command without waiting for completion */
HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU = 6,
HIFC_API_CMD_MAX,
};
struct hifc_api_cmd_status {
u64 header;
u32 buf_desc;
u32 cell_addr_hi;
u32 cell_addr_lo;
u32 rsvd0;
u64 rsvd1;
};
/* HW struct */
struct hifc_api_cmd_cell {
u64 ctrl;
/* address is 64 bit in HW struct */
u64 next_cell_paddr;
u64 desc;
/* HW struct */
union {
struct {
u64 hw_cmd_paddr;
} write;
struct {
u64 hw_wb_resp_paddr;
u64 hw_cmd_paddr;
} read;
};
};
struct hifc_api_cmd_resp_fmt {
u64 header;
u64 rsvd[3];
u64 resp_data;
};
struct hifc_api_cmd_cell_ctxt {
struct hifc_api_cmd_cell *cell_vaddr;
void *api_cmd_vaddr;
struct hifc_api_cmd_resp_fmt *resp;
struct completion done;
int status;
u32 saved_prod_idx;
};
struct hifc_api_cmd_chain_attr {
struct hifc_hwdev *hwdev;
enum hifc_api_cmd_chain_type chain_type;
u32 num_cells;
u16 rsp_size;
u16 cell_size;
};
struct hifc_api_cmd_chain {
struct hifc_hwdev *hwdev;
enum hifc_api_cmd_chain_type chain_type;
u32 num_cells;
u16 cell_size;
u16 rsp_size;
/* HW members is 24 bit format */
u32 prod_idx;
u32 cons_idx;
struct semaphore sem;
/* Async cmd can not be scheduled */
spinlock_t async_lock;
dma_addr_t wb_status_paddr;
struct hifc_api_cmd_status *wb_status;
dma_addr_t head_cell_paddr;
struct hifc_api_cmd_cell *head_node;
struct hifc_api_cmd_cell_ctxt *cell_ctxt;
struct hifc_api_cmd_cell *curr_node;
struct hifc_dma_addr_align cells_addr;
u8 *cell_vaddr_base;
u64 cell_paddr_base;
u8 *rsp_vaddr_base;
u64 rsp_paddr_base;
u8 *buf_vaddr_base;
u64 buf_paddr_base;
u64 cell_size_align;
u64 rsp_size_align;
u64 buf_size_align;
};
int hifc_api_cmd_write(struct hifc_api_cmd_chain *chain,
enum hifc_node_id dest, void *cmd, u16 size);
int hifc_api_cmd_read(struct hifc_api_cmd_chain *chain,
enum hifc_node_id dest, void *cmd, u16 size,
void *ack, u16 ack_size);
int hifc_api_cmd_init(struct hifc_hwdev *hwdev,
struct hifc_api_cmd_chain **chain);
void hifc_api_cmd_free(struct hifc_api_cmd_chain **chain);
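/* Illustrative usage sketch, not part of the original patch: initialize
 * the chains once per device, issue a poll-mode write and read, then free
 * them on teardown. 'dest', 'cmd' and the buffer sizes are hypothetical.
 */
static inline int hifc_api_cmd_example(struct hifc_hwdev *hwdev,
				       enum hifc_node_id dest)
{
	struct hifc_api_cmd_chain *chain[HIFC_API_CMD_MAX];
	u8 cmd[64] = {0};
	u8 ack[64];
	int err;

	err = hifc_api_cmd_init(hwdev, chain);
	if (err)
		return err;

	err = hifc_api_cmd_write(chain[HIFC_API_CMD_POLL_WRITE], dest,
				 cmd, sizeof(cmd));
	if (!err)
		err = hifc_api_cmd_read(chain[HIFC_API_CMD_POLL_READ], dest,
					cmd, sizeof(cmd), ack, sizeof(ack));

	hifc_api_cmd_free(chain);
	return err;
}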
#endif
// SPDX-License-Identifier: GPL-2.0
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/semaphore.h>
#include <linux/vmalloc.h>
#include "hifc_knl_adp.h"
#include "hifc_hw.h"
#include "hifc_hwdev.h"
#include "hifc_hwif.h"
#include "hifc_cqm_main.h"
#include "hifc_api_cmd.h"
#include "hifc_hw.h"
#include "hifc_mgmt.h"
#include "hifc_cfg.h"
uint intr_mode;
int hifc_sync_time(void *hwdev, u64 time)
{
struct hifc_sync_time_info time_info = {0};
u16 out_size = sizeof(time_info);
int err;
time_info.mstime = time;
err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM,
HIFC_MGMT_CMD_SYNC_TIME, &time_info,
sizeof(time_info), &time_info, &out_size,
0);
if (err || time_info.status || !out_size) {
sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl,
"Failed to sync time to mgmt, err: %d, status: 0x%x, out size: 0x%x\n",
err, time_info.status, out_size);
}
return err;
}
static void parse_pub_res_cap(struct service_cap *cap,
struct hifc_dev_cap *dev_cap,
enum func_type type)
{
cap->port_id = dev_cap->port_id;
cap->force_up = dev_cap->force_up;
pr_info("Get public resource capbility, force_up: 0x%x\n",
cap->force_up);
/* FC needs the max queue number, but that info is carried in the
 * l2nic cap; it is duplicated in the public cap so FC can read the
 * correct max queue number from there.
 */
cap->max_sqs = dev_cap->nic_max_sq + 1;
cap->max_rqs = dev_cap->nic_max_rq + 1;
cap->host_total_function = dev_cap->host_total_func;
cap->host_oq_id_mask_val = dev_cap->host_oq_id_mask_val;
cap->max_connect_num = dev_cap->max_conn_num;
cap->max_stick2cache_num = dev_cap->max_stick2cache_num;
pr_info("Get public resource capbility, svc_cap_en: 0x%x\n",
dev_cap->svc_cap_en);
pr_info("port_id=0x%x\n", cap->port_id);
pr_info("Host_total_function=0x%x, host_oq_id_mask_val=0x%x\n",
cap->host_total_function, cap->host_oq_id_mask_val);
}
static void parse_fc_res_cap(struct service_cap *cap,
struct hifc_dev_cap *dev_cap,
enum func_type type)
{
struct dev_fc_svc_cap *fc_cap = &cap->fc_cap.dev_fc_cap;
fc_cap->max_parent_qpc_num = dev_cap->fc_max_pctx;
fc_cap->scq_num = dev_cap->fc_max_scq;
fc_cap->srq_num = dev_cap->fc_max_srq;
fc_cap->max_child_qpc_num = dev_cap->fc_max_cctx;
fc_cap->vp_id_start = dev_cap->fc_vp_id_start;
fc_cap->vp_id_end = dev_cap->fc_vp_id_end;
pr_info("Get fc resource capbility\n");
pr_info("Max_parent_qpc_num=0x%x, scq_num=0x%x, srq_num=0x%x, max_child_qpc_num=0x%x\n",
fc_cap->max_parent_qpc_num, fc_cap->scq_num, fc_cap->srq_num,
fc_cap->max_child_qpc_num);
pr_info("Vp_id_start=0x%x, vp_id_end=0x%x\n",
fc_cap->vp_id_start, fc_cap->vp_id_end);
}
static void parse_dev_cap(struct hifc_hwdev *dev,
struct hifc_dev_cap *dev_cap, enum func_type type)
{
struct service_cap *cap = &dev->cfg_mgmt->svc_cap;
/* Public resource */
parse_pub_res_cap(cap, dev_cap, type);
/* PPF managed dynamic resource */
parse_fc_res_cap(cap, dev_cap, type);
}
static int get_cap_from_fw(struct hifc_hwdev *dev, enum func_type type)
{
struct hifc_dev_cap dev_cap = {0};
u16 out_len = sizeof(dev_cap);
int err;
dev_cap.version = HIFC_CMD_VER_FUNC_ID;
err = hifc_global_func_id_get(dev, &dev_cap.func_id);
if (err)
return err;
sdk_info(dev->dev_hdl, "Get cap from fw, func_idx: %d\n",
dev_cap.func_id);
err = hifc_msg_to_mgmt_sync(dev, HIFC_MOD_CFGM, HIFC_CFG_NIC_CAP,
&dev_cap, sizeof(dev_cap),
&dev_cap, &out_len, 0);
if (err || dev_cap.status || !out_len) {
sdk_err(dev->dev_hdl,
"Failed to get capability from FW, err: %d, status: 0x%x, out size: 0x%x\n",
err, dev_cap.status, out_len);
return -EFAULT;
}
parse_dev_cap(dev, &dev_cap, type);
return 0;
}
static void fc_param_fix(struct hifc_hwdev *dev)
{
struct service_cap *cap = &dev->cfg_mgmt->svc_cap;
struct fc_service_cap *fc_cap = &cap->fc_cap;
fc_cap->parent_qpc_size = FC_PCTX_SZ;
fc_cap->child_qpc_size = FC_CCTX_SZ;
fc_cap->sqe_size = FC_SQE_SZ;
fc_cap->scqc_size = FC_SCQC_SZ;
fc_cap->scqe_size = FC_SCQE_SZ;
fc_cap->srqc_size = FC_SRQC_SZ;
fc_cap->srqe_size = FC_SRQE_SZ;
}
static void cfg_get_eq_num(struct hifc_hwdev *dev)
{
struct cfg_eq_info *eq_info = &dev->cfg_mgmt->eq_info;
eq_info->num_ceq = dev->hwif->attr.num_ceqs;
eq_info->num_ceq_remain = eq_info->num_ceq;
}
static int cfg_init_eq(struct hifc_hwdev *dev)
{
struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt;
struct cfg_eq *eq;
u8 num_ceq, i = 0;
cfg_get_eq_num(dev);
num_ceq = cfg_mgmt->eq_info.num_ceq;
sdk_info(dev->dev_hdl, "Cfg mgmt: ceqs=0x%x, remain=0x%x\n",
cfg_mgmt->eq_info.num_ceq, cfg_mgmt->eq_info.num_ceq_remain);
if (!num_ceq) {
sdk_err(dev->dev_hdl, "Ceq num cfg in fw is zero\n");
return -EFAULT;
}
eq = kcalloc(num_ceq, sizeof(*eq), GFP_KERNEL);
if (!eq)
return -ENOMEM;
for (i = 0; i < num_ceq; ++i) {
eq[i].eqn = i;
eq[i].free = CFG_FREE;
eq[i].type = SERVICE_T_MAX;
}
cfg_mgmt->eq_info.eq = eq;
mutex_init(&cfg_mgmt->eq_info.eq_mutex);
return 0;
}
static int cfg_init_interrupt(struct hifc_hwdev *dev)
{
struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt;
struct cfg_irq_info *irq_info = &cfg_mgmt->irq_param_info;
u16 intr_num = dev->hwif->attr.num_irqs;
if (!intr_num) {
sdk_err(dev->dev_hdl, "Irq num cfg in fw is zero\n");
return -EFAULT;
}
irq_info->alloc_info = kcalloc(intr_num, sizeof(*irq_info->alloc_info),
GFP_KERNEL);
if (!irq_info->alloc_info)
return -ENOMEM;
irq_info->num_irq_hw = intr_num;
cfg_mgmt->svc_cap.interrupt_type = intr_mode;
mutex_init(&irq_info->irq_mutex);
return 0;
}
static int cfg_enable_interrupt(struct hifc_hwdev *dev)
{
struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt;
u16 nreq = cfg_mgmt->irq_param_info.num_irq_hw;
void *pcidev = dev->pcidev_hdl;
struct irq_alloc_info_st *irq_info;
struct msix_entry *entry;
u16 i = 0;
int actual_irq;
irq_info = cfg_mgmt->irq_param_info.alloc_info;
sdk_info(dev->dev_hdl, "Interrupt type: %d, irq num: %d.\n",
cfg_mgmt->svc_cap.interrupt_type, nreq);
switch (cfg_mgmt->svc_cap.interrupt_type) {
case INTR_TYPE_MSIX:
if (!nreq) {
sdk_err(dev->dev_hdl, "Interrupt number cannot be zero\n");
return -EINVAL;
}
entry = kcalloc(nreq, sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
for (i = 0; i < nreq; i++)
entry[i].entry = i;
actual_irq = pci_enable_msix_range(pcidev, entry,
VECTOR_THRESHOLD, nreq);
if (actual_irq < 0) {
sdk_err(dev->dev_hdl, "Alloc msix entries with threshold 2 failed.\n");
kfree(entry);
return -ENOMEM;
}
nreq = (u16)actual_irq;
cfg_mgmt->irq_param_info.num_total = nreq;
cfg_mgmt->irq_param_info.num_irq_remain = nreq;
sdk_info(dev->dev_hdl, "Request %d msix vector success.\n",
nreq);
for (i = 0; i < nreq; ++i) {
/* u16 index the driver used to specify the entry */
irq_info[i].info.msix_entry_idx = entry[i].entry;
/* u32 vector written back by the kernel on allocation */
irq_info[i].info.irq_id = entry[i].vector;
irq_info[i].type = SERVICE_T_MAX;
irq_info[i].free = CFG_FREE;
}
kfree(entry);
break;
default:
sdk_err(dev->dev_hdl, "Unsupport interrupt type %d\n",
cfg_mgmt->svc_cap.interrupt_type);
break;
}
return 0;
}
int hifc_alloc_irqs(void *hwdev, enum hifc_service_type type, u16 num,
struct irq_info *irq_info_array, u16 *act_num)
{
struct hifc_hwdev *dev = hwdev;
struct cfg_mgmt_info *cfg_mgmt;
struct cfg_irq_info *irq_info;
struct irq_alloc_info_st *alloc_info;
int max_num_irq;
u16 free_num_irq;
int i, j;
if (!hwdev || !irq_info_array || !act_num)
return -EINVAL;
cfg_mgmt = dev->cfg_mgmt;
irq_info = &cfg_mgmt->irq_param_info;
alloc_info = irq_info->alloc_info;
max_num_irq = irq_info->num_total;
free_num_irq = irq_info->num_irq_remain;
mutex_lock(&irq_info->irq_mutex);
if (num > free_num_irq) {
if (free_num_irq == 0) {
sdk_err(dev->dev_hdl,
"no free irq resource in cfg mgmt.\n");
mutex_unlock(&irq_info->irq_mutex);
return -ENOMEM;
}
sdk_warn(dev->dev_hdl, "only %d irq resource in cfg mgmt.\n",
free_num_irq);
num = free_num_irq;
}
*act_num = 0;
for (i = 0; i < num; i++) {
for (j = 0; j < max_num_irq; j++) {
if (alloc_info[j].free == CFG_FREE) {
if (irq_info->num_irq_remain == 0) {
sdk_err(dev->dev_hdl, "No free irq resource in cfg mgmt\n");
mutex_unlock(&irq_info->irq_mutex);
return -EINVAL;
}
alloc_info[j].type = type;
alloc_info[j].free = CFG_BUSY;
irq_info_array[i].msix_entry_idx =
alloc_info[j].info.msix_entry_idx;
irq_info_array[i].irq_id =
alloc_info[j].info.irq_id;
(*act_num)++;
irq_info->num_irq_remain--;
break;
}
}
}
mutex_unlock(&irq_info->irq_mutex);
return 0;
}
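/* Illustrative usage sketch, not part of the original patch: a service
 * requesting two vectors and accepting a partial grant. The service type
 * (SERVICE_T_FC) and the count are assumptions.
 */
static int __maybe_unused example_alloc_service_irqs(void *hwdev)
{
	struct irq_info irqs[2];
	u16 act_num = 0;
	int err;

	err = hifc_alloc_irqs(hwdev, SERVICE_T_FC, 2, irqs, &act_num);
	if (err)
		return err;
	/* act_num may be smaller than requested when resources run short */
	return act_num;
}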
void hifc_free_irq(void *hwdev, enum hifc_service_type type, u32 irq_id)
{
struct hifc_hwdev *dev = hwdev;
struct cfg_mgmt_info *cfg_mgmt;
struct cfg_irq_info *irq_info;
struct irq_alloc_info_st *alloc_info;
int max_num_irq;
int i;
if (!hwdev)
return;
cfg_mgmt = dev->cfg_mgmt;
irq_info = &cfg_mgmt->irq_param_info;
alloc_info = irq_info->alloc_info;
max_num_irq = irq_info->num_total;
mutex_lock(&irq_info->irq_mutex);
for (i = 0; i < max_num_irq; i++) {
if (irq_id == alloc_info[i].info.irq_id &&
type == alloc_info[i].type) {
if (alloc_info[i].free == CFG_BUSY) {
alloc_info[i].free = CFG_FREE;
irq_info->num_irq_remain++;
if (irq_info->num_irq_remain > max_num_irq) {
sdk_err(dev->dev_hdl, "Find target,but over range\n");
mutex_unlock(&irq_info->irq_mutex);
return;
}
break;
}
}
}
if (i >= max_num_irq)
sdk_warn(dev->dev_hdl, "Irq %d don`t need to free\n", irq_id);
mutex_unlock(&irq_info->irq_mutex);
}
int init_cfg_mgmt(struct hifc_hwdev *dev)
{
int err;
struct cfg_mgmt_info *cfg_mgmt;
cfg_mgmt = kzalloc(sizeof(*cfg_mgmt), GFP_KERNEL);
if (!cfg_mgmt)
return -ENOMEM;
dev->cfg_mgmt = cfg_mgmt;
cfg_mgmt->hwdev = dev;
err = cfg_init_eq(dev);
if (err) {
sdk_err(dev->dev_hdl, "Failed to init cfg event queue, err: %d\n",
err);
goto free_mgmt_mem;
}
err = cfg_init_interrupt(dev);
if (err) {
sdk_err(dev->dev_hdl, "Failed to init cfg interrupt, err: %d\n",
err);
goto free_eq_mem;
}
err = cfg_enable_interrupt(dev);
if (err) {
sdk_err(dev->dev_hdl, "Failed to enable cfg interrupt, err: %d\n",
err);
goto free_interrupt_mem;
}
return 0;
free_interrupt_mem:
kfree(cfg_mgmt->irq_param_info.alloc_info);
cfg_mgmt->irq_param_info.alloc_info = NULL;
free_eq_mem:
kfree(cfg_mgmt->eq_info.eq);
cfg_mgmt->eq_info.eq = NULL;
free_mgmt_mem:
kfree(cfg_mgmt);
return err;
}
void free_cfg_mgmt(struct hifc_hwdev *dev)
{
struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt;
/* check whether all allocated resources have been recycled */
if (cfg_mgmt->irq_param_info.num_irq_remain !=
cfg_mgmt->irq_param_info.num_total ||
cfg_mgmt->eq_info.num_ceq_remain != cfg_mgmt->eq_info.num_ceq)
sdk_err(dev->dev_hdl, "Can't reclaim all irq and event queue, please check\n");
switch (cfg_mgmt->svc_cap.interrupt_type) {
case INTR_TYPE_MSIX:
pci_disable_msix(dev->pcidev_hdl);
break;
case INTR_TYPE_MSI:
pci_disable_msi(dev->pcidev_hdl);
break;
case INTR_TYPE_INT:
default:
break;
}
kfree(cfg_mgmt->irq_param_info.alloc_info);
cfg_mgmt->irq_param_info.alloc_info = NULL;
kfree(cfg_mgmt->eq_info.eq);
cfg_mgmt->eq_info.eq = NULL;
kfree(cfg_mgmt);
}
int init_capability(struct hifc_hwdev *dev)
{
int err;
enum func_type type = HIFC_FUNC_TYPE(dev);
struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt;
cfg_mgmt->svc_cap.timer_en = 1;
cfg_mgmt->svc_cap.test_xid_alloc_mode = 1;
cfg_mgmt->svc_cap.test_gpa_check_enable = 1;
err = get_cap_from_fw(dev, type);
if (err) {
sdk_err(dev->dev_hdl, "Failed to get PF/PPF capability\n");
return err;
}
fc_param_fix(dev);
if (dev->cfg_mgmt->svc_cap.force_up)
dev->feature_cap |= HIFC_FUNC_FORCE_LINK_UP;
sdk_info(dev->dev_hdl, "Init capability success\n");
return 0;
}
void free_capability(struct hifc_hwdev *dev)
{
sdk_info(dev->dev_hdl, "Free capability success");
}
bool hifc_support_fc(void *hwdev, struct fc_service_cap *cap)
{
struct hifc_hwdev *dev = hwdev;
if (!hwdev)
return false;
if (cap)
memcpy(cap, &dev->cfg_mgmt->svc_cap.fc_cap, sizeof(*cap));
return true;
}
u8 hifc_host_oq_id_mask(void *hwdev)
{
struct hifc_hwdev *dev = hwdev;
if (!dev) {
pr_err("Hwdev pointer is NULL for getting host oq id mask\n");
return 0;
}
return dev->cfg_mgmt->svc_cap.host_oq_id_mask_val;
}
u16 hifc_func_max_qnum(void *hwdev)
{
struct hifc_hwdev *dev = hwdev;
if (!dev) {
pr_err("Hwdev pointer is NULL for getting function max queue number\n");
return 0;
}
return dev->cfg_mgmt->svc_cap.max_sqs;
}
/* Caller should ensure atomicity when calling this function */
int hifc_stateful_init(void *hwdev)
{
struct hifc_hwdev *dev = hwdev;
int err;
if (!dev)
return -EINVAL;
if (dev->statufull_ref_cnt++)
return 0;
err = cqm_init(dev);
if (err) {
sdk_err(dev->dev_hdl, "Failed to init cqm, err: %d\n", err);
goto init_cqm_err;
}
sdk_info(dev->dev_hdl, "Initialize statefull resource success\n");
return 0;
init_cqm_err:
dev->statufull_ref_cnt--;
return err;
}
/* Caller should ensure atomicity when calling this function */
void hifc_stateful_deinit(void *hwdev)
{
struct hifc_hwdev *dev = hwdev;
if (!dev || !dev->statufull_ref_cnt)
return;
if (--dev->statufull_ref_cnt)
return;
cqm_uninit(hwdev);
sdk_info(dev->dev_hdl, "Clear statefull resource success\n");
}
bool hifc_is_hwdev_mod_inited(void *hwdev, enum hifc_hwdev_init_state state)
{
struct hifc_hwdev *dev = hwdev;
if (!hwdev || state >= HIFC_HWDEV_MAX_INVAL_INITED)
return false;
return !!test_bit(state, &dev->func_state);
}
static int hifc_os_dep_init(struct hifc_hwdev *hwdev)
{
hwdev->workq = create_singlethread_workqueue(HIFC_HW_WQ_NAME);
if (!hwdev->workq) {
sdk_err(hwdev->dev_hdl, "Failed to initialize hardware workqueue\n");
return -EFAULT;
}
sema_init(&hwdev->fault_list_sem, 1);
return 0;
}
static void hifc_os_dep_deinit(struct hifc_hwdev *hwdev)
{
destroy_workqueue(hwdev->workq);
}
static int __hilink_phy_init(struct hifc_hwdev *hwdev)
{
int err;
err = hifc_phy_init_status_judge(hwdev);
if (err) {
sdk_info(hwdev->dev_hdl, "Phy init failed\n");
return err;
}
return 0;
}
static int init_hwdev_and_hwif(struct hifc_init_para *para)
{
struct hifc_hwdev *hwdev;
int err;
if (!(*para->hwdev)) {
hwdev = kzalloc(sizeof(*hwdev), GFP_KERNEL);
if (!hwdev)
return -ENOMEM;
*para->hwdev = hwdev;
hwdev->adapter_hdl = para->adapter_hdl;
hwdev->pcidev_hdl = para->pcidev_hdl;
hwdev->dev_hdl = para->dev_hdl;
hwdev->chip_node = para->chip_node;
hwdev->chip_fault_stats = vzalloc(HIFC_CHIP_FAULT_SIZE);
if (!hwdev->chip_fault_stats)
goto alloc_chip_fault_stats_err;
err = hifc_init_hwif(hwdev, para->cfg_reg_base,
para->intr_reg_base,
para->db_base_phy, para->db_base,
para->dwqe_mapping);
if (err) {
sdk_err(hwdev->dev_hdl, "Failed to init hwif\n");
goto init_hwif_err;
}
}
return 0;
init_hwif_err:
vfree(hwdev->chip_fault_stats);
alloc_chip_fault_stats_err:
*para->hwdev = NULL;
return -EFAULT;
}
static void deinit_hwdev_and_hwif(struct hifc_hwdev *hwdev)
{
hifc_free_hwif(hwdev);
vfree(hwdev->chip_fault_stats);
kfree(hwdev);
}
static int init_hw_cfg(struct hifc_hwdev *hwdev)
{
int err;
err = init_capability(hwdev);
if (err) {
sdk_err(hwdev->dev_hdl, "Failed to init capability\n");
return err;
}
err = __hilink_phy_init(hwdev);
if (err)
goto hilink_phy_init_err;
return 0;
hilink_phy_init_err:
free_capability(hwdev);
return err;
}
/* Return:
 * 0: all success
 * >0: partial success
 * <0: all failed
 */
int hifc_init_hwdev(struct hifc_init_para *para)
{
struct hifc_hwdev *hwdev;
int err;
err = init_hwdev_and_hwif(para);
if (err)
return err;
hwdev = *para->hwdev;
/* detect slave host according to BAR reg */
hwdev->feature_cap = HIFC_FUNC_MGMT | HIFC_FUNC_PORT |
HIFC_FUNC_SUPP_RATE_LIMIT | HIFC_FUNC_SUPP_DFX_REG |
HIFC_FUNC_SUPP_RX_MODE | HIFC_FUNC_SUPP_SET_VF_MAC_VLAN |
HIFC_FUNC_SUPP_CHANGE_MAC;
err = hifc_os_dep_init(hwdev);
if (err) {
sdk_err(hwdev->dev_hdl, "Failed to init os dependent\n");
goto os_dep_init_err;
}
hifc_set_chip_present(hwdev);
hifc_init_heartbeat(hwdev);
err = init_cfg_mgmt(hwdev);
if (err) {
sdk_err(hwdev->dev_hdl, "Failed to init config mgmt\n");
goto init_cfg_mgmt_err;
}
err = hifc_init_comm_ch(hwdev);
if (err) {
if (!(hwdev->func_state & HIFC_HWDEV_INIT_MODES_MASK)) {
sdk_err(hwdev->dev_hdl, "Failed to init communication channel\n");
goto init_comm_ch_err;
} else {
sdk_err(hwdev->dev_hdl, "Init communication channel partitail failed\n");
return hwdev->func_state & HIFC_HWDEV_INIT_MODES_MASK;
}
}
err = init_hw_cfg(hwdev);
if (err) {
sdk_err(hwdev->dev_hdl, "Failed to init hardware config\n");
goto init_hw_cfg_err;
}
set_bit(HIFC_HWDEV_ALL_INITED, &hwdev->func_state);
sdk_info(hwdev->dev_hdl, "Init hwdev success\n");
return 0;
init_hw_cfg_err:
return (hwdev->func_state & HIFC_HWDEV_INIT_MODES_MASK);
init_comm_ch_err:
free_cfg_mgmt(hwdev);
init_cfg_mgmt_err:
hifc_destroy_heartbeat(hwdev);
hifc_os_dep_deinit(hwdev);
os_dep_init_err:
deinit_hwdev_and_hwif(hwdev);
*para->hwdev = NULL;
return -EFAULT;
}
void hifc_free_hwdev(void *hwdev)
{
struct hifc_hwdev *dev = hwdev;
enum hifc_hwdev_init_state state = HIFC_HWDEV_ALL_INITED;
int flag = 0;
if (!hwdev)
return;
if (test_bit(HIFC_HWDEV_ALL_INITED, &dev->func_state)) {
clear_bit(HIFC_HWDEV_ALL_INITED, &dev->func_state);
/* BM slave functions do not need to exec rx_tx_flush */
hifc_func_rx_tx_flush(hwdev);
free_capability(dev);
}
while (state > HIFC_HWDEV_NONE_INITED) {
if (test_bit(state, &dev->func_state)) {
flag = 1;
break;
}
state--;
}
if (flag) {
hifc_uninit_comm_ch(dev);
free_cfg_mgmt(dev);
hifc_destroy_heartbeat(dev);
hifc_os_dep_deinit(dev);
}
clear_bit(HIFC_HWDEV_NONE_INITED, &dev->func_state);
deinit_hwdev_and_hwif(dev);
}
u64 hifc_get_func_feature_cap(void *hwdev)
{
struct hifc_hwdev *dev = hwdev;
if (!dev) {
pr_err("Hwdev pointer is NULL for getting function feature capability\n");
return 0;
}
return dev->feature_cap;
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef __CFG_MGT_H__
#define __CFG_MGT_H__
enum {
CFG_FREE = 0,
CFG_BUSY = 1
};
/* FC */
#define FC_PCTX_SZ 256
#define FC_CCTX_SZ 256
#define FC_SQE_SZ 128
#define FC_SCQC_SZ 64
#define FC_SCQE_SZ 64
#define FC_SRQC_SZ 64
#define FC_SRQE_SZ 32
/* device capability */
struct service_cap {
/* Host global resources */
u16 host_total_function;
u8 host_oq_id_mask_val;
/* DO NOT get interrupt_type from firmware */
enum intr_type interrupt_type;
u8 intr_chip_en;
u8 port_id; /* PF/VF's physical port */
u8 force_up;
u8 timer_en; /* 0:disable, 1:enable */
u16 max_sqs;
u16 max_rqs;
/* For test */
bool test_xid_alloc_mode;
bool test_gpa_check_enable;
u32 max_connect_num; /* PF/VF maximum connection number (1M) */
/* The maximum connections which can be stick to cache memory, max 1K */
u16 max_stick2cache_num;
struct nic_service_cap nic_cap; /* NIC capability */
struct fc_service_cap fc_cap; /* FC capability */
};
struct hifc_sync_time_info {
u8 status;
u8 version;
u8 rsvd0[6];
u64 mstime;
};
struct cfg_eq {
enum hifc_service_type type;
int eqn;
int free; /* 1 - allocated, 0 - freed */
};
struct cfg_eq_info {
struct cfg_eq *eq;
u8 num_ceq;
u8 num_ceq_remain;
/* mutex used for allocating EQs */
struct mutex eq_mutex;
};
struct irq_alloc_info_st {
enum hifc_service_type type;
int free; /* 1 - allocated, 0 - freed */
struct irq_info info;
};
struct cfg_irq_info {
struct irq_alloc_info_st *alloc_info;
u16 num_total;
u16 num_irq_remain;
u16 num_irq_hw; /* device max irq number */
/* mutex used for allocating IRQs */
struct mutex irq_mutex;
};
#define VECTOR_THRESHOLD 2
struct cfg_mgmt_info {
struct hifc_hwdev *hwdev;
struct service_cap svc_cap;
struct cfg_eq_info eq_info; /* EQ */
struct cfg_irq_info irq_param_info; /* IRQ */
u32 func_seq_num; /* temporary */
};
enum cfg_sub_cmd {
/* PPF(PF) <-> FW */
HIFC_CFG_NIC_CAP = 0,
CFG_FW_VERSION,
CFG_UCODE_VERSION,
HIFC_CFG_FUNC_CAP,
HIFC_CFG_MBOX_CAP = 6,
};
struct hifc_dev_cap {
u8 status;
u8 version;
u8 rsvd0[6];
/* Public resource */
u8 sf_svc_attr;
u8 host_id;
u8 sf_en_pf;
u8 sf_en_vf;
u8 ep_id;
u8 intr_type;
u8 max_cos_id;
u8 er_id;
u8 port_id;
u8 max_vf;
u16 svc_cap_en;
u16 host_total_func;
u8 host_oq_id_mask_val;
u8 max_vf_cos_id;
u32 max_conn_num;
u16 max_stick2cache_num;
u16 max_bfilter_start_addr;
u16 bfilter_len;
u16 hash_bucket_num;
u8 cfg_file_ver;
u8 net_port_mode;
u8 valid_cos_bitmap; /* every bit indicate cos is valid */
u8 force_up;
u32 pf_num;
u32 pf_id_start;
u32 vf_num;
u32 vf_id_start;
/* shared resource */
u32 host_pctx_num;
u8 host_sf_en;
u8 rsvd2[3];
u32 host_ccxt_num;
u32 host_scq_num;
u32 host_srq_num;
u32 host_mpt_num;
/* l2nic */
u16 nic_max_sq;
u16 nic_max_rq;
u32 rsvd[46];
/* FC */
u32 fc_max_pctx;
u32 fc_max_scq;
u32 fc_max_srq;
u32 fc_max_cctx;
u32 fc_cctx_id_start;
u8 fc_vp_id_start;
u8 fc_vp_id_end;
u16 func_id;
};
#endif
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef HIFC_CMDQ_H_
#define HIFC_CMDQ_H_
#define HIFC_DB_OFF 0x00000800
#define HIFC_SCMD_DATA_LEN 16
#define HIFC_CMDQ_DEPTH 4096
#define HIFC_CMDQ_BUF_SIZE 2048U
#define HIFC_CMDQ_BUF_HW_RSVD 8
#define HIFC_CMDQ_MAX_DATA_SIZE \
(HIFC_CMDQ_BUF_SIZE - HIFC_CMDQ_BUF_HW_RSVD)
#define WQ_PAGE_PFN_SHIFT 12
#define WQ_BLOCK_PFN_SHIFT 9
#define WQ_PAGE_PFN(page_addr) ((page_addr) >> WQ_PAGE_PFN_SHIFT)
#define WQ_BLOCK_PFN(page_addr) ((page_addr) >> WQ_BLOCK_PFN_SHIFT)
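/* Worked example (not part of the original patch): the PFN helpers just
 * drop the low address bits, so a page address of 0x12345000 yields
 * WQ_PAGE_PFN == 0x12345 (4 KiB units) and WQ_BLOCK_PFN == 0x91a28
 * (512-byte units).
 */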
enum hifc_cmdq_type {
HIFC_CMDQ_SYNC,
HIFC_CMDQ_ASYNC,
HIFC_MAX_CMDQ_TYPES,
};
enum hifc_db_src_type {
HIFC_DB_SRC_CMDQ_TYPE,
HIFC_DB_SRC_L2NIC_SQ_TYPE,
};
enum hifc_cmdq_db_type {
HIFC_DB_SQ_RQ_TYPE,
HIFC_DB_CMDQ_TYPE,
};
/* CMDQ WQE CTRLS */
struct hifc_cmdq_header {
u32 header_info;
u32 saved_data;
};
struct hifc_scmd_bufdesc {
u32 buf_len;
u32 rsvd;
u8 data[HIFC_SCMD_DATA_LEN];
};
struct hifc_lcmd_bufdesc {
struct hifc_sge sge;
u32 rsvd1;
u64 saved_async_buf;
u64 rsvd3;
};
struct hifc_cmdq_db {
u32 db_info;
u32 rsvd;
};
struct hifc_status {
u32 status_info;
};
struct hifc_ctrl {
u32 ctrl_info;
};
struct hifc_sge_resp {
struct hifc_sge sge;
u32 rsvd;
};
struct hifc_cmdq_completion {
/* HW Format */
union {
struct hifc_sge_resp sge_resp;
u64 direct_resp;
};
};
struct hifc_cmdq_wqe_scmd {
struct hifc_cmdq_header header;
struct hifc_cmdq_db db;
struct hifc_status status;
struct hifc_ctrl ctrl;
struct hifc_cmdq_completion completion;
struct hifc_scmd_bufdesc buf_desc;
};
struct hifc_cmdq_wqe_lcmd {
struct hifc_cmdq_header header;
struct hifc_status status;
struct hifc_ctrl ctrl;
struct hifc_cmdq_completion completion;
struct hifc_lcmd_bufdesc buf_desc;
};
struct hifc_cmdq_inline_wqe {
struct hifc_cmdq_wqe_scmd wqe_scmd;
};
struct hifc_cmdq_wqe {
/* HW Format */
union {
struct hifc_cmdq_inline_wqe inline_wqe;
struct hifc_cmdq_wqe_lcmd wqe_lcmd;
};
};
struct hifc_cmdq_arm_bit {
u32 q_type;
u32 q_id;
};
struct hifc_cmdq_ctxt_info {
u64 curr_wqe_page_pfn;
u64 wq_block_pfn;
};
struct hifc_cmdq_ctxt {
u8 status;
u8 version;
u8 rsvd0[6];
u16 func_idx;
u8 cmdq_id;
u8 ppf_idx;
u8 rsvd1[4];
struct hifc_cmdq_ctxt_info ctxt_info;
};
enum hifc_cmdq_status {
HIFC_CMDQ_ENABLE = BIT(0),
};
enum hifc_cmdq_cmd_type {
HIFC_CMD_TYPE_NONE,
HIFC_CMD_TYPE_SET_ARM,
HIFC_CMD_TYPE_DIRECT_RESP,
HIFC_CMD_TYPE_SGE_RESP,
HIFC_CMD_TYPE_ASYNC,
HIFC_CMD_TYPE_TIMEOUT,
HIFC_CMD_TYPE_FAKE_TIMEOUT,
};
struct hifc_cmdq_cmd_info {
enum hifc_cmdq_cmd_type cmd_type;
struct completion *done;
int *errcode;
int *cmpt_code;
u64 *direct_resp;
u64 cmdq_msg_id;
};
struct hifc_cmdq {
struct hifc_wq *wq;
enum hifc_cmdq_type cmdq_type;
int wrapped;
/* spinlock for sending cmdq commands */
spinlock_t cmdq_lock;
/* doorbell area */
u8 __iomem *db_base;
struct hifc_cmdq_ctxt cmdq_ctxt;
struct hifc_cmdq_cmd_info *cmd_infos;
struct hifc_hwdev *hwdev;
};
struct hifc_cmdqs {
struct hifc_hwdev *hwdev;
struct pci_pool *cmd_buf_pool;
struct hifc_wq *saved_wqs;
struct hifc_cmdq_pages cmdq_pages;
struct hifc_cmdq cmdq[HIFC_MAX_CMDQ_TYPES];
u32 status;
u32 disable_flag;
};
void hifc_cmdq_ceq_handler(void *hwdev, u32 ceqe_data);
int hifc_reinit_cmdq_ctxts(struct hifc_hwdev *hwdev);
bool hifc_cmdq_idle(struct hifc_cmdq *cmdq);
int hifc_cmdqs_init(struct hifc_hwdev *hwdev);
void hifc_cmdqs_free(struct hifc_hwdev *hwdev);
void hifc_cmdq_flush_cmd(struct hifc_hwdev *hwdev,
struct hifc_cmdq *cmdq);
#endif
/* SPDX-License-Identifier: GPL-2.0 */
/* Huawei Hifc PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*
*/
#ifndef HIFC_WQ_H
#define HIFC_WQ_H
#define WQS_BLOCKS_PER_PAGE 4
#define WQ_SIZE(wq) (u32)((u64)(wq)->q_depth * (wq)->wqebb_size)
#define WQE_PAGE_NUM(wq, idx) (((idx) >> ((wq)->wqebbs_per_page_shift)) & \
((wq)->num_q_pages - 1))
#define WQE_PAGE_OFF(wq, idx) ((u64)((wq)->wqebb_size) * \
((idx) & ((wq)->num_wqebbs_per_page - 1)))
#define WQ_PAGE_ADDR_SIZE sizeof(u64)
#define WQ_PAGE_ADDR_SIZE_SHIFT 3
#define WQ_PAGE_ADDR(wq, idx) \
(u8 *)(*(u64 *)((u64)((wq)->shadow_block_vaddr) + \
(WQE_PAGE_NUM(wq, idx) << WQ_PAGE_ADDR_SIZE_SHIFT)))
#define WQ_BLOCK_SIZE 4096UL
#define WQS_PAGE_SIZE (WQS_BLOCKS_PER_PAGE * WQ_BLOCK_SIZE)
#define WQ_MAX_PAGES (WQ_BLOCK_SIZE >> WQ_PAGE_ADDR_SIZE_SHIFT)
#define CMDQ_BLOCKS_PER_PAGE 8
#define CMDQ_BLOCK_SIZE 512UL
#define CMDQ_PAGE_SIZE ALIGN((CMDQ_BLOCKS_PER_PAGE * \
CMDQ_BLOCK_SIZE), PAGE_SIZE)
#define ADDR_4K_ALIGNED(addr) (((addr) & 0xfff) == 0)
#define WQ_BASE_VADDR(wqs, wq) \
(u64 *)(((u64)((wqs)->page_vaddr[(wq)->page_idx])) \
+ (wq)->block_idx * WQ_BLOCK_SIZE)
#define WQ_BASE_PADDR(wqs, wq) (((wqs)->page_paddr[(wq)->page_idx]) \
+ (u64)(wq)->block_idx * WQ_BLOCK_SIZE)
#define WQ_BASE_ADDR(wqs, wq) \
(u64 *)(((u64)((wqs)->shadow_page_vaddr[(wq)->page_idx])) \
+ (wq)->block_idx * WQ_BLOCK_SIZE)
#define CMDQ_BASE_VADDR(cmdq_pages, wq) \
(u64 *)(((u64)((cmdq_pages)->cmdq_page_vaddr)) \
+ (wq)->block_idx * CMDQ_BLOCK_SIZE)
#define CMDQ_BASE_PADDR(cmdq_pages, wq) \
(((u64)((cmdq_pages)->cmdq_page_paddr)) \
+ (u64)(wq)->block_idx * CMDQ_BLOCK_SIZE)
#define CMDQ_BASE_ADDR(cmdq_pages, wq) \
(u64 *)(((u64)((cmdq_pages)->cmdq_shadow_page_vaddr)) \
+ (wq)->block_idx * CMDQ_BLOCK_SIZE)
#define MASKED_WQE_IDX(wq, idx) ((idx) & (wq)->mask)
#define WQ_NUM_PAGES(num_wqs) \
(ALIGN((u32)num_wqs, WQS_BLOCKS_PER_PAGE) / WQS_BLOCKS_PER_PAGE)
#define MAX_WQE_SIZE(max_sge, wqebb_size) \
((max_sge <= 2) ? (wqebb_size) : \
((ALIGN(((max_sge) - 2), 4) / 4 + 1) * (wqebb_size)))
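/* Worked example (not part of the original patch): MAX_WQE_SIZE keeps two
 * SGEs inline and rounds the rest up in groups of four, so max_sge = 6
 * with a 64-byte WQEBB gives (ALIGN(4, 4) / 4 + 1) * 64 = 128 bytes.
 */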
struct hifc_free_block {
u32 page_idx;
u32 block_idx;
};
struct hifc_wq {
/* The addresses are 64 bit in the HW */
u64 block_paddr;
u64 *shadow_block_vaddr;
u64 *block_vaddr;
u32 wqebb_size;
u32 wq_page_size;
u16 q_depth;
u32 max_wqe_size;
u32 num_wqebbs_per_page;
/* performance: replace mul/div with shifts;
 * num_wqebbs_per_page must be a power of 2
 */
u32 wqebbs_per_page_shift;
u32 page_idx;
u32 block_idx;
u32 num_q_pages;
struct hifc_dma_addr_align *mem_align;
int cons_idx;
int prod_idx;
atomic_t delta;
u16 mask;
u8 *shadow_wqe;
u16 *shadow_idx;
};
struct hifc_cmdq_pages {
/* The addresses are 64 bit in the HW */
u64 cmdq_page_paddr;
u64 *cmdq_page_vaddr;
u64 *cmdq_shadow_page_vaddr;
void *dev_hdl;
};
struct hifc_wqs {
/* The addresses are 64 bit in the HW */
u64 *page_paddr;
u64 **page_vaddr;
u64 **shadow_page_vaddr;
struct hifc_free_block *free_blocks;
u32 alloc_blk_pos;
u32 return_blk_pos;
int num_free_blks;
/* for allocate blocks */
spinlock_t alloc_blocks_lock;
u32 num_pages;
void *dev_hdl;
};
void hifc_wq_wqe_pg_clear(struct hifc_wq *wq);
int hifc_cmdq_alloc(struct hifc_cmdq_pages *cmdq_pages,
struct hifc_wq *wq, void *dev_hdl,
int cmdq_blocks, u32 wq_page_size, u32 wqebb_size,
u16 q_depth, u32 max_wqe_size);
void hifc_cmdq_free(struct hifc_cmdq_pages *cmdq_pages,
struct hifc_wq *wq, int cmdq_blocks);
int hifc_wqs_alloc(struct hifc_wqs *wqs, int num_wqs, void *dev_hdl);
void hifc_wqs_free(struct hifc_wqs *wqs);
int hifc_wq_allocate(struct hifc_wqs *wqs, struct hifc_wq *wq,
u32 wqebb_size, u32 wq_page_size, u16 q_depth,
u32 max_wqe_size);
void hifc_wq_free(struct hifc_wqs *wqs, struct hifc_wq *wq);
void *hifc_get_wqebb_addr(struct hifc_wq *wq, u16 index);
u64 hifc_get_first_wqe_page_addr(struct hifc_wq *wq);
void *hifc_get_wqe(struct hifc_wq *wq, int num_wqebbs, u16 *prod_idx);
void hifc_put_wqe(struct hifc_wq *wq, int num_wqebbs);
void *hifc_read_wqe(struct hifc_wq *wq, int num_wqebbs, u16 *cons_idx);
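/* Illustrative usage sketch, not part of the original patch: allocate a
 * work queue from a WQS pool, reserve one WQEBB, and tear it down. The
 * sizes (64-byte WQEBB, 4 KiB page, depth 256) are hypothetical.
 */
static inline int hifc_wq_example(struct hifc_wqs *wqs)
{
	struct hifc_wq wq;
	u16 prod_idx;
	void *wqe;
	int err;

	err = hifc_wq_allocate(wqs, &wq, 64, 4096, 256, 64);
	if (err)
		return err;

	wqe = hifc_get_wqe(&wq, 1, &prod_idx);	/* reserve one WQEBB */
	if (wqe)
		hifc_put_wqe(&wq, 1);		/* hand it back once consumed */

	hifc_wq_free(wqs, &wq);
	return 0;
}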
#endif