提交 48f87d26 编写于 作者: G Guofeng Yue 提交者: Zheng Zengkai

RDMA/hns: Kernel notify usr space to stop ring db

driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I65TUL

---------------------------------------------------------------

In the reset scenario, if the kernel receives the reset signal,
it needs to notify the user space to stop ring doorbell.
Signed-off-by: Yixing Liu <liuyixing1@huawei.com>
Signed-off-by: Guofeng Yue <yueguofeng@hisilicon.com>
Reviewed-by: Yangyang Li <liyangyang20@huawei.com>
Reviewed-by: Yue Haibing <yuehaibing@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
上级 306b4220
......@@ -59,6 +59,7 @@
#define HNS_ROCE_CEQ 0
#define HNS_ROCE_AEQ 1
#define HNS_ROCE_IS_RESETTING 1
#define HNS_ROCE_CEQE_SIZE 0x4
#define HNS_ROCE_AEQE_SIZE 0x10
......@@ -206,6 +207,7 @@ enum hns_roce_mmap_type {
HNS_ROCE_MMAP_TYPE_DB = 1,
HNS_ROCE_MMAP_TYPE_DWQE,
HNS_ROCE_MMAP_TYPE_DCA,
HNS_ROCE_MMAP_TYPE_RESET,
};
struct hns_user_mmap_entry {
......@@ -248,6 +250,7 @@ struct hns_roce_ucontext {
struct list_head page_list;
struct mutex page_mutex;
struct hns_user_mmap_entry *db_mmap_entry;
struct hns_user_mmap_entry *reset_mmap_entry;
u32 config;
struct hns_roce_dca_ctx dca_ctx;
void *dca_dbgfs;
......@@ -1027,6 +1030,8 @@ struct hns_roce_dev {
int loop_idc;
u32 sdb_offset;
u32 odb_offset;
struct page *reset_page; /* store reset state */
void *reset_kaddr; /* addr of reset page */
const struct hns_roce_hw *hw;
void *priv;
struct workqueue_struct *irq_workq;
......
......@@ -2812,6 +2812,31 @@ static void free_dip_list(struct hns_roce_dev *hr_dev)
spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags);
}
static int hns_roce_v2_get_reset_page(struct hns_roce_dev *hr_dev)
{
hr_dev->reset_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!hr_dev->reset_page)
return -ENOMEM;
hr_dev->reset_kaddr = vmap(&hr_dev->reset_page, 1, VM_MAP, PAGE_KERNEL);
if (!hr_dev->reset_kaddr)
goto err_with_vmap;
return 0;
err_with_vmap:
put_page(hr_dev->reset_page);
return -ENOMEM;
}
/* Tear down the reset-state page: unmap it, then drop the page reference. */
static void hns_roce_v2_put_reset_page(struct hns_roce_dev *hr_dev)
{
	/* vunmap() must precede releasing the backing page. */
	vunmap(hr_dev->reset_kaddr);
	put_page(hr_dev->reset_page);

	hr_dev->reset_kaddr = NULL;
	hr_dev->reset_page = NULL;
}
static struct ib_pd *free_mr_init_pd(struct hns_roce_dev *hr_dev)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
......@@ -3168,16 +3193,23 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
{
int ret;
ret = hns_roce_v2_get_reset_page(hr_dev);
if (ret) {
dev_err(hr_dev->dev,
"reset state init failed, ret = %d.\n", ret);
return ret;
}
/* The hns ROCEE requires the extdb info to be cleared before using */
ret = hns_roce_clear_extdb_list_info(hr_dev);
if (ret)
return ret;
goto err_clear_extdb_failed;
hns_roce_set_mac_type(hr_dev);
ret = get_hem_table(hr_dev);
if (ret)
return ret;
goto err_clear_extdb_failed;
if (hr_dev->is_vf)
return 0;
......@@ -3192,6 +3224,8 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
err_llm_init_failed:
put_hem_table(hr_dev);
err_clear_extdb_failed:
hns_roce_v2_put_reset_page(hr_dev);
return ret;
}
......@@ -3203,6 +3237,8 @@ static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
if (!hr_dev->is_vf)
hns_roce_free_link_table(hr_dev);
hns_roce_v2_put_reset_page(hr_dev);
if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP09)
free_dip_list(hr_dev);
}
......@@ -7282,6 +7318,18 @@ void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle, bool reset)
handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
}
/*
 * Flag the shared reset-state page so user space (which mmaps it) can
 * see that a reset is in progress and stop ringing doorbells.
 */
static void hns_roce_v2_reset_notify_user(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_reset_state *state = hr_dev->reset_kaddr;

	state->reset_state = HNS_ROCE_IS_RESETTING;

	/* Ensure reset state was flushed in memory */
	wmb();
}
static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
{
struct hns_roce_dev *hr_dev;
......@@ -7300,6 +7348,9 @@ static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
hr_dev->active = false;
hr_dev->dis_db = true;
hns_roce_v2_reset_notify_user(hr_dev);
hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN;
return 0;
......
......@@ -1340,6 +1340,10 @@ struct hns_roce_link_table {
#define HNS_ROCE_EXT_LLM_ENTRY(addr, id) (((id) << (64 - 12)) | ((addr) >> 12))
#define HNS_ROCE_EXT_LLM_MIN_PAGES(que_num) ((que_num) * 4 + 2)
/*
 * Layout of the reset-state page shared with user space via mmap
 * (HNS_ROCE_MMAP_TYPE_RESET). The kernel writes HNS_ROCE_IS_RESETTING
 * into reset_state so user space knows to stop ringing doorbells.
 */
struct hns_roce_v2_reset_state {
	u32 reset_state; /* stored to use in user space */
};
struct hns_roce_v2_free_mr {
struct hns_roce_qp *rsv_qp[HNS_ROCE_FREE_MR_USED_QP_NUM];
struct hns_roce_cq *rsv_cq;
......
......@@ -367,6 +367,7 @@ hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
break;
case HNS_ROCE_MMAP_TYPE_DWQE:
case HNS_ROCE_MMAP_TYPE_DCA:
case HNS_ROCE_MMAP_TYPE_RESET:
ret = rdma_user_mmap_entry_insert_range(
ucontext, &entry->rdma_entry, length, 1,
U32_MAX);
......@@ -408,6 +409,26 @@ static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx)
return 0;
}
static void hns_roce_dealloc_reset_entry(struct hns_roce_ucontext *context)
{
if (context->reset_mmap_entry)
rdma_user_mmap_entry_remove(&context->reset_mmap_entry->rdma_entry);
}
/*
 * Create the mmap entry through which user space maps the reset-state
 * page of this device. Returns 0 on success, -ENOMEM on failure.
 */
static int hns_roce_alloc_reset_entry(struct ib_ucontext *uctx)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
	struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
	struct hns_user_mmap_entry *entry;

	entry = hns_roce_user_mmap_entry_insert(uctx,
						(u64)hr_dev->reset_kaddr,
						PAGE_SIZE,
						HNS_ROCE_MMAP_TYPE_RESET);
	/* Stays NULL on failure, matching the "no entry" state. */
	context->reset_mmap_entry = entry;
	if (!entry)
		return -ENOMEM;

	return 0;
}
static void ucontext_set_resp(struct ib_ucontext *uctx,
struct hns_roce_ib_alloc_ucontext_resp *resp)
{
......@@ -425,6 +446,11 @@ static void ucontext_set_resp(struct ib_ucontext *uctx,
rdma_entry = &context->dca_ctx.dca_mmap_entry->rdma_entry;
resp->dca_mmap_key = rdma_user_mmap_get_offset(rdma_entry);
}
if (context->reset_mmap_entry) {
rdma_entry = &context->reset_mmap_entry->rdma_entry;
resp->reset_mmap_key = rdma_user_mmap_get_offset(rdma_entry);
}
}
static u32 get_udca_max_qps(struct hns_roce_dev *hr_dev,
......@@ -503,6 +529,10 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
hns_roce_register_udca(hr_dev, get_udca_max_qps(hr_dev, &ucmd),
context);
ret = hns_roce_alloc_reset_entry(uctx);
if (ret)
goto error_fail_reset_entry;
ucontext_set_resp(uctx, &resp);
ret = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
if (ret)
......@@ -518,7 +548,9 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
error_fail_copy_to_udata:
hns_roce_unregister_udca(hr_dev, context);
hns_roce_dealloc_reset_entry(context);
error_fail_reset_entry:
hns_roce_dealloc_uar_entry(context);
error_fail_uar_entry:
......@@ -542,6 +574,7 @@ static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
hns_roce_unregister_udca(hr_dev, context);
hns_roce_dealloc_uar_entry(context);
hns_roce_dealloc_reset_entry(context);
ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);
}
......@@ -578,6 +611,7 @@ static int mmap_dca(struct ib_ucontext *context, struct vm_area_struct *vma)
static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
{
struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
struct rdma_user_mmap_entry *rdma_entry;
struct hns_user_mmap_entry *entry;
phys_addr_t pfn;
......@@ -599,8 +633,19 @@ static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
case HNS_ROCE_MMAP_TYPE_DCA:
ret = mmap_dca(uctx, vma);
goto out;
case HNS_ROCE_MMAP_TYPE_RESET:
if (vma->vm_flags & (VM_WRITE | VM_EXEC)) {
ret = -EINVAL;
goto out;
}
ret = remap_pfn_range(vma, vma->vm_start,
page_to_pfn(hr_dev->reset_page),
PAGE_SIZE, vma->vm_page_prot);
goto out;
default:
return -EINVAL;
ret = -EINVAL;
goto out;
}
ret = rdma_user_mmap_io(uctx, vma, pfn, rdma_entry->npages * PAGE_SIZE,
......
......@@ -127,6 +127,7 @@ struct hns_roce_ib_alloc_ucontext_resp {
__u32 dca_qps;
__u32 dca_mmap_size;
__aligned_u64 dca_mmap_key;
__aligned_u64 reset_mmap_key;
};
enum hns_roce_uctx_comp_mask {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册