Commit a22e7a2c authored by Yangyang Li, committed by Xie XiuQi

RDMA/hns: Kernel notify usr space to stop ring db

driver inclusion
category: bugfix
bugzilla: NA
CVE: NA

In the reset scenario, if the kernel receives the reset signal, it
needs to notify user space to stop ringing the doorbell (see the
user-space consumer sketch after the diff below).

Feature or Bugfix: Bugfix
Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
Reviewed-by: liuyixian <liuyixian@huawei.com>
Reviewed-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 14a5933a
......
@@ -317,7 +317,7 @@ static inline void hns_roce_inc_rdma_hw_stats(struct ib_device *dev, int stats)
 #define PAGE_ADDR_SHIFT			12
 
-#define HNS_ROCE_DISABLE_DB		1
+#define HNS_ROCE_IS_RESETTING		1
 
 struct hns_roce_uar {
 	u64		pfn;
......
@@ -1226,8 +1226,9 @@ struct hns_roce_dev {
 	int			loop_idc;
 	u32			sdb_offset;
 	u32			odb_offset;
-	dma_addr_t		uar2_dma_addr;
-	u32			uar2_size;
+	dma_addr_t		tptr_dma_addr;	/* only for hw v1 */
+	u32			tptr_size;	/* only for hw v1 */
+	void			*reset_page;	/* store reset state for hw v2 */
 	const struct hns_roce_hw *hw;
 	const struct hns_roce_dfx_hw *dfx;
 	void			*priv;
......
......
@@ -1414,8 +1414,8 @@ static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
 	if (!tptr_buf->buf)
 		return -ENOMEM;
 
-	hr_dev->uar2_dma_addr = tptr_buf->map;
-	hr_dev->uar2_size = HNS_ROCE_V1_TPTR_BUF_SIZE;
+	hr_dev->tptr_dma_addr = tptr_buf->map;
+	hr_dev->tptr_size = HNS_ROCE_V1_TPTR_BUF_SIZE;
 
 	return 0;
 }
......
......
@@ -2459,34 +2459,15 @@ static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev,
 			  link_tbl->table.map);
 }
 
-static int hns_roce_v2_uar_init(struct hns_roce_dev *hr_dev)
+static int hns_roce_v2_get_reset_page(struct hns_roce_dev *hr_dev)
 {
-	struct hns_roce_v2_priv *priv = hr_dev->priv;
-	struct hns_roce_buf_list *uar = &priv->uar;
-	struct device *dev = &hr_dev->pci_dev->dev;
-
-	uar->buf = dma_alloc_coherent(dev, HNS_ROCE_V2_UAR_BUF_SIZE, &uar->map,
-				      GFP_KERNEL);
-	if (!uar->buf)
+	hr_dev->reset_page = (void *)get_zeroed_page(GFP_KERNEL);
+	if (!hr_dev->reset_page)
 		return -ENOMEM;
 
-	memset(uar->buf, 0, HNS_ROCE_V2_UAR_BUF_SIZE);
-	hr_dev->uar2_dma_addr = uar->map;
-	hr_dev->uar2_size = HNS_ROCE_V2_UAR_BUF_SIZE;
-
 	return 0;
 }
 
-static void hns_roce_v2_uar_free(struct hns_roce_dev *hr_dev)
-{
-	struct hns_roce_v2_priv *priv = hr_dev->priv;
-	struct hns_roce_buf_list *uar = &priv->uar;
-	struct device *dev = &hr_dev->pci_dev->dev;
-
-	dma_free_coherent(dev, HNS_ROCE_V2_UAR_BUF_SIZE, uar->buf, uar->map);
-}
-
 static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
 {
 	struct hns_roce_v2_priv *priv = hr_dev->priv;
......
@@ -2495,9 +2476,10 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
 	int ret;
 	int i;
 
-	ret = hns_roce_v2_uar_init(hr_dev);
+	ret = hns_roce_v2_get_reset_page(hr_dev);
 	if (ret) {
-		dev_err(hr_dev->dev, "uar init failed, ret = %d.\n", ret);
+		dev_err(hr_dev->dev,
+			"reset state init failed, ret = %d.\n", ret);
 		return ret;
 	}
......
@@ -2554,7 +2536,7 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
 	hns_roce_free_link_table(hr_dev, &priv->tsq);
 
 err_tsq_init_failed:
-	hns_roce_v2_uar_free(hr_dev);
+	free_page((unsigned long)hr_dev->reset_page);
 
 	return ret;
 }
......
@@ -2568,7 +2550,7 @@ static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
 	hns_roce_free_link_table(hr_dev, &priv->tpq);
 	hns_roce_free_link_table(hr_dev, &priv->tsq);
 
-	hns_roce_v2_uar_free(hr_dev);
+	free_page((unsigned long)hr_dev->reset_page);
 }
 
 static int hns_roce_query_mbox_status(struct hns_roce_dev *hr_dev)
......
@@ -7477,12 +7459,16 @@ static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
 	handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
 }
 
-static void hns_roce_hw_v2_reset_notify_usr(struct hns_roce_dev *hr_dev)
+static void hns_roce_v2_reset_notify_user(struct hns_roce_dev *hr_dev)
 {
-	struct hns_roce_v2_priv *priv = hr_dev->priv;
-	struct hns_roce_v2_uar *uar = (struct hns_roce_v2_uar *)priv->uar.buf;
+	struct hns_roce_v2_reset_state *state;
 
-	uar->dis_db = HNS_ROCE_DISABLE_DB;
+	state = (struct hns_roce_v2_reset_state *)hr_dev->reset_page;
+	state->reset_state = HNS_ROCE_IS_RESETTING;
+	/* Ensure the reset state has been flushed to memory */
+	wmb();
 }
 
 static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
......
@@ -7503,7 +7489,7 @@ static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
 	hr_dev->is_reset = true;
 	hr_dev->active = false;
 	hr_dev->dis_db = true;
-	hns_roce_hw_v2_reset_notify_usr(hr_dev);
+	hns_roce_v2_reset_notify_user(hr_dev);
 
 	hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN;
 	hns_roce_handle_device_err(hr_dev);
......
......
@@ -1855,8 +1855,8 @@ struct hns_roce_link_table_entry {
 #define HNS_ROCE_V2_UAR_BUF_SIZE		4096
 
-struct hns_roce_v2_uar {
-	u32 dis_db;
+struct hns_roce_v2_reset_state {
+	u32 reset_state; /* stored to use in user space */
 };
 
 struct hns_roce_v2_priv {
......
......
@@ -646,13 +646,26 @@ static int hns_roce_mmap(struct ib_ucontext *context,
 				       to_hr_ucontext(context)->uar.pfn,
 				       PAGE_SIZE, vma->vm_page_prot))
 			return -EAGAIN;
-	} else if (vma->vm_pgoff == 1 && hr_dev->uar2_dma_addr &&
-		   hr_dev->uar2_size) {
-		if (io_remap_pfn_range(vma, vma->vm_start,
-				       hr_dev->uar2_dma_addr >> PAGE_SHIFT,
-				       hr_dev->uar2_size,
-				       vma->vm_page_prot))
-			return -EAGAIN;
+	} else if (vma->vm_pgoff == 1) {
+		/* vm_pgoff: 1 -- TPTR(hw v1), reset_page(hw v2) */
+		if (hr_dev->tptr_dma_addr && hr_dev->tptr_size) {
+			if (io_remap_pfn_range(vma, vma->vm_start,
+					hr_dev->tptr_dma_addr >> PAGE_SHIFT,
+					hr_dev->tptr_size, vma->vm_page_prot)) {
+				dev_err(hr_dev->dev,
+					"mmap tptr page failed.\n");
+				return -EAGAIN;
+			}
+		}
+
+		if (hr_dev->reset_page)
+			if (remap_pfn_range(vma, vma->vm_start,
+					    virt_to_pfn(hr_dev->reset_page),
+					    PAGE_SIZE, vma->vm_page_prot)) {
+				dev_err(hr_dev->dev,
+					"mmap reset page failed.\n");
+				return -EAGAIN;
+			}
 	} else {
 		dev_err(hr_dev->dev, "mmap failed, vm_pgoff is unsupported.\n");
 		return -EINVAL;
......
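Note: the user-space counterpart of this change (the hns provider in rdma-core) is not part of this diff. The sketch below shows, under stated assumptions, how such a consumer could map the page exported at vm_pgoff 1 and test the flag before ringing a doorbell. The cmd_fd parameter, the helper names and the offset convention (one page on the uverbs command fd) are illustrative assumptions, not the provider's actual API.

#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

#define HNS_ROCE_IS_RESETTING	1

/* Mirrors struct hns_roce_v2_reset_state on the kernel side. */
struct hns_roce_v2_reset_state {
	uint32_t reset_state;	/* written by the kernel, read-only here */
};

static volatile struct hns_roce_v2_reset_state *reset_state;

/*
 * Map the page exported at vm_pgoff == 1 (tptr page on hw v1, reset page
 * on hw v2) through the uverbs command fd.  cmd_fd and the
 * "offset = 1 * page size" convention are assumptions for illustration.
 */
int map_reset_page(int cmd_fd)
{
	long page_size = sysconf(_SC_PAGESIZE);
	void *addr;

	addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, cmd_fd,
		    1 * page_size);
	if (addr == MAP_FAILED)
		return -1;

	reset_state = addr;
	return 0;
}

/* Call right before ringing a doorbell; skip the ring while resetting. */
int db_allowed(void)
{
	int ok = reset_state->reset_state != HNS_ROCE_IS_RESETTING;

	/* Keep the caller's doorbell write from moving before this read. */
	__sync_synchronize();
	return ok;
}

The full barrier after the load is only a conservative stand-in opposite the kernel's wmb(); the real provider may rely on lighter ordering, as long as the flag is checked on every doorbell path.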