Commit 4920275a authored by Junxian Huang, committed by Zheng Zengkai

RDMA/hns: Fix missing GIDs in RoCE bonding mode 1

driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I63IM5

---------------------------------------------------------------------------

In the existing hns RoCE code, ib_dev->ops.get_netdev is not set, so
only one slave is assigned an IP-based GID in RoCE bonding mode 1.

This patch adds hns_roce_get_netdev() and sets it as
ib_dev->ops.get_netdev, so that IB core can assign GIDs to the proper
net device according to the active slave in mode 1.
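
As context, the sketch below shows how a caller on the core GID
management path could resolve the netdev behind a port once this
callback is registered. resolve_port_netdev() is a hypothetical helper
for illustration only, not an actual ib_core function:

    /* Illustrative only: once ib_dev->ops.get_netdev is set, core code
     * can resolve the netdev backing a RoCE port roughly like this.
     * hns_roce_get_netdev() returns the bond's active slave in mode 1,
     * so the IP-based GID follows the active slave.
     */
    static struct net_device *resolve_port_netdev(struct ib_device *ib_dev,
                                                  u8 port_num)
    {
            struct net_device *ndev = NULL;

            if (ib_dev->ops.get_netdev)
                    ndev = ib_dev->ops.get_netdev(ib_dev, port_num);

            /* get_netdev returns a held reference; drop it with
             * dev_put() when done.
             */
            return ndev;
    }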

Fixes: e62a2027 ("RDMA/hns: support RoCE bonding")
Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
Reviewed-by: Yangyang Li <liyangyang20@huawei.com>
Reviewed-by: Yue Haibing <yuehaibing@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent b6623fd2
@@ -26,27 +26,43 @@ static struct hns_roce_dev *hns_roce_get_hrdev_by_netdev(struct net_device *net_
 	return hr_dev;
 }
 
-bool hns_roce_bond_is_active(struct hns_roce_dev *hr_dev)
+static struct hns_roce_bond_group *hns_roce_get_bond_grp(struct hns_roce_dev *hr_dev)
 {
+	struct hns_roce_bond_group *bond_grp = NULL;
 	struct net_device *upper_dev;
 	struct net_device *net_dev;
 
-	if (!netif_is_lag_port(hr_dev->iboe.netdevs[0]))
-		return false;
-
 	rcu_read_lock();
 	upper_dev = netdev_master_upper_dev_get_rcu(hr_dev->iboe.netdevs[0]);
 	for_each_netdev_in_bond_rcu(upper_dev, net_dev) {
 		hr_dev = hns_roce_get_hrdev_by_netdev(net_dev);
-		if (hr_dev && hr_dev->bond_grp &&
-		    (hr_dev->bond_grp->bond_state == HNS_ROCE_BOND_REGISTERING ||
-		     hr_dev->bond_grp->bond_state == HNS_ROCE_BOND_IS_BONDED)) {
-			rcu_read_unlock();
-			return true;
+		if (hr_dev && hr_dev->bond_grp) {
+			bond_grp = hr_dev->bond_grp;
+			break;
 		}
 	}
 	rcu_read_unlock();
 
+	return bond_grp;
+}
+
+bool hns_roce_bond_is_active(struct hns_roce_dev *hr_dev)
+{
+	struct hns_roce_bond_group *bond_grp;
+
+	if (!netif_is_lag_port(hr_dev->iboe.netdevs[0]))
+		return false;
+
+	bond_grp = hns_roce_get_bond_grp(hr_dev);
+	if (bond_grp &&
+	    (bond_grp->bond_state == HNS_ROCE_BOND_REGISTERING ||
+	     bond_grp->bond_state == HNS_ROCE_BOND_IS_BONDED))
+		return true;
 
 	return false;
 }
@@ -62,12 +78,15 @@ struct net_device *hns_roce_get_bond_netdev(struct hns_roce_dev *hr_dev)
 	if (!netif_is_lag_port(hr_dev->iboe.netdevs[0]))
 		return NULL;
 
+	if (!bond_grp) {
+		bond_grp = hns_roce_get_bond_grp(hr_dev);
+		if (!bond_grp)
+			return NULL;
+	}
+
 	mutex_lock(&bond_grp->bond_mutex);
 
-	if (bond_grp->bond_state != HNS_ROCE_BOND_IS_BONDED)
+	if (bond_grp->bond_state == HNS_ROCE_BOND_NOT_BONDED)
 		goto out;
 
 	if (bond_grp->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
@@ -155,7 +174,8 @@ static void hns_roce_set_bond(struct hns_roce_bond_group *bond_grp)
 	int ret;
 	int i;
 
-	/* bond_grp will be kfree during uninit_instance of main_hr_dev.
+	/*
+	 * bond_grp will be kfree during uninit_instance of main_hr_dev.
 	 * Thus the main_hr_dev is switched before the uninit_instance
 	 * of the previous main_hr_dev.
 	 */
@@ -47,6 +47,30 @@
 #include "hns_roce_dca.h"
 #include "hns_roce_debugfs.h"
 
+static struct net_device *hns_roce_get_netdev(struct ib_device *ib_dev,
+					      u8 port_num)
+{
+	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+	struct net_device *ndev;
+
+	if (port_num < 1 || port_num > hr_dev->caps.num_ports)
+		return NULL;
+
+	ndev = hr_dev->hw->get_bond_netdev(hr_dev);
+
+	rcu_read_lock();
+
+	if (!ndev)
+		ndev = hr_dev->iboe.netdevs[port_num - 1];
+
+	if (ndev)
+		dev_hold(ndev);
+
+	rcu_read_unlock();
+
+	return ndev;
+}
+
 static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u32 port,
 			    const u8 *addr)
 {
@@ -677,6 +701,7 @@ static const struct ib_device_ops hns_roce_dev_ops = {
 	.disassociate_ucontext = hns_roce_disassociate_ucontext,
 	.get_dma_mr = hns_roce_get_dma_mr,
 	.get_link_layer = hns_roce_get_link_layer,
+	.get_netdev = hns_roce_get_netdev,
 	.get_port_immutable = hns_roce_port_immutable,
 	.mmap = hns_roce_mmap,
 	.mmap_free = hns_roce_free_mmap,