Commit e8478828 authored by Linus Torvalds

Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

Pull RDMA/InfiniBand fixes from Roland Dreier:

 - Fix some rough edges from the "IP addressing for IBoE" merge

 - Other misc fixes, mostly to hardware drivers

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (21 commits)
  RDMA/ocrdma: Fix load time panic during GID table init
  RDMA/ocrdma: Fix traffic class shift
  IB/iser: Fix use after free in iser_snd_completion()
  IB/iser: Avoid dereferencing iscsi_iser conn object when not bound to iser connection
  IB/usnic: Fix smatch endianness error
  IB/mlx5: Remove dependency on X86
  mlx5: Add include of <linux/slab.h> because of kzalloc()/kfree() use
  IB/qib: Add missing serdes init sequence
  RDMA/cxgb4: Add missing neigh_release in LE-Workaround path
  IB: Report using RoCE IP based gids in port caps
  IB/mlx4: Build the port IBoE GID table properly under bonding
  IB/mlx4: Do IBoE GID table resets per-port
  IB/mlx4: Do IBoE locking earlier when initializing the GID table
  IB/mlx4: Move rtnl locking to the right place
  IB/mlx4: Make sure GID index 0 is always occupied
  IB/mlx4: Don't allocate range of steerable UD QPs for Ethernet-only device
  RDMA/amso1100: Fix error return code
  RDMA/nes: Fix error return code
  IB/mlx5: Don't set "block multicast loopback" capability
  IB/mlx5: Fix binary compatibility with libmlx5
  ...
@@ -1082,6 +1082,7 @@ static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
 	/* Initialize network device */
 	if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) {
+		ret = -ENOMEM;
 		iounmap(mmio_regs);
 		goto bail4;
 	}
@@ -1151,7 +1152,8 @@ static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
 		goto bail10;
 	}
 
-	if (c2_register_device(c2dev))
+	ret = c2_register_device(c2dev);
+	if (ret)
 		goto bail10;
 
 	return 0;
......
@@ -576,7 +576,8 @@ int c2_rnic_init(struct c2_dev *c2dev)
 		goto bail4;
 
 	/* Initialize cached the adapter limits */
-	if (c2_rnic_query(c2dev, &c2dev->props))
+	err = c2_rnic_query(c2dev, &c2dev->props);
+	if (err)
 		goto bail5;
 
 	/* Initialize the PD pool */
......
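Both amso1100 hunks follow the same recipe as the nes fix further down: capture the callee's status in a local variable before the goto, so the probe path propagates a real errno instead of an uninitialized or stale value. A minimal userspace sketch of that goto-unwind idiom (all names hypothetical):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for one of the driver's init steps. */
static int setup_step(int fail) { return fail ? -ENOMEM : 0; }

static int probe(void)
{
	int ret;
	char *buf;

	buf = malloc(64);
	if (!buf) {
		ret = -ENOMEM;		/* set a real errno before the goto */
		goto bail;
	}

	ret = setup_step(0);		/* capture the status, then test it */
	if (ret)
		goto bail_free;		/* unwind in reverse order of setup */

	free(buf);
	return 0;

bail_free:
	free(buf);
bail:
	return ret;			/* never reach a goto with ret unset */
}

int main(void)
{
	printf("probe() = %d\n", probe());
	return 0;
}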
@@ -3352,6 +3352,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
 		goto free_dst;
 	}
 
+	neigh_release(neigh);
 	step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
 	rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
 	window = (__force u16) htons((__force u16)tcph->window);
......
@@ -347,7 +347,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 	props->active_width	= (((u8 *)mailbox->buf)[5] == 0x40) ?
 					IB_WIDTH_4X : IB_WIDTH_1X;
 	props->active_speed	= IB_SPEED_QDR;
-	props->port_cap_flags	= IB_PORT_CM_SUP;
+	props->port_cap_flags	= IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
 	props->gid_tbl_len	= mdev->dev->caps.gid_table_len[port];
 	props->max_msg_sz	= mdev->dev->caps.max_msg_sz;
 	props->pkey_tbl_len	= 1;
@@ -1357,6 +1357,21 @@ static struct device_attribute *mlx4_class_attributes[] = {
 	&dev_attr_board_id
 };
 
+static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id,
+				     struct net_device *dev)
+{
+	memcpy(eui, dev->dev_addr, 3);
+	memcpy(eui + 5, dev->dev_addr + 3, 3);
+	if (vlan_id < 0x1000) {
+		eui[3] = vlan_id >> 8;
+		eui[4] = vlan_id & 0xff;
+	} else {
+		eui[3] = 0xff;
+		eui[4] = 0xfe;
+	}
+	eui[0] ^= 2;
+}
+
 static void update_gids_task(struct work_struct *work)
 {
 	struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
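The new mlx4_addrconf_ifid_eui48() derives the low eight bytes of a RoCE GID from the netdev's MAC address, the same modified EUI-64 construction IPv6 uses for link-local interface IDs: split the MAC around a 16-bit filler (the VLAN ID, or ff:fe when there is none) and flip the universal/local bit. A self-contained illustration, with a hypothetical ifid_from_mac() standing in for the kernel helper:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* MAC bytes 0-2 | VLAN (or ff:fe) | MAC bytes 3-5, then toggle the
 * universal/local bit, as in RFC 4291 modified EUI-64. */
static void ifid_from_mac(uint8_t eui[8], const uint8_t mac[6], uint16_t vlan_id)
{
	memcpy(eui, mac, 3);
	memcpy(eui + 5, mac + 3, 3);
	if (vlan_id < 0x1000) {		/* valid VLAN: embed it */
		eui[3] = vlan_id >> 8;
		eui[4] = vlan_id & 0xff;
	} else {			/* no VLAN: classic ff:fe filler */
		eui[3] = 0xff;
		eui[4] = 0xfe;
	}
	eui[0] ^= 2;			/* universal/local bit */
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t eui[8];
	int i;

	ifid_from_mac(eui, mac, 0xffff);	/* 0xffff = no VLAN */
	for (i = 0; i < 8; i++)
		printf("%02x%s", eui[i], i < 7 ? ":" : "\n");
	/* prints 02:11:22:ff:fe:33:44:55 */
	return 0;
}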
@@ -1393,7 +1408,6 @@ static void reset_gids_task(struct work_struct *work)
 	struct mlx4_cmd_mailbox *mailbox;
 	union ib_gid *gids;
 	int err;
-	int i;
 	struct mlx4_dev	*dev = gw->dev->dev;
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -1405,18 +1419,16 @@ static void reset_gids_task(struct work_struct *work)
 	gids = mailbox->buf;
 	memcpy(gids, gw->gids, sizeof(gw->gids));
 
-	for (i = 1; i < gw->dev->num_ports + 1; i++) {
-		if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, i) ==
-		    IB_LINK_LAYER_ETHERNET) {
-			err = mlx4_cmd(dev, mailbox->dma,
-				       MLX4_SET_PORT_GID_TABLE << 8 | i,
-				       1, MLX4_CMD_SET_PORT,
-				       MLX4_CMD_TIME_CLASS_B,
-				       MLX4_CMD_WRAPPED);
-			if (err)
-				pr_warn(KERN_WARNING
-					"set port %d command failed\n", i);
-		}
+	if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, gw->port) ==
+	    IB_LINK_LAYER_ETHERNET) {
+		err = mlx4_cmd(dev, mailbox->dma,
+			       MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
+			       1, MLX4_CMD_SET_PORT,
+			       MLX4_CMD_TIME_CLASS_B,
+			       MLX4_CMD_WRAPPED);
+		if (err)
+			pr_warn(KERN_WARNING
+				"set port %d command failed\n", gw->port);
 	}
 
 	mlx4_free_cmd_mailbox(dev, mailbox);
@@ -1425,7 +1437,8 @@ static void reset_gids_task(struct work_struct *work)
 }
 
 static int update_gid_table(struct mlx4_ib_dev *dev, int port,
-			    union ib_gid *gid, int clear)
+			    union ib_gid *gid, int clear,
+			    int default_gid)
 {
 	struct update_gid_work *work;
 	int i;
@@ -1434,26 +1447,31 @@ static int update_gid_table(struct mlx4_ib_dev *dev, int port,
 	int found = -1;
 	int max_gids;
 
-	max_gids = dev->dev->caps.gid_table_len[port];
-	for (i = 0; i < max_gids; ++i) {
-		if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid,
-			    sizeof(*gid)))
-			found = i;
-
-		if (clear) {
-			if (found >= 0) {
-				need_update = 1;
-				dev->iboe.gid_table[port - 1][found] = zgid;
-				break;
-			}
-		} else {
-			if (found >= 0)
-				break;
-
-			if (free < 0 &&
-			    !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid,
-				    sizeof(*gid)))
-				free = i;
+	if (default_gid) {
+		free = 0;
+	} else {
+		max_gids = dev->dev->caps.gid_table_len[port];
+		for (i = 1; i < max_gids; ++i) {
+			if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid,
+				    sizeof(*gid)))
+				found = i;
+
+			if (clear) {
+				if (found >= 0) {
+					need_update = 1;
+					dev->iboe.gid_table[port - 1][found] =
+						zgid;
+					break;
+				}
+			} else {
+				if (found >= 0)
+					break;
+
+				if (free < 0 &&
+				    !memcmp(&dev->iboe.gid_table[port - 1][i],
+					    &zgid, sizeof(*gid)))
+					free = i;
+			}
 		}
 	}
@@ -1478,18 +1496,26 @@ static int update_gid_table(struct mlx4_ib_dev *dev, int port,
 	return 0;
 }
 
-static int reset_gid_table(struct mlx4_ib_dev *dev)
+static void mlx4_make_default_gid(struct net_device *dev, union ib_gid *gid)
+{
+	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+	mlx4_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev);
+}
+
+static int reset_gid_table(struct mlx4_ib_dev *dev, u8 port)
 {
 	struct update_gid_work *work;
 
 	work = kzalloc(sizeof(*work), GFP_ATOMIC);
 	if (!work)
 		return -ENOMEM;
-	memset(dev->iboe.gid_table, 0, sizeof(dev->iboe.gid_table));
+
+	memset(dev->iboe.gid_table[port - 1], 0, sizeof(work->gids));
 	memset(work->gids, 0, sizeof(work->gids));
 	INIT_WORK(&work->work, reset_gids_task);
 	work->dev = dev;
+	work->port = port;
 	queue_work(wq, &work->work);
 	return 0;
 }
@@ -1502,6 +1528,12 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
 	struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ?
 				rdma_vlan_dev_real_dev(event_netdev) :
 				event_netdev;
+	union ib_gid default_gid;
+
+	mlx4_make_default_gid(real_dev, &default_gid);
+
+	if (!memcmp(gid, &default_gid, sizeof(*gid)))
+		return 0;
 
 	if (event != NETDEV_DOWN && event != NETDEV_UP)
 		return 0;
@@ -1520,7 +1552,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
 	    (!netif_is_bond_master(real_dev) &&
 	     (real_dev == iboe->netdevs[port - 1])))
 		update_gid_table(ibdev, port, gid,
-				 event == NETDEV_DOWN);
+				 event == NETDEV_DOWN, 0);
 
 	spin_unlock(&iboe->lock);
 	return 0;
@@ -1536,7 +1568,6 @@ static u8 mlx4_ib_get_dev_port(struct net_device *dev,
 			rdma_vlan_dev_real_dev(dev) : dev;
 
 	iboe = &ibdev->iboe;
-	spin_lock(&iboe->lock);
 
 	for (port = 1; port <= MLX4_MAX_PORTS; ++port)
 		if ((netif_is_bond_master(real_dev) &&
@@ -1545,8 +1576,6 @@ static u8 mlx4_ib_get_dev_port(struct net_device *dev,
 		     (real_dev == iboe->netdevs[port - 1])))
 			break;
 
-	spin_unlock(&iboe->lock);
-
 	if ((port == 0) || (port > MLX4_MAX_PORTS))
 		return 0;
 	else
@@ -1607,7 +1636,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
 			/*ifa->ifa_address;*/
 			ipv6_addr_set_v4mapped(ifa->ifa_address,
 					       (struct in6_addr *)&gid);
-			update_gid_table(ibdev, port, &gid, 0);
+			update_gid_table(ibdev, port, &gid, 0, 0);
 		}
 		endfor_ifa(in_dev);
 		in_dev_put(in_dev);
@@ -1619,7 +1648,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
 		read_lock_bh(&in6_dev->lock);
 		list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
 			pgid = (union ib_gid *)&ifp->addr;
-			update_gid_table(ibdev, port, pgid, 0);
+			update_gid_table(ibdev, port, pgid, 0, 0);
 		}
 		read_unlock_bh(&in6_dev->lock);
 		in6_dev_put(in6_dev);
@@ -1627,14 +1656,26 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
 #endif
 }
 
+static void mlx4_ib_set_default_gid(struct mlx4_ib_dev *ibdev,
+				    struct net_device *dev, u8 port)
+{
+	union ib_gid gid;
+
+	mlx4_make_default_gid(dev, &gid);
+	update_gid_table(ibdev, port, &gid, 0, 1);
+}
+
 static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
 {
 	struct net_device *dev;
+	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
+	int i;
 
-	if (reset_gid_table(ibdev))
-		return -1;
+	for (i = 1; i <= ibdev->num_ports; ++i)
+		if (reset_gid_table(ibdev, i))
+			return -1;
 
 	read_lock(&dev_base_lock);
+	spin_lock(&iboe->lock);
 
 	for_each_netdev(&init_net, dev) {
 		u8 port = mlx4_ib_get_dev_port(dev, ibdev);
@@ -1642,6 +1683,7 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
 			mlx4_ib_get_dev_addr(dev, ibdev, port);
 	}
 
+	spin_unlock(&iboe->lock);
 	read_unlock(&dev_base_lock);
 
 	return 0;
@@ -1656,25 +1698,57 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
 	spin_lock(&iboe->lock);
 	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
+		enum ib_port_state port_state = IB_PORT_NOP;
 		struct net_device *old_master = iboe->masters[port - 1];
+		struct net_device *curr_netdev;
 		struct net_device *curr_master;
+
 		iboe->netdevs[port - 1] =
 			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
+		if (iboe->netdevs[port - 1])
+			mlx4_ib_set_default_gid(ibdev,
+						iboe->netdevs[port - 1], port);
+		curr_netdev = iboe->netdevs[port - 1];
 
 		if (iboe->netdevs[port - 1] &&
 		    netif_is_bond_slave(iboe->netdevs[port - 1])) {
-			rtnl_lock();
 			iboe->masters[port - 1] = netdev_master_upper_dev_get(
 				iboe->netdevs[port - 1]);
-			rtnl_unlock();
+		} else {
+			iboe->masters[port - 1] = NULL;
 		}
 		curr_master = iboe->masters[port - 1];
 
+		if (curr_netdev) {
+			port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
+						IB_PORT_ACTIVE : IB_PORT_DOWN;
+			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
+		} else {
+			reset_gid_table(ibdev, port);
+		}
+		/* if using bonding/team and a slave port is down, we don't want
+		 * the bond IP based gids in the table since flows that select
+		 * port by gid may get the down port.
+		 */
+		if (curr_master && (port_state == IB_PORT_DOWN)) {
+			reset_gid_table(ibdev, port);
+			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
+		}
 		/* if bonding is used it is possible that we add it to masters
-		    only after IP address is assigned to the net bonding
-		    interface */
-		if (curr_master && (old_master != curr_master))
+		 * only after IP address is assigned to the net bonding
+		 * interface.
+		 */
+		if (curr_master && (old_master != curr_master)) {
+			reset_gid_table(ibdev, port);
+			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
 			mlx4_ib_get_dev_addr(curr_master, ibdev, port);
+		}
+
+		if (!curr_master && (old_master != curr_master)) {
+			reset_gid_table(ibdev, port);
+			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
+			mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
+		}
 	}
 
 	spin_unlock(&iboe->lock);
@@ -1810,6 +1884,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	int i, j;
 	int err;
 	struct mlx4_ib_iboe *iboe;
+	int ib_num_ports = 0;
 
 	pr_info_once("%s", mlx4_ib_version);
@@ -1985,10 +2060,14 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 		ibdev->counters[i] = -1;
 	}
 
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+		ib_num_ports++;
+
 	spin_lock_init(&ibdev->sm_lock);
 	mutex_init(&ibdev->cap_mask_mutex);
 
-	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
+	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
+	    ib_num_ports) {
 		ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
 		err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
 					    MLX4_IB_UC_STEER_QPN_ALIGN,
@@ -2051,7 +2130,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 			}
 		}
 #endif
+		for (i = 1 ; i <= ibdev->num_ports ; ++i)
+			reset_gid_table(ibdev, i);
+		rtnl_lock();
 		mlx4_ib_scan_netdevs(ibdev);
+		rtnl_unlock();
 		mlx4_ib_init_gid_table(ibdev);
 	}
......
 config MLX5_INFINIBAND
 	tristate "Mellanox Connect-IB HCA support"
-	depends on NETDEVICES && ETHERNET && PCI && X86
+	depends on NETDEVICES && ETHERNET && PCI
 	select NET_VENDOR_MELLANOX
 	select MLX5_CORE
 	---help---
......
@@ -261,8 +261,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
 		IB_DEVICE_PORT_ACTIVE_EVENT		|
 		IB_DEVICE_SYS_IMAGE_GUID		|
-		IB_DEVICE_RC_RNR_NAK_GEN		|
-		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
+		IB_DEVICE_RC_RNR_NAK_GEN;
 	flags = dev->mdev.caps.flags;
 	if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR)
 		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
@@ -536,24 +535,38 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 						  struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
-	struct mlx5_ib_alloc_ucontext_req req;
+	struct mlx5_ib_alloc_ucontext_req_v2 req;
 	struct mlx5_ib_alloc_ucontext_resp resp;
 	struct mlx5_ib_ucontext *context;
 	struct mlx5_uuar_info *uuari;
 	struct mlx5_uar *uars;
 	int gross_uuars;
 	int num_uars;
+	int ver;
 	int uuarn;
 	int err;
 	int i;
+	int reqlen;
 
 	if (!dev->ib_active)
 		return ERR_PTR(-EAGAIN);
 
-	err = ib_copy_from_udata(&req, udata, sizeof(req));
+	memset(&req, 0, sizeof(req));
+	reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
+	if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
+		ver = 0;
+	else if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req_v2))
+		ver = 2;
+	else
+		return ERR_PTR(-EINVAL);
+
+	err = ib_copy_from_udata(&req, udata, reqlen);
 	if (err)
 		return ERR_PTR(err);
 
+	if (req.flags || req.reserved)
+		return ERR_PTR(-EINVAL);
+
 	if (req.total_num_uuars > MLX5_MAX_UUARS)
 		return ERR_PTR(-ENOMEM);
 
@@ -626,6 +639,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	if (err)
 		goto out_uars;
 
+	uuari->ver = ver;
 	uuari->num_low_latency_uuars = req.num_low_latency_uuars;
 	uuari->uars = uars;
 	uuari->num_uars = num_uars;
......
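The compatibility fix above never asks userspace which ABI revision it speaks; it infers the request layout from the payload length, accepts only the known sizes, and requires the new flags/reserved fields to be zero so they can gain meaning later. A minimal userspace sketch of that length-based versioning scheme (hypothetical struct names):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical v0 and v2 request layouts; v2 only appends fields,
 * so the two are distinguishable by size alone. */
struct req_v0 { unsigned total, low_latency; };
struct req_v2 { unsigned total, low_latency, flags, reserved; };

static int parse_req(const void *buf, size_t len, struct req_v2 *out)
{
	int ver;

	memset(out, 0, sizeof(*out));	/* absent v2 fields read as 0 */
	if (len == sizeof(struct req_v0))
		ver = 0;
	else if (len == sizeof(struct req_v2))
		ver = 2;
	else
		return -1;		/* unknown ABI revision */
	memcpy(out, buf, len);
	/* new fields must stay zero until they gain a meaning */
	if (out->flags || out->reserved)
		return -1;
	return ver;
}

int main(void)
{
	struct req_v0 old_req = { 16, 4 };
	struct req_v2 parsed;

	printf("ver = %d\n", parse_req(&old_req, sizeof(old_req), &parsed));
	return 0;	/* prints "ver = 0" */
}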
@@ -216,7 +216,9 @@ static int sq_overhead(enum ib_qp_type qp_type)
 	case IB_QPT_UC:
 		size += sizeof(struct mlx5_wqe_ctrl_seg) +
-			sizeof(struct mlx5_wqe_raddr_seg);
+			sizeof(struct mlx5_wqe_raddr_seg) +
+			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
+			sizeof(struct mlx5_mkey_seg);
 		break;
 
 	case IB_QPT_UD:
@@ -428,11 +430,17 @@ static int alloc_uuar(struct mlx5_uuar_info *uuari,
 		break;
 
 	case MLX5_IB_LATENCY_CLASS_MEDIUM:
-		uuarn = alloc_med_class_uuar(uuari);
+		if (uuari->ver < 2)
+			uuarn = -ENOMEM;
+		else
+			uuarn = alloc_med_class_uuar(uuari);
 		break;
 
 	case MLX5_IB_LATENCY_CLASS_HIGH:
-		uuarn = alloc_high_class_uuar(uuari);
+		if (uuari->ver < 2)
+			uuarn = -ENOMEM;
+		else
+			uuarn = alloc_high_class_uuar(uuari);
 		break;
 
 	case MLX5_IB_LATENCY_CLASS_FAST_PATH:
@@ -657,8 +665,8 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
 	int err;
 
 	uuari = &dev->mdev.priv.uuari;
-	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
-		qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
+	if (init_attr->create_flags)
+		return -EINVAL;
 
 	if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
 		lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;
......
@@ -62,6 +62,13 @@ struct mlx5_ib_alloc_ucontext_req {
 	__u32	num_low_latency_uuars;
 };
 
+struct mlx5_ib_alloc_ucontext_req_v2 {
+	__u32	total_num_uuars;
+	__u32	num_low_latency_uuars;
+	__u32	flags;
+	__u32	reserved;
+};
+
 struct mlx5_ib_alloc_ucontext_resp {
 	__u32	qp_tab_size;
 	__u32	bf_reg_size;
......
@@ -675,8 +675,11 @@ static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
 	INIT_DELAYED_WORK(&nesdev->work, nes_recheck_link_status);
 
 	/* Initialize network devices */
-	if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL)
+	netdev = nes_netdev_init(nesdev, mmio_regs);
+	if (netdev == NULL) {
+		ret = -ENOMEM;
 		goto bail7;
+	}
 
 	/* Register network device */
 	ret = register_netdev(netdev);
......
@@ -127,7 +127,7 @@ static int ocrdma_addr_event(unsigned long event, struct net_device *netdev,
 	is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN;
 	if (is_vlan)
-		netdev = vlan_dev_real_dev(netdev);
+		netdev = rdma_vlan_dev_real_dev(netdev);
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) {
......
@@ -176,7 +176,7 @@ int ocrdma_query_port(struct ib_device *ibdev,
 	props->port_cap_flags =
 	    IB_PORT_CM_SUP |
 	    IB_PORT_REINIT_SUP |
-	    IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP;
+	    IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP | IB_PORT_IP_BASED_GIDS;
 	props->gid_tbl_len = OCRDMA_MAX_SGID;
 	props->pkey_tbl_len = 1;
 	props->bad_pkey_cntr = 0;
@@ -1416,7 +1416,7 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
 					      OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
 						OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
 	qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
-					      OCRDMA_QP_PARAMS_SQ_PSN_MASK) >>
+					      OCRDMA_QP_PARAMS_TCLASS_MASK) >>
 						OCRDMA_QP_PARAMS_TCLASS_SHIFT;
 	qp_attr->ah_attr.ah_flags = IB_AH_GRH;
......
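The ocrdma bug paired the SQ-PSN mask with the traffic-class shift, so the extracted value was wrong. When several fields share a register word, the mask and the shift must describe the same field. A small self-checking illustration with hypothetical field definitions:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical packing: tclass in bits 24-31, sq_psn in bits 0-23. */
#define QP_PARAMS_SQ_PSN_MASK  0x00ffffffu
#define QP_PARAMS_TCLASS_MASK  0xff000000u
#define QP_PARAMS_TCLASS_SHIFT 24

int main(void)
{
	uint32_t tclass_sq_psn = (0x5au << QP_PARAMS_TCLASS_SHIFT) | 0x1234;

	/* buggy: the wrong mask zeroes the field before the shift */
	uint32_t bad  = (tclass_sq_psn & QP_PARAMS_SQ_PSN_MASK)
			>> QP_PARAMS_TCLASS_SHIFT;
	/* fixed: mask and shift refer to the same field */
	uint32_t good = (tclass_sq_psn & QP_PARAMS_TCLASS_MASK)
			>> QP_PARAMS_TCLASS_SHIFT;

	printf("bad=0x%x good=0x%x\n", bad, good);	/* bad=0x0 good=0x5a */
	return 0;
}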
@@ -2395,6 +2395,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
 	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
 	qib_write_kreg(dd, kr_scratch, 0ULL);
 
+	/* ensure previous Tx parameters are not still forced */
+	qib_write_kreg_port(ppd, krp_tx_deemph_override,
+		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+		reset_tx_deemphasis_override));
+
 	if (qib_compat_ddr_negotiate) {
 		ppd->cpspec->ibdeltainprog = 1;
 		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
......
@@ -629,6 +629,7 @@ static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
 {
 	enum usnic_transport_type trans_type = qp_flow->trans_type;
 	int err;
+	uint16_t port_num = 0;
 
 	switch (trans_type) {
 	case USNIC_TRANSPORT_ROCE_CUSTOM:
@@ -637,9 +638,15 @@ static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
 	case USNIC_TRANSPORT_IPV4_UDP:
 		err = usnic_transport_sock_get_addr(qp_flow->udp.sock,
 						    NULL, NULL,
-						    (uint16_t *) id);
+						    &port_num);
 		if (err)
 			return err;
+		/*
+		 * Copy port_num to stack first and then to *id,
+		 * so that the short to int cast works for little
+		 * and big endian systems.
+		 */
+		*id = port_num;
 		break;
 	default:
 		usnic_err("Unsupported transport %u\n", trans_type);
......
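The usnic fix works because a plain u16-to-int assignment lets the compiler perform the width conversion, whereas storing through a (uint16_t *) cast of an int's address writes the halfword into the int's lowest-addressed bytes, which hold the most significant half on big-endian machines. A quick userspace demonstration of the difference:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int id = 0;
	uint16_t port_num = 0x1234;

	/* buggy pattern: write a u16 through a cast int pointer */
	*(uint16_t *)&id = port_num;
	printf("cast store: 0x%08x\n", id);
	/* little-endian: 0x00001234; big-endian: 0x12340000 */

	/* fixed pattern: assignment converts correctly on any byte order */
	id = port_num;
	printf("assignment: 0x%08x\n", id);	/* always 0x00001234 */
	return 0;
}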
@@ -610,11 +610,12 @@ void iser_snd_completion(struct iser_tx_desc *tx_desc,
 		ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
 					ISER_HEADERS_LEN, DMA_TO_DEVICE);
 		kmem_cache_free(ig.desc_cache, tx_desc);
+		tx_desc = NULL;
 	}
 
 	atomic_dec(&ib_conn->post_send_buf_count);
 
-	if (tx_desc->type == ISCSI_TX_CONTROL) {
+	if (tx_desc && tx_desc->type == ISCSI_TX_CONTROL) {
 		/* this arithmetic is legal by libiscsi dd_data allocation */
 		task = (void *) ((long)(void *)tx_desc -
 				  sizeof(struct iscsi_task));
......
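The iser fix is the standard guard against a use-after-free: NULL the pointer at the free site and re-test it before any later dereference. Distilled to its skeleton (userspace sketch):

#include <stdio.h>
#include <stdlib.h>

struct desc { int type; };

int main(void)
{
	struct desc *d = malloc(sizeof(*d));

	if (!d)
		return 1;
	d->type = 1;

	/* the completion path consumed the descriptor */
	free(d);
	d = NULL;			/* poison the pointer at the free site */

	/* later code re-checks instead of dereferencing blindly */
	if (d && d->type == 1)
		printf("control descriptor\n");
	else
		printf("descriptor already freed\n");
	return 0;
}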
@@ -652,9 +652,13 @@ static int iser_disconnected_handler(struct rdma_cm_id *cma_id)
 	/* getting here when the state is UP means that the conn is being *
 	 * terminated asynchronously from the iSCSI layer's perspective.  */
 	if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
-				      ISER_CONN_TERMINATING))
-		iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
-				   ISCSI_ERR_CONN_FAILED);
+				      ISER_CONN_TERMINATING)) {
+		if (ib_conn->iser_conn)
+			iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
+					   ISCSI_ERR_CONN_FAILED);
+		else
+			iser_err("iscsi_iser connection isn't bound\n");
+	}
 
 	/* Complete the termination process if no posts are pending */
 	if (ib_conn->post_recv_buf_count == 0 &&
......
@@ -4,5 +4,5 @@
 config MLX5_CORE
 	tristate
-	depends on PCI && X86
+	depends on PCI
 	default n
@@ -38,8 +38,10 @@
 #include <linux/pci.h>
 #include <linux/spinlock_types.h>
 #include <linux/semaphore.h>
+#include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/radix-tree.h>
+
 #include <linux/mlx5/device.h>
 #include <linux/mlx5/doorbell.h>
@@ -227,6 +229,7 @@ struct mlx5_uuar_info {
 	 * protect uuar allocation data structs
 	 */
 	struct mutex	lock;
+	u32		ver;
 };
 
 struct mlx5_bf {
......
@@ -226,7 +226,8 @@ enum ib_port_cap_flags {
 	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
 	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
 	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
-	IB_PORT_CLIENT_REG_SUP			= 1 << 25
+	IB_PORT_CLIENT_REG_SUP			= 1 << 25,
+	IB_PORT_IP_BASED_GIDS			= 1 << 26
 };
 
 enum ib_port_width {
......
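Applications can discover the new semantics by testing this bit in the queried port attributes. A sketch using the standard libibverbs query path; the macro is defined locally in case installed headers predate the kernel change:

#include <stdio.h>
#include <infiniband/verbs.h>

/* Kernel's new capability bit, mirrored here for older headers. */
#ifndef IB_PORT_IP_BASED_GIDS
#define IB_PORT_IP_BASED_GIDS (1 << 26)
#endif

int main(void)
{
	int num;
	struct ibv_device **list = ibv_get_device_list(&num);

	if (!list || num == 0)
		return 1;

	struct ibv_context *ctx = ibv_open_device(list[0]);
	struct ibv_port_attr attr;

	if (ctx && !ibv_query_port(ctx, 1, &attr))
		printf("port 1 %s IP-based GIDs\n",
		       (attr.port_cap_flags & IB_PORT_IP_BASED_GIDS) ?
		       "reports" : "does not report");

	if (ctx)
		ibv_close_device(ctx);
	ibv_free_device_list(list);
	return 0;
}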