提交 8f399921 编写于 作者: R Roland Dreier

Merge branches 'cma', 'cxgb4', 'flowsteer', 'ipoib', 'misc', 'mlx4', 'mlx5',...

Merge branches 'cma', 'cxgb4', 'flowsteer', 'ipoib', 'misc', 'mlx4', 'mlx5', 'ocrdma', 'qib', 'srp' and 'usnic' into for-next
all: rport_state_diagram.svg rport_state_diagram.png
rport_state_diagram.svg: rport_state_diagram.dot
dot -Tsvg -o $@ $<
rport_state_diagram.png: rport_state_diagram.dot
dot -Tpng -o $@ $<
digraph srp_initiator {
node [shape = doublecircle]; running lost;
node [shape = circle];
{
rank = min;
running_rta [ label = "running;\nreconnect\ntimer\nactive" ];
};
running [ label = "running;\nreconnect\ntimer\nstopped" ];
blocked;
failfast [ label = "fail I/O\nfast" ];
lost;
running -> running_rta [ label = "fast_io_fail_tmo = off and\ndev_loss_tmo = off;\nsrp_start_tl_fail_timers()" ];
running_rta -> running [ label = "fast_io_fail_tmo = off and\ndev_loss_tmo = off;\nreconnecting succeeded" ];
running -> blocked [ label = "fast_io_fail_tmo >= 0 or\ndev_loss_tmo >= 0;\nsrp_start_tl_fail_timers()" ];
running -> failfast [ label = "fast_io_fail_tmo = off and\ndev_loss_tmo = off;\nreconnecting failed\n" ];
blocked -> failfast [ label = "fast_io_fail_tmo\nexpired or\nreconnecting\nfailed" ];
blocked -> lost [ label = "dev_loss_tmo\nexpired or\nsrp_stop_rport_timers()" ];
failfast -> lost [ label = "dev_loss_tmo\nexpired or\nsrp_stop_rport_timers()" ];
blocked -> running [ label = "reconnecting\nsucceeded" ];
failfast -> failfast [ label = "reconnecting\nfailed" ];
failfast -> running [ label = "reconnecting\nsucceeded" ];
running -> lost [ label = "srp_stop_rport_timers()" ];
running_rta -> lost [ label = "srp_stop_rport_timers()" ];
}
......@@ -2158,6 +2158,11 @@ M: Nishank Trivedi <nistrive@cisco.com>
S: Supported
F: drivers/net/ethernet/cisco/enic/
CISCO VIC LOW LATENCY NIC DRIVER
M: Upinder Malhi <umalhi@cisco.com>
S: Supported
F: drivers/infiniband/hw/usnic
CIRRUS LOGIC EP93XX ETHERNET DRIVER
M: Hartley Sweeten <hsweeten@visionengravers.com>
L: netdev@vger.kernel.org
......@@ -7468,7 +7473,7 @@ S: Maintained
F: drivers/scsi/sr*
SCSI RDMA PROTOCOL (SRP) INITIATOR
M: David Dillow <dillowda@ornl.gov>
M: Bart Van Assche <bvanassche@acm.org>
L: linux-rdma@vger.kernel.org
S: Supported
W: http://www.openfabrics.org
......
......@@ -53,6 +53,7 @@ source "drivers/infiniband/hw/mlx4/Kconfig"
source "drivers/infiniband/hw/mlx5/Kconfig"
source "drivers/infiniband/hw/nes/Kconfig"
source "drivers/infiniband/hw/ocrdma/Kconfig"
source "drivers/infiniband/hw/usnic/Kconfig"
source "drivers/infiniband/ulp/ipoib/Kconfig"
......
......@@ -10,6 +10,7 @@ obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/
obj-$(CONFIG_MLX5_INFINIBAND) += hw/mlx5/
obj-$(CONFIG_INFINIBAND_NES) += hw/nes/
obj-$(CONFIG_INFINIBAND_OCRDMA) += hw/ocrdma/
obj-$(CONFIG_INFINIBAND_USNIC) += hw/usnic/
obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/
obj-$(CONFIG_INFINIBAND_SRPT) += ulp/srpt/
......
......@@ -334,7 +334,6 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
{
struct iwcm_id_private *cm_id_priv;
unsigned long flags;
int ret;
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
/*
......@@ -350,7 +349,7 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
cm_id_priv->state = IW_CM_STATE_DESTROYING;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
/* destroy the listening endpoint */
ret = cm_id->device->iwcm->destroy_listen(cm_id);
cm_id->device->iwcm->destroy_listen(cm_id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
break;
case IW_CM_STATE_ESTABLISHED:
......
......@@ -613,6 +613,7 @@ static ssize_t show_node_type(struct device *device,
case RDMA_NODE_IB_CA: return sprintf(buf, "%d: CA\n", dev->node_type);
case RDMA_NODE_RNIC: return sprintf(buf, "%d: RNIC\n", dev->node_type);
case RDMA_NODE_USNIC: return sprintf(buf, "%d: usNIC\n", dev->node_type);
case RDMA_NODE_USNIC_UDP: return sprintf(buf, "%d: usNIC UDP\n", dev->node_type);
case RDMA_NODE_IB_SWITCH: return sprintf(buf, "%d: switch\n", dev->node_type);
case RDMA_NODE_IB_ROUTER: return sprintf(buf, "%d: router\n", dev->node_type);
default: return sprintf(buf, "%d: <unknown>\n", dev->node_type);
......
......@@ -116,6 +116,8 @@ rdma_node_get_transport(enum rdma_node_type node_type)
return RDMA_TRANSPORT_IWARP;
case RDMA_NODE_USNIC:
return RDMA_TRANSPORT_USNIC;
case RDMA_NODE_USNIC_UDP:
return RDMA_TRANSPORT_USNIC_UDP;
default:
BUG();
return 0;
......@@ -133,6 +135,7 @@ enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_
return IB_LINK_LAYER_INFINIBAND;
case RDMA_TRANSPORT_IWARP:
case RDMA_TRANSPORT_USNIC:
case RDMA_TRANSPORT_USNIC_UDP:
return IB_LINK_LAYER_ETHERNET;
default:
return IB_LINK_LAYER_UNSPECIFIED;
......
......@@ -169,7 +169,8 @@ static void handle_vq(struct c2_dev *c2dev, u32 mq_index)
* We should never get here, as the adapter should
* never send us a reply that we're not expecting.
*/
vq_repbuf_free(c2dev, host_msg);
if (reply_msg != NULL)
vq_repbuf_free(c2dev, host_msg);
pr_debug("handle_vq: UNEXPECTEDLY got NULL req\n");
return;
}
......
......@@ -76,7 +76,7 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
INIT_ULPTX_WR(req, wr_len, 0, 0);
req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
(wait ? FW_WR_COMPL(1) : 0));
req->wr.wr_lo = wait ? (__force __be64)&wr_wait : 0;
req->wr.wr_lo = wait ? (__force __be64)(unsigned long) &wr_wait : 0L;
req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE));
req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1));
......
......@@ -55,6 +55,7 @@
#define DRV_RELDATE "April 4, 2008"
#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
......@@ -92,21 +93,27 @@ static union ib_gid zgid;
/*
 * Decide whether device-managed flow steering (DMFS) can be enabled.
 *
 * Returns non-zero only when the device is in DEVICE_MANAGED steering
 * mode and the firmware advertises support for every link layer that
 * has ports (FS_EN for Ethernet, DMFS_IPOIB for IB).  DMFS is also
 * disabled when IB ports are present in a multifunction environment.
 */
static int check_flow_steering_support(struct mlx4_dev *dev)
{
	int eth_num_ports = 0;
	int ib_num_ports = 0;

	int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;

	if (dmfs) {
		int i;

		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
			eth_num_ports++;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
			ib_num_ports++;
		/* each present link layer must have FW flow-steering support */
		dmfs &= (!ib_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
			(!eth_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
		if (ib_num_ports && mlx4_is_mfunc(dev)) {
			pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
			dmfs = 0;
		}
	}
	return dmfs;
}
static int mlx4_ib_query_device(struct ib_device *ibdev,
......@@ -165,7 +172,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
else
props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
if (check_flow_steering_support(dev->dev))
if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
}
......@@ -819,6 +826,7 @@ struct mlx4_ib_steering {
};
static int parse_flow_attr(struct mlx4_dev *dev,
u32 qp_num,
union ib_flow_spec *ib_spec,
struct _rule_hw *mlx4_spec)
{
......@@ -834,6 +842,14 @@ static int parse_flow_attr(struct mlx4_dev *dev,
mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
break;
case IB_FLOW_SPEC_IB:
type = MLX4_NET_TRANS_RULE_ID_IB;
mlx4_spec->ib.l3_qpn =
cpu_to_be32(qp_num);
mlx4_spec->ib.qpn_mask =
cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
break;
case IB_FLOW_SPEC_IPV4:
type = MLX4_NET_TRANS_RULE_ID_IPV4;
......@@ -865,6 +881,115 @@ static int parse_flow_attr(struct mlx4_dev *dev,
return mlx4_hw_rule_sz(dev, type);
}
/*
 * A default steering rule that may be implicitly appended to a user
 * flow.  Per __mlx4_ib_default_rules_match(): the user flow must carry
 * each spec listed in mandatory_fields (same layer and same type) and
 * none of the specs in mandatory_not_fields; rules_create_list names
 * the specs that are then created on the flow's behalf.  A 0 entry in
 * any list slot means "unused".
 */
struct default_rules {
__u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
__u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
__u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
__u8 link_layer;
};
/* On an IB link layer, an IPv4 flow with no explicit ETH spec gets an
 * implicit IB L2 spec created for it. */
static const struct default_rules default_table[] = {
{
.mandatory_fields = {IB_FLOW_SPEC_IPV4},
.mandatory_not_fields = {IB_FLOW_SPEC_ETH},
.rules_create_list = {IB_FLOW_SPEC_IB},
.link_layer = IB_LINK_LAYER_INFINIBAND
}
};
/*
 * Match a user flow attribute against the table of default rules.
 *
 * Returns the index into default_table[] of the first entry whose link
 * layer matches the QP's port and whose mandatory / forbidden spec
 * lists are satisfied by @flow_attr, or -1 when no entry matches.
 *
 * NOTE(review): a mismatch jumps straight to "out" rather than trying
 * the next table entry; harmless while the table has a single entry,
 * but worth confirming if more entries are ever added.
 */
static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
					 struct ib_flow_attr *flow_attr)
{
	int i, j, k;
	void *ib_flow;
	const struct default_rules *pdefault_rules = default_table;
	u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);

	for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
		__u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];

		memset(&field_types, 0, sizeof(field_types));

		if (link_layer != pdefault_rules->link_layer)
			continue;

		/* the specs follow the ib_flow_attr header in memory */
		ib_flow = flow_attr + 1;
		/* we assume the specs are sorted */
		for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
		     j < flow_attr->num_of_specs; k++) {
			union ib_flow_spec *current_flow =
				(union ib_flow_spec *)ib_flow;

			/* same layer but different type */
			if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
			     (pdefault_rules->mandatory_fields[k] &
			      IB_FLOW_SPEC_LAYER_MASK)) &&
			    (current_flow->type !=
			     pdefault_rules->mandatory_fields[k]))
				goto out;

			/* same layer, try match next one */
			if (current_flow->type ==
			    pdefault_rules->mandatory_fields[k]) {
				j++;
				ib_flow +=
					((union ib_flow_spec *)ib_flow)->size;
			}
		}

		/* second pass: reject if any forbidden spec is present */
		ib_flow = flow_attr + 1;
		for (j = 0; j < flow_attr->num_of_specs;
		     j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
			for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
				/* same layer and same type */
				if (((union ib_flow_spec *)ib_flow)->type ==
				    pdefault_rules->mandatory_not_fields[k])
					goto out;

		return i;
	}
out:
	return -1;
}
/*
 * Emit the hardware specs for one default_table entry into @mlx4_spec.
 *
 * Walks pdefault_rules->rules_create_list and appends one hardware
 * rule per non-zero entry.  Returns the total number of bytes written,
 * or -EINVAL on an unknown rule type or a parse failure.
 */
static int __mlx4_ib_create_default_rules(
		struct mlx4_ib_dev *mdev,
		struct ib_qp *qp,
		const struct default_rules *pdefault_rules,
		struct _rule_hw *mlx4_spec) {
	int size = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
		int ret;
		union ib_flow_spec ib_spec;

		switch (pdefault_rules->rules_create_list[i]) {
		case 0:
			/* no rule */
			continue;
		case IB_FLOW_SPEC_IB:
			ib_spec.type = IB_FLOW_SPEC_IB;
			ib_spec.size = sizeof(struct ib_flow_spec_ib);
			break;
		default:
			/* invalid rule */
			return -EINVAL;
		}
		/* We must put empty rule, qpn is being ignored */
		ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
				      mlx4_spec);
		if (ret < 0) {
			pr_info("invalid parsing\n");
			return -EINVAL;
		}

		/* advance past the rule just written */
		mlx4_spec = (void *)mlx4_spec + ret;
		size += ret;
	}
	return size;
}
static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
int domain,
enum mlx4_net_trans_promisc_mode flow_type,
......@@ -876,6 +1001,7 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
struct mlx4_ib_dev *mdev = to_mdev(qp->device);
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_net_trans_rule_hw_ctrl *ctrl;
int default_flow;
static const u16 __mlx4_domain[] = {
[IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
......@@ -910,8 +1036,21 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
ib_flow = flow_attr + 1;
size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
/* Add default flows */
default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
if (default_flow >= 0) {
ret = __mlx4_ib_create_default_rules(
mdev, qp, default_table + default_flow,
mailbox->buf + size);
if (ret < 0) {
mlx4_free_cmd_mailbox(mdev->dev, mailbox);
return -EINVAL;
}
size += ret;
}
for (i = 0; i < flow_attr->num_of_specs; i++) {
ret = parse_flow_attr(mdev->dev, ib_flow, mailbox->buf + size);
ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
mailbox->buf + size);
if (ret < 0) {
mlx4_free_cmd_mailbox(mdev->dev, mailbox);
return -EINVAL;
......@@ -1682,6 +1821,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
}
if (check_flow_steering_support(dev)) {
ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
ibdev->ib_dev.create_flow = mlx4_ib_create_flow;
ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow;
......@@ -1710,8 +1850,35 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
spin_lock_init(&ibdev->sm_lock);
mutex_init(&ibdev->cap_mask_mutex);
if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
MLX4_IB_UC_STEER_QPN_ALIGN,
&ibdev->steer_qpn_base);
if (err)
goto err_counter;
ibdev->ib_uc_qpns_bitmap =
kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
sizeof(long),
GFP_KERNEL);
if (!ibdev->ib_uc_qpns_bitmap) {
dev_err(&dev->pdev->dev, "bit map alloc failed\n");
goto err_steer_qp_release;
}
bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);
err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
dev, ibdev->steer_qpn_base,
ibdev->steer_qpn_base +
ibdev->steer_qpn_count - 1);
if (err)
goto err_steer_free_bitmap;
}
if (ib_register_device(&ibdev->ib_dev, NULL))
goto err_counter;
goto err_steer_free_bitmap;
if (mlx4_ib_mad_init(ibdev))
goto err_reg;
......@@ -1762,6 +1929,13 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
err_reg:
ib_unregister_device(&ibdev->ib_dev);
err_steer_free_bitmap:
kfree(ibdev->ib_uc_qpns_bitmap);
err_steer_qp_release:
if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
ibdev->steer_qpn_count);
err_counter:
for (; i; --i)
if (ibdev->counters[i - 1] != -1)
......@@ -1782,6 +1956,69 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
return NULL;
}
/*
 * Allocate a contiguous range of @count steerable UC QP numbers from
 * the device's reserved bitmap.  On success *qpn receives the first
 * QPN of the range and 0 is returned; otherwise the negative error
 * from the bitmap allocator is propagated.
 */
int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
{
	int region;

	WARN_ON(!dev->ib_uc_qpns_bitmap);

	region = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
					 dev->steer_qpn_count,
					 get_count_order(count));
	if (region >= 0) {
		*qpn = dev->steer_qpn_base + region;
		return 0;
	}

	return region;
}
/*
 * Return a range of @count steerable UC QP numbers starting at @qpn to
 * the bitmap.  A zero @qpn or a device without device-managed steering
 * is silently ignored.
 */
void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
{
	if (dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED || !qpn)
		return;

	BUG_ON(qpn < dev->steer_qpn_base);

	bitmap_release_region(dev->ib_uc_qpns_bitmap,
			      qpn - dev->steer_qpn_base,
			      get_count_order(count));
}
/*
 * Attach (@is_attach != 0) or detach a default IB L2 steering rule for
 * a NETIF QP.  On attach, builds a one-spec flow (an empty IB L2 spec)
 * and registers it, storing the resulting registration handle in
 * mqp->reg_id; on detach, destroys the flow identified by mqp->reg_id.
 * Returns 0 on success or a negative errno.
 */
int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
int is_attach)
{
int err;
size_t flow_size;
struct ib_flow_attr *flow = NULL;
struct ib_flow_spec_ib *ib_spec;

if (is_attach) {
/* one ib_flow_attr header followed immediately by one IB spec */
flow_size = sizeof(struct ib_flow_attr) +
sizeof(struct ib_flow_spec_ib);
flow = kzalloc(flow_size, GFP_KERNEL);
if (!flow)
return -ENOMEM;
flow->port = mqp->port;
flow->num_of_specs = 1;
flow->size = flow_size;
ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
ib_spec->type = IB_FLOW_SPEC_IB;
ib_spec->size = sizeof(struct ib_flow_spec_ib);
/* Add an empty rule for IB L2 */
memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));

err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
IB_FLOW_DOMAIN_NIC,
MLX4_FS_REGULAR,
&mqp->reg_id);
} else {
err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
}
/* kfree(NULL) is a no-op on the detach path */
kfree(flow);
return err;
}
static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
{
struct mlx4_ib_dev *ibdev = ibdev_ptr;
......@@ -1795,6 +2032,13 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
pr_warn("failure unregistering notifier\n");
ibdev->iboe.nb.notifier_call = NULL;
}
if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
ibdev->steer_qpn_count);
kfree(ibdev->ib_uc_qpns_bitmap);
}
iounmap(ibdev->uar_map);
for (p = 0; p < ibdev->num_ports; ++p)
if (ibdev->counters[p] != -1)
......
......@@ -68,6 +68,8 @@ enum {
/*module param to indicate if SM assigns the alias_GUID*/
extern int mlx4_ib_sm_guid_assign;
#define MLX4_IB_UC_STEER_QPN_ALIGN 1
#define MLX4_IB_UC_MAX_NUM_QPS 256
struct mlx4_ib_ucontext {
struct ib_ucontext ibucontext;
struct mlx4_uar uar;
......@@ -153,6 +155,7 @@ struct mlx4_ib_wq {
enum mlx4_ib_qp_flags {
MLX4_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
MLX4_IB_QP_NETIF = IB_QP_CREATE_NETIF_QP,
MLX4_IB_SRIOV_TUNNEL_QP = 1 << 30,
MLX4_IB_SRIOV_SQP = 1 << 31,
};
......@@ -270,6 +273,7 @@ struct mlx4_ib_qp {
struct list_head gid_list;
struct list_head steering_rules;
struct mlx4_ib_buf *sqp_proxy_rcv;
u64 reg_id;
};
......@@ -494,6 +498,10 @@ struct mlx4_ib_dev {
struct kobject *dev_ports_parent[MLX4_MFUNC_MAX];
struct mlx4_ib_iov_port iov_ports[MLX4_MAX_PORTS];
struct pkey_mgt pkeys;
unsigned long *ib_uc_qpns_bitmap;
int steer_qpn_count;
int steer_qpn_base;
int steering_support;
};
struct ib_event_work {
......@@ -752,5 +760,9 @@ void mlx4_ib_device_unregister_sysfs(struct mlx4_ib_dev *device);
__be64 mlx4_ib_gen_node_guid(void);
int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn);
void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count);
int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
int is_attach);
#endif /* MLX4_IB_H */
......@@ -716,6 +716,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
qp->flags |= MLX4_IB_QP_LSO;
if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
if (dev->steering_support ==
MLX4_STEERING_MODE_DEVICE_MANAGED)
qp->flags |= MLX4_IB_QP_NETIF;
else
goto err;
}
err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp);
if (err)
goto err;
......@@ -765,7 +773,11 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
if (init_attr->qp_type == IB_QPT_RAW_PACKET)
err = mlx4_qp_reserve_range(dev->dev, 1, 1 << 8, &qpn);
else
err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn);
if (qp->flags & MLX4_IB_QP_NETIF)
err = mlx4_ib_steer_qp_alloc(dev, 1, &qpn);
else
err = mlx4_qp_reserve_range(dev->dev, 1, 1,
&qpn);
if (err)
goto err_proxy;
}
......@@ -790,8 +802,12 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
return 0;
err_qpn:
if (!sqpn)
mlx4_qp_release_range(dev->dev, qpn, 1);
if (!sqpn) {
if (qp->flags & MLX4_IB_QP_NETIF)
mlx4_ib_steer_qp_free(dev, qpn, 1);
else
mlx4_qp_release_range(dev->dev, qpn, 1);
}
err_proxy:
if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
free_proxy_bufs(pd->device, qp);
......@@ -932,8 +948,12 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
mlx4_qp_free(dev->dev, &qp->mqp);
if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp))
mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp)) {
if (qp->flags & MLX4_IB_QP_NETIF)
mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1);
else
mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
}
mlx4_mtt_cleanup(dev->dev, &qp->mtt);
......@@ -987,9 +1007,16 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
*/
if (init_attr->create_flags & ~(MLX4_IB_QP_LSO |
MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK |
MLX4_IB_SRIOV_TUNNEL_QP | MLX4_IB_SRIOV_SQP))
MLX4_IB_SRIOV_TUNNEL_QP |
MLX4_IB_SRIOV_SQP |
MLX4_IB_QP_NETIF))
return ERR_PTR(-EINVAL);
if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
if (init_attr->qp_type != IB_QPT_UD)
return ERR_PTR(-EINVAL);
}
if (init_attr->create_flags &&
(udata ||
((init_attr->create_flags & ~MLX4_IB_SRIOV_SQP) &&
......@@ -1235,6 +1262,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
struct mlx4_qp_context *context;
enum mlx4_qp_optpar optpar = 0;
int sqd_event;
int steer_qp = 0;
int err = -EINVAL;
context = kzalloc(sizeof *context, GFP_KERNEL);
......@@ -1319,6 +1347,11 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX;
} else
context->pri_path.counter_index = 0xff;
if (qp->flags & MLX4_IB_QP_NETIF) {
mlx4_ib_steer_qp_reg(dev, qp, 1);
steer_qp = 1;
}
}
if (attr_mask & IB_QP_PKEY_INDEX) {
......@@ -1547,9 +1580,14 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
qp->sq_next_wqe = 0;
if (qp->rq.wqe_cnt)
*qp->db.db = 0;
if (qp->flags & MLX4_IB_QP_NETIF)
mlx4_ib_steer_qp_reg(dev, qp, 0);
}
out:
if (err && steer_qp)
mlx4_ib_steer_qp_reg(dev, qp, 0);
kfree(context);
return err;
}
......@@ -2762,6 +2800,9 @@ int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
if (qp->flags & MLX4_IB_QP_LSO)
qp_init_attr->create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;
if (qp->flags & MLX4_IB_QP_NETIF)
qp_init_attr->create_flags |= IB_QP_CREATE_NETIF_QP;
qp_init_attr->sq_sig_type =
qp->sq_signal_bits == cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) ?
IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
......
......@@ -582,8 +582,10 @@ static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave)
p->pkey_group.attrs =
alloc_group_attrs(show_port_pkey, store_port_pkey,
dev->dev->caps.pkey_table_len[port_num]);
if (!p->pkey_group.attrs)
if (!p->pkey_group.attrs) {
ret = -ENOMEM;
goto err_alloc;
}
ret = sysfs_create_group(&p->kobj, &p->pkey_group);
if (ret)
......@@ -591,8 +593,10 @@ static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave)
p->gid_group.name = "gid_idx";
p->gid_group.attrs = alloc_group_attrs(show_port_gid_idx, NULL, 1);
if (!p->gid_group.attrs)
if (!p->gid_group.attrs) {
ret = -ENOMEM;
goto err_free_pkey;
}
ret = sysfs_create_group(&p->kobj, &p->gid_group);
if (ret)
......
......@@ -73,14 +73,24 @@ static void *get_cqe(struct mlx5_ib_cq *cq, int n)
return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz);
}
/*
 * Software ownership bit for CQE index @n in a CQ of @nent entries:
 * toggles on each wraparound pass over the (power-of-two sized) ring.
 */
static u8 sw_ownership_bit(int n, int nent)
{
	return !!(n & nent);
}
/*
 * Return the CQE at consumer index @n if it is in software ownership,
 * or NULL when it still belongs to hardware.  A CQE is consumable only
 * when its opcode is valid and its ownership bit matches the parity of
 * the current pass over the ring.  For 128-byte CQEs the mlx5_cqe64
 * lives in the second half of the entry.
 */
static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
{
	void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
	struct mlx5_cqe64 *cqe64;

	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

	if (likely((cqe64->op_own) >> 4 != MLX5_CQE_INVALID) &&
	    !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
		return cqe;
	} else {
		return NULL;
	}
}
static void *next_cqe_sw(struct mlx5_ib_cq *cq)
......@@ -351,6 +361,11 @@ static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
qp->sq.last_poll = tail;
}
/* Release the pages backing a kernel CQ buffer. */
static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
{
mlx5_buf_free(&dev->mdev, &buf->buf);
}
static int mlx5_poll_one(struct mlx5_ib_cq *cq,
struct mlx5_ib_qp **cur_qp,
struct ib_wc *wc)
......@@ -366,6 +381,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
void *cqe;
int idx;
repoll:
cqe = next_cqe_sw(cq);
if (!cqe)
return -EAGAIN;
......@@ -379,7 +395,18 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
*/
rmb();
/* TBD: resize CQ */
opcode = cqe64->op_own >> 4;
if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) {
if (likely(cq->resize_buf)) {
free_cq_buf(dev, &cq->buf);
cq->buf = *cq->resize_buf;
kfree(cq->resize_buf);
cq->resize_buf = NULL;
goto repoll;
} else {
mlx5_ib_warn(dev, "unexpected resize cqe\n");
}
}
qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
......@@ -398,7 +425,6 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
}
wc->qp = &(*cur_qp)->ibqp;
opcode = cqe64->op_own >> 4;
switch (opcode) {
case MLX5_CQE_REQ:
wq = &(*cur_qp)->sq;
......@@ -503,15 +529,11 @@ static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
return err;
buf->cqe_size = cqe_size;
buf->nent = nent;
return 0;
}
static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
{
mlx5_buf_free(&dev->mdev, &buf->buf);
}
static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
struct ib_ucontext *context, struct mlx5_ib_cq *cq,
int entries, struct mlx5_create_cq_mbox_in **cqb,
......@@ -576,16 +598,16 @@ static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context)
ib_umem_release(cq->buf.umem);
}
static void init_cq_buf(struct mlx5_ib_cq *cq, int nent)
static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf)
{
int i;
void *cqe;
struct mlx5_cqe64 *cqe64;
for (i = 0; i < nent; i++) {
cqe = get_cqe(cq, i);
cqe64 = (cq->buf.cqe_size == 64) ? cqe : cqe + 64;
cqe64->op_own = 0xf1;
for (i = 0; i < buf->nent; i++) {
cqe = get_cqe_from_buf(buf, i, buf->cqe_size);
cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
cqe64->op_own = MLX5_CQE_INVALID << 4;
}
}
......@@ -610,7 +632,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
if (err)
goto err_db;
init_cq_buf(cq, entries);
init_cq_buf(cq, &cq->buf);
*inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * cq->buf.buf.npages;
*cqb = mlx5_vzalloc(*inlen);
......@@ -818,12 +840,266 @@ void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
/*
 * Modify CQ moderation parameters (event count and period).
 *
 * Returns -ENOSYS when the firmware lacks CQ-moderation support,
 * -ENOMEM on allocation failure, otherwise the firmware command
 * status.
 */
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx5_modify_cq_mbox_in *in;
	struct mlx5_ib_dev *dev = to_mdev(cq->device);
	struct mlx5_ib_cq *mcq = to_mcq(cq);
	int err;
	u32 fsel;

	if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
		return -ENOSYS;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	in->cqn = cpu_to_be32(mcq->mcq.cqn);
	/* modify both the period and the max count fields */
	fsel = (MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);
	in->ctx.cq_period = cpu_to_be16(cq_period);
	in->ctx.cq_max_count = cpu_to_be16(cq_count);
	in->field_select = cpu_to_be32(fsel);
	err = mlx5_core_modify_cq(&dev->mdev, &mcq->mcq, in, sizeof(*in));
	kfree(in);

	if (err)
		mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);

	return err;
}
/*
 * Prepare a user-space CQ resize: read the resize request from
 * @udata, pin the new user buffer, and record it in cq->resize_umem.
 * On success fills *npas, *page_shift and *cqe_size for the firmware
 * command and returns 0; otherwise returns a negative errno.
 */
static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
int entries, struct ib_udata *udata, int *npas,
int *page_shift, int *cqe_size)
{
struct mlx5_ib_resize_cq ucmd;
struct ib_umem *umem;
int err;
int npages;
struct ib_ucontext *context = cq->buf.umem->context;

err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
if (err)
return err;

/* reserved fields must be zero for forward compatibility */
if (ucmd.reserved0 || ucmd.reserved1)
return -EINVAL;

umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
IB_ACCESS_LOCAL_WRITE, 1);
if (IS_ERR(umem)) {
err = PTR_ERR(umem);
return err;
}

mlx5_ib_cont_pages(umem, ucmd.buf_addr, &npages, page_shift,
npas, NULL);

cq->resize_umem = umem;
*cqe_size = ucmd.cqe_size;

return 0;
}
/* Undo resize_user(): unpin the staged user resize buffer. */
static void un_resize_user(struct mlx5_ib_cq *cq)
{
ib_umem_release(cq->resize_umem);
}
static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
int entries, int cqe_size)
{
int err;
cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
if (!cq->resize_buf)
return -ENOMEM;
err = alloc_cq_buf(dev, cq->resize_buf, entries, cqe_size);
if (err)
goto ex;
init_cq_buf(cq, cq->resize_buf);
return 0;
ex:
kfree(cq->resize_buf);
return err;
}
/* Undo resize_kernel(): free the staged kernel resize buffer. */
static void un_resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
free_cq_buf(dev, cq->resize_buf);
cq->resize_buf = NULL;
}
/*
 * Copy every software-owned CQE from the old CQ buffer into the
 * staged resize buffer, up to (but not including) the RESIZE_CQ
 * marker CQE written by hardware.  Each copied CQE gets the ownership
 * bit appropriate for its position in the new ring.  Called with the
 * CQ lock held by the resize path.  Returns 0 on success, -EINVAL if
 * the CQE sizes differ or a CQE unexpectedly belongs to hardware, and
 * -ENOMEM if the ring wraps without finding the resize marker.
 */
static int copy_resize_cqes(struct mlx5_ib_cq *cq)
{
struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
struct mlx5_cqe64 *scqe64;
struct mlx5_cqe64 *dcqe64;
void *start_cqe;
void *scqe;
void *dcqe;
int ssize;
int dsize;
int i;
u8 sw_own;

/* only same-size CQE resize is supported */
ssize = cq->buf.cqe_size;
dsize = cq->resize_buf->cqe_size;
if (ssize != dsize) {
mlx5_ib_warn(dev, "resize from different cqe size is not supported\n");
return -EINVAL;
}

i = cq->mcq.cons_index;
scqe = get_sw_cqe(cq, i);
scqe64 = ssize == 64 ? scqe : scqe + 64;
/* remember where we started so a full wrap can be detected */
start_cqe = scqe;
if (!scqe) {
mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
return -EINVAL;
}

while ((scqe64->op_own >> 4) != MLX5_CQE_RESIZE_CQ) {
/* NOTE(review): mask looks like it should be (nent - 1) for a
 * power-of-two ring index — confirm against sw_ownership_bit()
 * usage and the firmware spec. */
dcqe = get_cqe_from_buf(cq->resize_buf,
(i + 1) & (cq->resize_buf->nent),
dsize);
dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
memcpy(dcqe, scqe, dsize);
/* rewrite the ownership bit for the destination ring position */
dcqe64->op_own = (dcqe64->op_own & ~MLX5_CQE_OWNER_MASK) | sw_own;

++i;
scqe = get_sw_cqe(cq, i);
scqe64 = ssize == 64 ? scqe : scqe + 64;
if (!scqe) {
mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
return -EINVAL;
}

if (scqe == start_cqe) {
pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n",
cq->mcq.cqn);
return -ENOMEM;
}
}
/* consume the RESIZE_CQ marker itself */
++cq->mcq.cons_index;
return 0;
}
/*
 * Resize a CQ to hold at least @entries completions.
 *
 * Stages a new buffer (pinned user pages when @udata is set, a kernel
 * buffer otherwise), issues the firmware MODIFY_CQ/RESIZE command, and
 * then swaps the buffers — for kernel CQs the still-pending CQEs are
 * copied across under the CQ lock.  Returns 0 on success, -ENOSYS when
 * the firmware lacks resize support, -EINVAL on bad sizes, or a
 * negative errno from the staging/command steps.
 */
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	struct mlx5_modify_cq_mbox_in *in;
	int err;
	int npas;
	int page_shift;
	int inlen;
	int uninitialized_var(cqe_size);
	unsigned long flags;

	if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
		pr_info("Firmware does not support resize CQ\n");
		return -ENOSYS;
	}

	if (entries < 1)
		return -EINVAL;

	/* one extra entry is reserved; hardware needs a power of two */
	entries = roundup_pow_of_two(entries + 1);
	if (entries > dev->mdev.caps.max_cqes + 1)
		return -EINVAL;

	if (entries == ibcq->cqe + 1)
		return 0;

	mutex_lock(&cq->resize_mutex);
	if (udata) {
		err = resize_user(dev, cq, entries, udata, &npas, &page_shift,
				  &cqe_size);
	} else {
		cqe_size = 64;
		err = resize_kernel(dev, cq, entries, cqe_size);
		if (!err) {
			npas = cq->resize_buf->buf.npages;
			page_shift = cq->resize_buf->buf.page_shift;
		}
	}
	if (err)
		goto ex;

	inlen = sizeof(*in) + npas * sizeof(in->pas[0]);
	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		goto ex_resize;
	}

	/* describe the new buffer's pages to the firmware */
	if (udata)
		mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
				     in->pas, 0);
	else
		mlx5_fill_page_array(&cq->resize_buf->buf, in->pas);

	in->field_select = cpu_to_be32(MLX5_MODIFY_CQ_MASK_LOG_SIZE |
				       MLX5_MODIFY_CQ_MASK_PG_OFFSET |
				       MLX5_MODIFY_CQ_MASK_PG_SIZE);
	in->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	in->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
	in->ctx.page_offset = 0;
	in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(entries) << 24);
	in->hdr.opmod = cpu_to_be16(MLX5_CQ_OPMOD_RESIZE);
	in->cqn = cpu_to_be32(cq->mcq.cqn);

	err = mlx5_core_modify_cq(&dev->mdev, &cq->mcq, in, inlen);
	if (err)
		goto ex_alloc;

	if (udata) {
		cq->ibcq.cqe = entries - 1;
		ib_umem_release(cq->buf.umem);
		cq->buf.umem = cq->resize_umem;
		cq->resize_umem = NULL;
	} else {
		struct mlx5_ib_cq_buf tbuf;
		int resized = 0;

		/* migrate pending CQEs into the new buffer under the lock;
		 * the poll path may already have done the swap for us */
		spin_lock_irqsave(&cq->lock, flags);
		if (cq->resize_buf) {
			err = copy_resize_cqes(cq);
			if (!err) {
				tbuf = cq->buf;
				cq->buf = *cq->resize_buf;
				kfree(cq->resize_buf);
				cq->resize_buf = NULL;
				resized = 1;
			}
		}
		cq->ibcq.cqe = entries - 1;
		spin_unlock_irqrestore(&cq->lock, flags);
		if (resized)
			free_cq_buf(dev, &tbuf);
	}
	mutex_unlock(&cq->resize_mutex);

	mlx5_vfree(in);
	return 0;

ex_alloc:
	mlx5_vfree(in);

ex_resize:
	if (udata)
		un_resize_user(cq);
	else
		un_resize_kernel(dev, cq);
ex:
	mutex_unlock(&cq->resize_mutex);
	return err;
}
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
......
......@@ -541,6 +541,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
struct mlx5_ib_ucontext *context;
struct mlx5_uuar_info *uuari;
struct mlx5_uar *uars;
int gross_uuars;
int num_uars;
int uuarn;
int err;
......@@ -559,11 +560,13 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
if (req.total_num_uuars == 0)
return ERR_PTR(-EINVAL);
req.total_num_uuars = ALIGN(req.total_num_uuars, MLX5_BF_REGS_PER_PAGE);
req.total_num_uuars = ALIGN(req.total_num_uuars,
MLX5_NON_FP_BF_REGS_PER_PAGE);
if (req.num_low_latency_uuars > req.total_num_uuars - 1)
return ERR_PTR(-EINVAL);
num_uars = req.total_num_uuars / MLX5_BF_REGS_PER_PAGE;
num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
resp.qp_tab_size = 1 << dev->mdev.caps.log_max_qp;
resp.bf_reg_size = dev->mdev.caps.bf_reg_size;
resp.cache_line_size = L1_CACHE_BYTES;
......@@ -585,7 +588,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
goto out_ctx;
}
uuari->bitmap = kcalloc(BITS_TO_LONGS(req.total_num_uuars),
uuari->bitmap = kcalloc(BITS_TO_LONGS(gross_uuars),
sizeof(*uuari->bitmap),
GFP_KERNEL);
if (!uuari->bitmap) {
......@@ -595,13 +598,13 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
/*
* clear all fast path uuars
*/
for (i = 0; i < req.total_num_uuars; i++) {
for (i = 0; i < gross_uuars; i++) {
uuarn = i & 3;
if (uuarn == 2 || uuarn == 3)
set_bit(i, uuari->bitmap);
}
uuari->count = kcalloc(req.total_num_uuars, sizeof(*uuari->count), GFP_KERNEL);
uuari->count = kcalloc(gross_uuars, sizeof(*uuari->count), GFP_KERNEL);
if (!uuari->count) {
err = -ENOMEM;
goto out_bitmap;
......
......@@ -195,6 +195,7 @@ struct mlx5_ib_cq_buf {
struct mlx5_buf buf;
struct ib_umem *umem;
int cqe_size;
int nent;
};
enum mlx5_ib_qp_flags {
......@@ -220,7 +221,7 @@ struct mlx5_ib_cq {
/* protect resize cq
*/
struct mutex resize_mutex;
struct mlx5_ib_cq_resize *resize_buf;
struct mlx5_ib_cq_buf *resize_buf;
struct ib_umem *resize_umem;
int cqe_size;
};
......@@ -264,7 +265,6 @@ struct mlx5_ib_mr {
enum ib_wc_status status;
struct mlx5_ib_dev *dev;
struct mlx5_create_mkey_mbox_out out;
unsigned long start;
};
struct mlx5_ib_fast_reg_page_list {
......
......@@ -146,7 +146,6 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
spin_lock_irq(&ent->lock);
ent->pending++;
spin_unlock_irq(&ent->lock);
mr->start = jiffies;
err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in,
sizeof(*in), reg_mr_callback,
mr, &mr->out);
......
......@@ -340,14 +340,57 @@ static int qp_has_rq(struct ib_qp_init_attr *attr)
return 1;
}
/* uuar 0 is reserved; medium-class allocation starts at index 1. */
static int first_med_uuar(void)
{
	return 1;
}
/*
 * Advance to the next non-fast-path uuar index.  Within each group of
 * four blue-flame registers, slots 2 and 3 (mod 4) belong to the fast
 * path and are skipped.
 */
static int next_uuar(int n)
{
	do {
		n++;
	} while ((n % 4) & 2);

	return n;
}
/*
 * Number of medium-latency-class uuars: all non-fast-path slots minus
 * the low-latency ones and the reserved uuar 0.  Never negative.
 */
static int num_med_uuar(struct mlx5_uuar_info *uuari)
{
	int remaining = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE -
			uuari->num_low_latency_uuars - 1;

	if (remaining < 0)
		return 0;
	return remaining;
}
/* Total uuar count across all mapped UAR pages (4 BF regs per page). */
static int max_uuari(struct mlx5_uuar_info *uuari)
{
	return 4 * uuari->num_uars;
}
/*
 * Return the index of the first high-latency-class uuar: the first
 * non-fast-path slot that follows all medium-class slots.  Walks the
 * non-FP sequence (next_uuar() skips fast-path slots) until the
 * medium-class count is exhausted.
 */
static int first_hi_uuar(struct mlx5_uuar_info *uuari)
{
	int med;
	int i;
	int t;

	med = num_med_uuar(uuari);
	/* Fix: with zero medium uuars the original loop never terminated
	 * (t is incremented to 1 before the first comparison, so t == 0
	 * could never match).  The first high uuar is then simply the
	 * first non-fast-path slot after the reserved uuar 0. */
	if (!med)
		return first_med_uuar();

	for (t = 0, i = first_med_uuar();; i = next_uuar(i)) {
		t++;
		if (t == med)
			return next_uuar(i);
	}

	return 0;
}
static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
{
int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
int start_uuar;
int i;
start_uuar = nuuars - uuari->num_low_latency_uuars;
for (i = start_uuar; i < nuuars; i++) {
for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) {
if (!test_bit(i, uuari->bitmap)) {
set_bit(i, uuari->bitmap);
uuari->count[i]++;
......@@ -360,19 +403,10 @@ static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
{
int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
int minidx = 1;
int uuarn;
int end;
int minidx = first_med_uuar();
int i;
end = nuuars - uuari->num_low_latency_uuars;
for (i = 1; i < end; i++) {
uuarn = i & 3;
if (uuarn == 2 || uuarn == 3)
continue;
for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) {
if (uuari->count[i] < uuari->count[minidx])
minidx = i;
}
......@@ -489,12 +523,12 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
{
struct mlx5_ib_ucontext *context;
struct mlx5_ib_create_qp ucmd;
int page_shift;
int page_shift = 0;
int uar_index;
int npages;
u32 offset;
u32 offset = 0;
int uuarn;
int ncont;
int ncont = 0;
int err;
err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
......@@ -510,11 +544,16 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
if (uuarn < 0) {
mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
mlx5_ib_dbg(dev, "reverting to high latency\n");
uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
mlx5_ib_dbg(dev, "reverting to medium latency\n");
uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM);
if (uuarn < 0) {
mlx5_ib_dbg(dev, "uuar allocation failed\n");
return uuarn;
mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n");
mlx5_ib_dbg(dev, "reverting to high latency\n");
uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
if (uuarn < 0) {
mlx5_ib_warn(dev, "uuar allocation failed\n");
return uuarn;
}
}
}
......@@ -525,23 +564,29 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
if (err)
goto err_uuar;
qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
qp->buf_size, 0, 0);
if (IS_ERR(qp->umem)) {
mlx5_ib_dbg(dev, "umem_get failed\n");
err = PTR_ERR(qp->umem);
goto err_uuar;
if (ucmd.buf_addr && qp->buf_size) {
qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
qp->buf_size, 0, 0);
if (IS_ERR(qp->umem)) {
mlx5_ib_dbg(dev, "umem_get failed\n");
err = PTR_ERR(qp->umem);
goto err_uuar;
}
} else {
qp->umem = NULL;
}
mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift,
&ncont, NULL);
err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset);
if (err) {
mlx5_ib_warn(dev, "bad offset\n");
goto err_umem;
if (qp->umem) {
mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift,
&ncont, NULL);
err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset);
if (err) {
mlx5_ib_warn(dev, "bad offset\n");
goto err_umem;
}
mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n",
ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset);
}
mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n",
ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset);
*inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
*in = mlx5_vzalloc(*inlen);
......@@ -549,7 +594,8 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
err = -ENOMEM;
goto err_umem;
}
mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
if (qp->umem)
mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
(*in)->ctx.log_pg_sz_remote_qpn =
cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
(*in)->ctx.params2 = cpu_to_be32(offset << 6);
......@@ -580,7 +626,8 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
mlx5_vfree(*in);
err_umem:
ib_umem_release(qp->umem);
if (qp->umem)
ib_umem_release(qp->umem);
err_uuar:
free_uuar(&context->uuari, uuarn);
......@@ -593,7 +640,8 @@ static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp)
context = to_mucontext(pd->uobject->context);
mlx5_ib_db_unmap_user(context, &qp->db);
ib_umem_release(qp->umem);
if (qp->umem)
ib_umem_release(qp->umem);
free_uuar(&context->uuari, qp->uuarn);
}
......@@ -2212,6 +2260,10 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
/* Make sure doorbell record is visible to the HCA before
* we hit doorbell */
wmb();
if (bf->need_lock)
spin_lock(&bf->lock);
......
......@@ -93,6 +93,9 @@ struct mlx5_ib_create_cq_resp {
struct mlx5_ib_resize_cq {
__u64 buf_addr;
__u16 cqe_size;
__u16 reserved0;
__u32 reserved1;
};
struct mlx5_ib_create_srq {
......
......@@ -1354,8 +1354,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
neigh->ha, ntohl(rt->rt_gateway));
if (arpindex >= 0) {
if (!memcmp(nesadapter->arp_table[arpindex].mac_addr,
neigh->ha, ETH_ALEN)) {
if (ether_addr_equal(nesadapter->arp_table[arpindex].mac_addr, neigh->ha)) {
/* Mac address same as in nes_arp_table */
goto out;
}
......
......@@ -84,6 +84,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
if (vlan_enabled)
ah->av->valid |= OCRDMA_AV_VLAN_VALID;
ah->av->valid = cpu_to_le32(ah->av->valid);
return status;
}
......
......@@ -31,7 +31,7 @@
#define Bit(_b) (1 << (_b))
#define OCRDMA_GEN1_FAMILY 0xB
#define OCRDMA_GEN2_FAMILY 0x2
#define OCRDMA_GEN2_FAMILY 0x0F
#define OCRDMA_SUBSYS_ROCE 10
enum {
......@@ -1694,7 +1694,7 @@ struct ocrdma_grh {
u16 rsvd;
} __packed;
#define OCRDMA_AV_VALID Bit(0)
#define OCRDMA_AV_VALID Bit(7)
#define OCRDMA_AV_VLAN_VALID Bit(1)
struct ocrdma_av {
......
......@@ -57,13 +57,20 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
struct qib_sge *sge;
struct ib_wc wc;
u32 length;
enum ib_qp_type sqptype, dqptype;
qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn);
if (!qp) {
ibp->n_pkt_drops++;
return;
}
if (qp->ibqp.qp_type != sqp->ibqp.qp_type ||
sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
IB_QPT_UD : sqp->ibqp.qp_type;
dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
IB_QPT_UD : qp->ibqp.qp_type;
if (dqptype != sqptype ||
!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
ibp->n_pkt_drops++;
goto drop;
......
config INFINIBAND_USNIC
tristate "Verbs support for Cisco VIC"
depends on NETDEVICES && ETHERNET && INET && PCI && INTEL_IOMMU
select ENIC
select NET_VENDOR_CISCO
select PCI_IOV
select INFINIBAND_USER_ACCESS
---help---
This is a low-level driver for Cisco's Virtual Interface
Cards (VICs), including the VIC 1240 and 1280 cards.
ccflags-y := -Idrivers/net/ethernet/cisco/enic
obj-$(CONFIG_INFINIBAND_USNIC)+= usnic_verbs.o
usnic_verbs-y=\
usnic_fwd.o \
usnic_transport.o \
usnic_uiom.o \
usnic_uiom_interval_tree.o \
usnic_vnic.o \
usnic_ib_main.o \
usnic_ib_qp_grp.o \
usnic_ib_sysfs.o \
usnic_ib_verbs.o \
usnic_debugfs.o \
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef USNIC_H_
#define USNIC_H_
#define DRV_NAME "usnic_verbs"
#define PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC 0x00cf /* User space NIC */
#define DRV_VERSION "1.0.3"
#define DRV_RELDATE "December 19, 2013"
#endif /* USNIC_H_ */
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef USNIC_ABI_H
#define USNIC_ABI_H
/* ABI between userspace and kernel */
#define USNIC_UVERBS_ABI_VERSION 4
#define USNIC_QP_GRP_MAX_WQS 8
#define USNIC_QP_GRP_MAX_RQS 8
#define USNIC_QP_GRP_MAX_CQS 16
enum usnic_transport_type {
USNIC_TRANSPORT_UNKNOWN = 0,
USNIC_TRANSPORT_ROCE_CUSTOM = 1,
USNIC_TRANSPORT_IPV4_UDP = 2,
USNIC_TRANSPORT_MAX = 3,
};
struct usnic_transport_spec {
enum usnic_transport_type trans_type;
union {
struct {
uint16_t port_num;
} usnic_roce;
struct {
uint32_t sock_fd;
} udp;
};
};
struct usnic_ib_create_qp_cmd {
struct usnic_transport_spec spec;
};
/*TODO: Future - usnic_modify_qp needs to pass in generic filters */
struct usnic_ib_create_qp_resp {
u32 vfid;
u32 qp_grp_id;
u64 bar_bus_addr;
u32 bar_len;
/*
* WQ, RQ, CQ are explicity specified bc exposing a generic resources inteface
* expands the scope of ABI to many files.
*/
u32 wq_cnt;
u32 rq_cnt;
u32 cq_cnt;
u32 wq_idx[USNIC_QP_GRP_MAX_WQS];
u32 rq_idx[USNIC_QP_GRP_MAX_RQS];
u32 cq_idx[USNIC_QP_GRP_MAX_CQS];
u32 transport;
u32 reserved[9];
};
#endif /* USNIC_ABI_H */
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef USNIC_CMN_PKT_HDR_H
#define USNIC_CMN_PKT_HDR_H
#define USNIC_ROCE_ETHERTYPE (0x8915)
#define USNIC_ROCE_GRH_VER (8)
#define USNIC_PROTO_VER (1)
#define USNIC_ROCE_GRH_VER_SHIFT (4)
#endif /* USNIC_COMMON_PKT_HDR_H */
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef USNIC_CMN_UTIL_H
#define USNIC_CMN_UTIL_H
/*
 * Build a 16-byte link-local IPv6-style GID from a MAC address:
 * fe80::/64 prefix followed by a modified EUI-64 interface id
 * (MAC halves split around ff:fe, universal/local bit flipped).
 */
static inline void
usnic_mac_to_gid(const char *const mac, char *raw_gid)
{
	raw_gid[0] = 0xfe;
	raw_gid[1] = 0x80;
	memset(raw_gid + 2, 0, 6);
	raw_gid[8] = mac[0] ^ 2;
	memcpy(raw_gid + 9, mac + 1, 2);
	raw_gid[11] = 0xff;
	raw_gid[12] = 0xfe;
	memcpy(raw_gid + 13, mac + 3, 3);
}
/*
 * Build a GID carrying both a MAC and an IPv4 address: fe80::/16
 * prefix, the big-endian IPv4 address at bytes 4-7, then the modified
 * EUI-64 interface id derived from the MAC.
 */
static inline void
usnic_mac_ip_to_gid(const char *const mac, const __be32 inaddr, char *raw_gid)
{
	raw_gid[0] = 0xfe;
	raw_gid[1] = 0x80;
	memset(raw_gid + 2, 0, 2);
	memcpy(raw_gid + 4, &inaddr, 4);
	raw_gid[8] = mac[0] ^ 2;
	memcpy(raw_gid + 9, mac + 1, 2);
	raw_gid[11] = 0xff;
	raw_gid[12] = 0xfe;
	memcpy(raw_gid + 13, mac + 3, 3);
}
/*
 * Overwrite only the interface-id half (bytes 8-15) of an existing GID
 * with the modified EUI-64 derived from mac; the prefix is untouched.
 */
static inline void
usnic_write_gid_if_id_from_mac(char *mac, char *raw_gid)
{
	raw_gid[8] = mac[0] ^ 2;
	memcpy(raw_gid + 9, mac + 1, 2);
	raw_gid[11] = 0xff;
	raw_gid[12] = 0xfe;
	memcpy(raw_gid + 13, mac + 3, 3);
}
#endif /* USNIC_COMMON_UTIL_H */
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/debugfs.h>
#include <linux/module.h>
#include "usnic.h"
#include "usnic_log.h"
#include "usnic_debugfs.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_transport.h"
static struct dentry *debugfs_root;
static struct dentry *flows_dentry;
/*
 * debugfs read handler for the "build-info" file: formats the driver
 * version and build date into a local buffer and copies it to userspace.
 */
static ssize_t usnic_debugfs_buildinfo_read(struct file *f, char __user *data,
					    size_t count, loff_t *ppos)
{
	char buf[500];
	int res;

	/* Single-shot read: subsequent reads report EOF. */
	if (*ppos > 0)
		return 0;

	res = scnprintf(buf, sizeof(buf),
			"version: %s\n"
			"build date: %s\n",
			DRV_VERSION, DRV_RELDATE);

	return simple_read_from_buffer(data, count, ppos, buf, res);
}
/* File operations for the read-only "build-info" debugfs entry. */
static const struct file_operations usnic_debugfs_buildinfo_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = usnic_debugfs_buildinfo_read
};
/*
 * debugfs read handler for a per-flow file: emits one line describing
 * the flow's QP group id, transport type and transport endpoint.
 */
static ssize_t flowinfo_read(struct file *f, char __user *data,
				size_t count, loff_t *ppos)
{
	struct usnic_ib_qp_grp_flow *qp_flow;
	int n;
	int left;
	char *ptr;
	char buf[512];

	qp_flow = f->private_data;
	ptr = buf;
	/* Fix: clamp to the local buffer size.  The original assigned the
	 * raw user-supplied count, so a read() with count > sizeof(buf)
	 * let scnprintf write past the end of buf[]. */
	left = count < sizeof(buf) ? count : sizeof(buf);

	/* Single-shot read: subsequent reads report EOF. */
	if (*ppos > 0)
		return 0;

	/* Hold the owning QP group's lock while sampling its fields. */
	spin_lock(&qp_flow->qp_grp->lock);
	n = scnprintf(ptr, left,
			"QP Grp ID: %d Transport: %s ",
			qp_flow->qp_grp->grp_id,
			usnic_transport_to_str(qp_flow->trans_type));
	UPDATE_PTR_LEFT(n, ptr, left);
	if (qp_flow->trans_type == USNIC_TRANSPORT_ROCE_CUSTOM) {
		n = scnprintf(ptr, left, "Port_Num:%hu\n",
				qp_flow->usnic_roce.port_num);
		UPDATE_PTR_LEFT(n, ptr, left);
	} else if (qp_flow->trans_type == USNIC_TRANSPORT_IPV4_UDP) {
		n = usnic_transport_sock_to_str(ptr, left,
				qp_flow->udp.sock);
		UPDATE_PTR_LEFT(n, ptr, left);
		n = scnprintf(ptr, left, "\n");
		UPDATE_PTR_LEFT(n, ptr, left);
	}
	spin_unlock(&qp_flow->qp_grp->lock);

	return simple_read_from_buffer(data, count, ppos, buf, ptr - buf);
}
/* File operations for the per-flow debugfs entries under "flows/". */
static const struct file_operations flowinfo_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = flowinfo_read,
};
/*
 * Create the driver's debugfs tree: <debugfs>/usnic_verbs containing a
 * read-only "build-info" file and a "flows" subdirectory for per-flow
 * entries.  On failure debugfs_root is left NULL and the rest of the
 * driver treats debugfs as disabled.
 */
void usnic_debugfs_init(void)
{
	debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
	/* NOTE(review): debugfs_create_dir may also return NULL in some
	 * configurations; IS_ERR_OR_NULL would be the safer check —
	 * confirm against the target kernel version. */
	if (IS_ERR(debugfs_root)) {
		usnic_err("Failed to create debugfs root dir, check if debugfs is enabled in kernel configuration\n");
		goto out_clear_root;
	}

	flows_dentry = debugfs_create_dir("flows", debugfs_root);
	if (IS_ERR_OR_NULL(flows_dentry)) {
		usnic_err("Failed to create debugfs flow dir with err %ld\n",
				PTR_ERR(flows_dentry));
		goto out_free_root;
	}

	debugfs_create_file("build-info", S_IRUGO, debugfs_root,
				NULL, &usnic_debugfs_buildinfo_ops);
	return;

out_free_root:
	debugfs_remove_recursive(debugfs_root);
out_clear_root:
	debugfs_root = NULL;
}
/*
 * Tear down the driver's debugfs tree.  Safe to call when init failed
 * or never ran (debugfs_root is NULL in that case).
 */
void usnic_debugfs_exit(void)
{
	struct dentry *root = debugfs_root;

	if (root) {
		debugfs_root = NULL;
		debugfs_remove_recursive(root);
	}
}
/*
 * Create a per-flow debugfs file, named after the flow id, under the
 * "flows" directory, exposing flowinfo_read.  Silently does nothing if
 * the flows directory was never created.
 */
void usnic_debugfs_flow_add(struct usnic_ib_qp_grp_flow *qp_flow)
{
	if (IS_ERR_OR_NULL(flows_dentry))
		return;

	/* The dentry name must outlive the dentry, so it is stored in
	 * the flow object itself. */
	scnprintf(qp_flow->dentry_name, sizeof(qp_flow->dentry_name),
			"%u", qp_flow->flow->flow_id);
	qp_flow->dbgfs_dentry = debugfs_create_file(qp_flow->dentry_name,
						S_IRUGO,
						flows_dentry,
						qp_flow,
						&flowinfo_ops);
	if (IS_ERR_OR_NULL(qp_flow->dbgfs_dentry)) {
		usnic_err("Failed to create dbg fs entry for flow %u\n",
				qp_flow->flow->flow_id);
	}
}
/* Remove a flow's debugfs file, if its creation actually succeeded. */
void usnic_debugfs_flow_remove(struct usnic_ib_qp_grp_flow *qp_flow)
{
	struct dentry *entry = qp_flow->dbgfs_dentry;

	if (!IS_ERR_OR_NULL(entry))
		debugfs_remove(entry);
}
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef USNIC_DEBUGFS_H_
#define USNIC_DEBUGFS_H_
#include "usnic_ib_qp_grp.h"
void usnic_debugfs_init(void);
void usnic_debugfs_exit(void);
void usnic_debugfs_flow_add(struct usnic_ib_qp_grp_flow *qp_flow);
void usnic_debugfs_flow_remove(struct usnic_ib_qp_grp_flow *qp_flow);
#endif /*!USNIC_DEBUGFS_H_ */
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/netdevice.h>
#include <linux/pci.h>
#include "enic_api.h"
#include "usnic_common_pkt_hdr.h"
#include "usnic_fwd.h"
#include "usnic_log.h"
/*
 * Issue a devcmd to the VIC firmware on behalf of vnic_idx, proxied
 * through the PF netdev.  Caller must hold ufdev->lock.
 * a0/a1 are the command's in/out argument registers.
 * Returns 0 on success or the firmware status code on failure.
 */
static int usnic_fwd_devcmd_locked(struct usnic_fwd_dev *ufdev, int vnic_idx,
					enum vnic_devcmd_cmd cmd, u64 *a0,
					u64 *a1)
{
	int status;
	struct net_device *netdev = ufdev->netdev;

	lockdep_assert_held(&ufdev->lock);

	/* 1000 ms devcmd timeout */
	status = enic_api_devcmd_proxy_by_index(netdev,
			vnic_idx,
			cmd,
			a0, a1,
			1000);
	if (status) {
		/* Deleting an already-deleted filter is expected and
		 * only worth a debug message. */
		if (status == ERR_EINVAL && cmd == CMD_DEL_FILTER) {
			usnic_dbg("Dev %s vnic idx %u cmd %u already deleted",
					ufdev->name, vnic_idx, cmd);
		} else {
			usnic_err("Dev %s vnic idx %u cmd %u failed with status %d\n",
					ufdev->name, vnic_idx, cmd,
					status);
		}
	} else {
		usnic_dbg("Dev %s vnic idx %u cmd %u success",
				ufdev->name, vnic_idx, cmd);
	}

	return status;
}
/* Locking wrapper around usnic_fwd_devcmd_locked(). */
static int usnic_fwd_devcmd(struct usnic_fwd_dev *ufdev, int vnic_idx,
				enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1)
{
	int status;

	spin_lock(&ufdev->lock);
	status = usnic_fwd_devcmd_locked(ufdev, vnic_idx, cmd, a0, a1);
	spin_unlock(&ufdev->lock);

	return status;
}
/*
 * Allocate and initialize the forwarding-device wrapper for the PF
 * represented by pdev.  Returns NULL on allocation failure; the caller
 * owns the result and releases it with usnic_fwd_dev_free().
 */
struct usnic_fwd_dev *usnic_fwd_dev_alloc(struct pci_dev *pdev)
{
	struct usnic_fwd_dev *ufdev;

	ufdev = kzalloc(sizeof(*ufdev), GFP_KERNEL);
	if (!ufdev)
		return NULL;

	ufdev->pdev = pdev;
	ufdev->netdev = pci_get_drvdata(pdev);
	spin_lock_init(&ufdev->lock);
	/* kzalloc zeroed the struct, so copying at most sizeof - 1 bytes
	 * leaves name NUL-terminated even if the netdev name is longer. */
	strncpy(ufdev->name, netdev_name(ufdev->netdev),
			sizeof(ufdev->name) - 1);

	return ufdev;
}
/* Release a forwarding device allocated by usnic_fwd_dev_alloc(). */
void usnic_fwd_dev_free(struct usnic_fwd_dev *ufdev)
{
	kfree(ufdev);
}
/* Record the device's MAC address under the forwarding-device lock. */
void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, char mac[ETH_ALEN])
{
	spin_lock(&ufdev->lock);
	memcpy(&ufdev->mac, mac, sizeof(ufdev->mac));
	spin_unlock(&ufdev->lock);
}
/*
 * Record the device's (single) IPv4 address.  Only one address is
 * supported; returns -EFAULT if one is already set, 0 on success.
 */
int usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr)
{
	int status;

	spin_lock(&ufdev->lock);
	if (ufdev->inaddr == 0) {
		ufdev->inaddr = inaddr;
		status = 0;
	} else {
		/* NOTE(review): -EEXIST would be the conventional code
		 * here — confirm callers before changing. */
		status = -EFAULT;
	}
	spin_unlock(&ufdev->lock);

	return status;
}
/* Clear the device's recorded IPv4 address. */
void usnic_fwd_del_ipaddr(struct usnic_fwd_dev *ufdev)
{
	spin_lock(&ufdev->lock);
	ufdev->inaddr = 0;
	spin_unlock(&ufdev->lock);
}
/* Mark link up; gates flow allocation via usnic_fwd_dev_ready_locked(). */
void usnic_fwd_carrier_up(struct usnic_fwd_dev *ufdev)
{
	spin_lock(&ufdev->lock);
	ufdev->link_up = 1;
	spin_unlock(&ufdev->lock);
}
/* Mark link down; subsequent flow allocations will fail with -EPERM. */
void usnic_fwd_carrier_down(struct usnic_fwd_dev *ufdev)
{
	spin_lock(&ufdev->lock);
	ufdev->link_up = 0;
	spin_unlock(&ufdev->lock);
}
/* Record the device MTU under the forwarding-device lock. */
void usnic_fwd_set_mtu(struct usnic_fwd_dev *ufdev, unsigned int mtu)
{
	spin_lock(&ufdev->lock);
	ufdev->mtu = mtu;
	spin_unlock(&ufdev->lock);
}
/*
 * Check whether the forwarding device can accept new flows.
 * Caller must hold ufdev->lock.  Returns 0 if ready, -EPERM if the
 * link is down.
 */
static int usnic_fwd_dev_ready_locked(struct usnic_fwd_dev *ufdev)
{
	lockdep_assert_held(&ufdev->lock);

	if (!ufdev->link_up)
		return -EPERM;

	return 0;
}
/*
 * Validate a filter against the device's current state.  Caller must
 * hold ufdev->lock.  Only FILTER_IPV4_5TUPLE gets detailed checks;
 * all other filter types are accepted as-is.
 * NOTE(review): the distinct error codes (EACCES/EBUSY/ERANGE/EFAULT)
 * appear to serve as diagnostics rather than carry their usual
 * meanings — confirm before relying on them.
 */
static int validate_filter_locked(struct usnic_fwd_dev *ufdev,
					struct filter *filter)
{

	lockdep_assert_held(&ufdev->lock);

	if (filter->type == FILTER_IPV4_5TUPLE) {
		if (!(filter->u.ipv4.flags & FILTER_FIELD_5TUP_DST_AD))
			return -EACCES;
		if (!(filter->u.ipv4.flags & FILTER_FIELD_5TUP_DST_PT))
			return -EBUSY;
		else if (ufdev->inaddr == 0)
			return -EINVAL;
		else if (filter->u.ipv4.dst_port == 0)
			return -ERANGE;
		/* filter dst_addr is compared in host byte order */
		else if (ntohl(ufdev->inaddr) != filter->u.ipv4.dst_addr)
			return -EFAULT;
		else
			return 0;
	}

	return 0;
}
/*
 * Marshal a filter and its action into two back-to-back TLVs at tlv.
 * The caller must supply a buffer of at least
 * 2*sizeof(struct filter_tlv) + sizeof(struct filter) +
 * sizeof(struct filter_action) bytes.
 */
static void fill_tlv(struct filter_tlv *tlv, struct filter *filter,
		struct filter_action *action)
{
	/* First TLV: the filter itself. */
	tlv->type = CLSF_TLV_FILTER;
	tlv->length = sizeof(struct filter);
	*((struct filter *)&tlv->val) = *filter;

	/* Second TLV immediately follows the first. */
	tlv = (struct filter_tlv *)((char *)tlv + sizeof(struct filter_tlv) +
			sizeof(struct filter));
	tlv->type = CLSF_TLV_ACTION;
	tlv->length = sizeof(struct filter_action);
	*((struct filter_action *)&tlv->val) = *action;
}
/*
 * Program a filter + action pair into the VIC firmware and return a
 * flow handle for it.  The TLV pair is marshalled into a DMA-coherent
 * buffer and handed to CMD_ADD_FILTER.  Returns a flow owned by the
 * caller (released via usnic_fwd_dealloc_flow()) or an ERR_PTR.
 */
struct usnic_fwd_flow*
usnic_fwd_alloc_flow(struct usnic_fwd_dev *ufdev, struct filter *filter,
				struct usnic_filter_action *uaction)
{
	struct filter_tlv *tlv;
	struct pci_dev *pdev;
	struct usnic_fwd_flow *flow;
	uint64_t a0, a1;
	uint64_t tlv_size;
	dma_addr_t tlv_pa;
	int status;

	pdev = ufdev->pdev;
	/* Room for two TLVs: one filter, one action (see fill_tlv()). */
	tlv_size = (2*sizeof(struct filter_tlv) + sizeof(struct filter) +
			sizeof(struct filter_action));
	/* NOTE(review): GFP_ATOMIC suggests callers may hold spinlocks —
	 * confirm against the call sites. */
	flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	tlv = pci_alloc_consistent(pdev, tlv_size, &tlv_pa);
	if (!tlv) {
		usnic_err("Failed to allocate memory\n");
		status = -ENOMEM;
		goto out_free_flow;
	}

	fill_tlv(tlv, filter, &uaction->action);

	spin_lock(&ufdev->lock);
	status = usnic_fwd_dev_ready_locked(ufdev);
	if (status) {
		usnic_err("Forwarding dev %s not ready with status %d\n",
				ufdev->name, status);
		goto out_free_tlv;
	}

	status = validate_filter_locked(ufdev, filter);
	if (status) {
		usnic_err("Failed to validate filter with status %d\n",
				status);
		goto out_free_tlv;
	}

	/* Issue Devcmd */
	a0 = tlv_pa;
	a1 = tlv_size;
	status = usnic_fwd_devcmd_locked(ufdev, uaction->vnic_idx,
						CMD_ADD_FILTER, &a0, &a1);
	if (status) {
		usnic_err("VF %s Filter add failed with status:%d",
				ufdev->name, status);
		status = -EFAULT;
		goto out_free_tlv;
	} else {
		/* On success, a0 carries the firmware-assigned filter id. */
		usnic_dbg("VF %s FILTER ID:%llu", ufdev->name, a0);
	}

	flow->flow_id = (uint32_t) a0;
	flow->vnic_idx = uaction->vnic_idx;
	flow->ufdev = ufdev;

	/* Success and failure converge here: the TLV buffer is always
	 * freed; flow survives only when status == 0. */
out_free_tlv:
	spin_unlock(&ufdev->lock);
	pci_free_consistent(pdev, tlv_size, tlv, tlv_pa);
	if (!status)
		return flow;

out_free_flow:
	kfree(flow);
	return ERR_PTR(status);
}
/*
 * Remove a flow's filter from the firmware and free the flow handle.
 * Firmware delete failures are logged but reported as success because
 * they are unrecoverable; the handle is freed in all cases.
 * NOTE(review): a1 is passed to the devcmd uninitialized — presumably
 * unused as an input for CMD_DEL_FILTER; confirm against the devcmd ABI.
 */
int usnic_fwd_dealloc_flow(struct usnic_fwd_flow *flow)
{
	int status;
	u64 a0, a1;

	a0 = flow->flow_id;

	status = usnic_fwd_devcmd(flow->ufdev, flow->vnic_idx,
					CMD_DEL_FILTER, &a0, &a1);
	if (status) {
		if (status == ERR_EINVAL) {
			usnic_dbg("Filter %u already deleted for VF Idx %u pf: %s status: %d",
					flow->flow_id, flow->vnic_idx,
					flow->ufdev->name, status);
		} else {
			usnic_err("PF %s VF Idx %u Filter: %u FILTER DELETE failed with status %d",
					flow->ufdev->name, flow->vnic_idx,
					flow->flow_id, status);
		}
		status = 0;
		/*
		 * Log the error and fake success to the caller because if
		 * a flow fails to be deleted in the firmware, it is an
		 * unrecoverable error.
		 */
	} else {
		usnic_dbg("PF %s VF Idx %u Filter: %u FILTER DELETED",
				flow->ufdev->name, flow->vnic_idx,
				flow->flow_id);
	}

	kfree(flow);
	return status;
}
/*
 * Enable RQ/WQ pair qp_idx on the given vnic via the CMD_QP_ENABLE
 * devcmd.  Returns 0 on success or the firmware status code.
 */
int usnic_fwd_enable_qp(struct usnic_fwd_dev *ufdev, int vnic_idx, int qp_idx)
{
	int status;
	struct net_device *pf_netdev;
	u64 a0, a1;

	pf_netdev = ufdev->netdev;
	a0 = qp_idx;
	a1 = CMD_QP_RQWQ;

	status = usnic_fwd_devcmd(ufdev, vnic_idx, CMD_QP_ENABLE,
					&a0, &a1);
	if (status) {
		usnic_err("PF %s VNIC Index %u RQ Index: %u ENABLE Failed with status %d",
				netdev_name(pf_netdev),
				vnic_idx,
				qp_idx,
				status);
	} else {
		usnic_dbg("PF %s VNIC Index %u RQ Index: %u ENABLED",
				netdev_name(pf_netdev),
				vnic_idx, qp_idx);
	}

	return status;
}
/*
 * Disable RQ/WQ pair qp_idx on the given vnic via the CMD_QP_DISABLE
 * devcmd.  Returns 0 on success or the firmware status code.
 */
int usnic_fwd_disable_qp(struct usnic_fwd_dev *ufdev, int vnic_idx, int qp_idx)
{
	int status;
	u64 a0, a1;
	struct net_device *pf_netdev;

	pf_netdev = ufdev->netdev;
	a0 = qp_idx;
	a1 = CMD_QP_RQWQ;

	status = usnic_fwd_devcmd(ufdev, vnic_idx, CMD_QP_DISABLE,
			&a0, &a1);
	if (status) {
		usnic_err("PF %s VNIC Index %u RQ Index: %u DISABLE Failed with status %d",
				netdev_name(pf_netdev),
				vnic_idx,
				qp_idx,
				status);
	} else {
		usnic_dbg("PF %s VNIC Index %u RQ Index: %u DISABLED",
				netdev_name(pf_netdev),
				vnic_idx,
				qp_idx);
	}

	return status;
}
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef USNIC_FWD_H_
#define USNIC_FWD_H_
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/in.h>
#include "usnic_abi.h"
#include "usnic_common_pkt_hdr.h"
#include "vnic_devcmd.h"
struct usnic_fwd_dev {
struct pci_dev *pdev;
struct net_device *netdev;
spinlock_t lock;
/*
* The following fields can be read directly off the device.
* However, they should be set by a accessor function, except name,
* which cannot be changed.
*/
bool link_up;
char mac[ETH_ALEN];
unsigned int mtu;
__be32 inaddr;
char name[IFNAMSIZ+1];
};
struct usnic_fwd_flow {
uint32_t flow_id;
struct usnic_fwd_dev *ufdev;
unsigned int vnic_idx;
};
struct usnic_filter_action {
int vnic_idx;
struct filter_action action;
};
struct usnic_fwd_dev *usnic_fwd_dev_alloc(struct pci_dev *pdev);
void usnic_fwd_dev_free(struct usnic_fwd_dev *ufdev);
void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, char mac[ETH_ALEN]);
int usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr);
void usnic_fwd_del_ipaddr(struct usnic_fwd_dev *ufdev);
void usnic_fwd_carrier_up(struct usnic_fwd_dev *ufdev);
void usnic_fwd_carrier_down(struct usnic_fwd_dev *ufdev);
void usnic_fwd_set_mtu(struct usnic_fwd_dev *ufdev, unsigned int mtu);
/*
* Allocate a flow on this forwarding device. Whoever calls this function,
* must monitor netdev events on ufdev's netdevice. If NETDEV_REBOOT or
* NETDEV_DOWN is seen, flow will no longer function and must be
* immediately freed by calling usnic_dealloc_flow.
*/
struct usnic_fwd_flow*
usnic_fwd_alloc_flow(struct usnic_fwd_dev *ufdev, struct filter *filter,
struct usnic_filter_action *action);
int usnic_fwd_dealloc_flow(struct usnic_fwd_flow *flow);
int usnic_fwd_enable_qp(struct usnic_fwd_dev *ufdev, int vnic_idx, int qp_idx);
int usnic_fwd_disable_qp(struct usnic_fwd_dev *ufdev, int vnic_idx, int qp_idx);
/*
 * Initialize a FILTER_USNIC_ID filter matching the usnic RoCE
 * ethertype, protocol version and the given usnic id.
 */
static inline void usnic_fwd_init_usnic_filter(struct filter *filter,
						uint32_t usnic_id)
{
	filter->type = FILTER_USNIC_ID;
	filter->u.usnic.ethtype = USNIC_ROCE_ETHERTYPE;
	filter->u.usnic.flags = FILTER_FIELD_USNIC_ETHTYPE |
				FILTER_FIELD_USNIC_ID |
				FILTER_FIELD_USNIC_PROTO;
	/* GRH version in the high nibble, protocol version in the low. */
	filter->u.usnic.proto_version = (USNIC_ROCE_GRH_VER <<
						USNIC_ROCE_GRH_VER_SHIFT) |
					USNIC_PROTO_VER;
	filter->u.usnic.usnic_id = usnic_id;
}
/*
 * Initialize an IPv4 5-tuple filter matching UDP traffic.  Destination
 * address and port are matched only when non-zero.
 */
static inline void usnic_fwd_init_udp_filter(struct filter *filter,
						uint32_t daddr, uint16_t dport)
{
	filter->type = FILTER_IPV4_5TUPLE;
	filter->u.ipv4.flags = FILTER_FIELD_5TUP_PROTO;
	filter->u.ipv4.protocol = PROTO_UDP;

	if (daddr) {
		filter->u.ipv4.flags |= FILTER_FIELD_5TUP_DST_AD;
		filter->u.ipv4.dst_addr = daddr;
	}
	if (dport) {
		filter->u.ipv4.flags |= FILTER_FIELD_5TUP_DST_PT;
		filter->u.ipv4.dst_port = dport;
	}
}
#endif /* !USNIC_FWD_H_ */
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef USNIC_IB_H_
#define USNIC_IB_H_
#include <linux/iommu.h>
#include <linux/netdevice.h>
#include <rdma/ib_verbs.h>
#include "usnic.h"
#include "usnic_abi.h"
#include "usnic_vnic.h"
#define USNIC_IB_PORT_CNT 1
#define USNIC_IB_NUM_COMP_VECTORS 1
extern unsigned int usnic_ib_share_vf;
struct usnic_ib_ucontext {
struct ib_ucontext ibucontext;
/* Protected by usnic_ib_dev->usdev_lock */
struct list_head qp_grp_list;
struct list_head link;
};
struct usnic_ib_pd {
struct ib_pd ibpd;
struct usnic_uiom_pd *umem_pd;
};
struct usnic_ib_mr {
struct ib_mr ibmr;
struct usnic_uiom_reg *umem;
};
/*
 * Per-PF usnic IB device.  Ties the ib_device to its PCI function,
 * net_device and forwarding device, and tracks the VFs provisioned
 * under this PF.
 */
struct usnic_ib_dev {
	struct ib_device		ib_dev;
	struct pci_dev			*pdev;
	struct net_device		*netdev;
	struct usnic_fwd_dev		*ufdev;
	struct list_head		ib_dev_link;	/* global device list linkage */
	struct list_head		vf_dev_list;	/* VFs under this PF */
	struct list_head		ctx_list;	/* open ucontexts */
	struct mutex			usdev_lock;	/* guards the lists above */

	/* provisioning information */
	struct kref			vf_cnt;		/* number of VFs (kref used as counter) */
	unsigned int			vf_res_cnt[USNIC_VNIC_RES_TYPE_MAX];

	/* sysfs vars for QPN reporting */
	struct kobject			*qpn_kobj;	/* /sys/.../qpn directory */
};
/* A virtual function under a PF; owns one vnic and is bound to one PD at a time. */
struct usnic_ib_vf {
	struct usnic_ib_dev		*pf;	/* owning PF */
	spinlock_t			lock;
	struct usnic_vnic		*vnic;
	unsigned int			qp_grp_ref_cnt;	/* QP groups using this VF */
	struct usnic_ib_pd		*pd;	/* PD this VF is attached to, or NULL */
	struct list_head		link;	/* entry in usnic_ib_dev->vf_dev_list */
};
static inline
struct usnic_ib_dev *to_usdev(struct ib_device *ibdev)
{
	struct usnic_ib_dev *us_ibdev;

	/* Recover the usnic device that embeds this ib_device. */
	us_ibdev = container_of(ibdev, struct usnic_ib_dev, ib_dev);
	return us_ibdev;
}
static inline
struct usnic_ib_ucontext *to_ucontext(struct ib_ucontext *ibucontext)
{
	struct usnic_ib_ucontext *uctx;

	/* Recover the usnic ucontext that embeds this ib_ucontext. */
	uctx = container_of(ibucontext, struct usnic_ib_ucontext, ibucontext);
	return uctx;
}
static inline
struct usnic_ib_pd *to_upd(struct ib_pd *ibpd)
{
	struct usnic_ib_pd *upd;

	/* Recover the usnic PD that embeds this ib_pd. */
	upd = container_of(ibpd, struct usnic_ib_pd, ibpd);
	return upd;
}
/*
 * NOTE(review): identical to to_ucontext() above; both names appear to be
 * in use.  Kept for compatibility -- consider consolidating callers on one.
 */
static inline
struct usnic_ib_ucontext *to_uucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct usnic_ib_ucontext, ibucontext);
}
static inline
struct usnic_ib_mr *to_umr(struct ib_mr *ibmr)
{
	struct usnic_ib_mr *umr;

	/* Recover the usnic MR that embeds this ib_mr. */
	umr = container_of(ibmr, struct usnic_ib_mr, ibmr);
	return umr;
}
void usnic_ib_log_vf(struct usnic_ib_vf *vf);
#define UPDATE_PTR_LEFT(N, P, L) \
do { \
L -= (N); \
P += (N); \
} while (0)
#endif /* USNIC_IB_H_ */
This diff has been collapsed.
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include "usnic_log.h"
#include "usnic_vnic.h"
#include "usnic_fwd.h"
#include "usnic_uiom.h"
#include "usnic_debugfs.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_ib_sysfs.h"
#include "usnic_transport.h"
#define DFLT_RQ_IDX 0
/*
 * Map an ib_qp_state value to a short human-readable name used in
 * log messages and sysfs/debugfs dumps.
 */
const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return "Rst";
	case IB_QPS_INIT:
		return "Init";
	case IB_QPS_RTR:
		return "RTR";
	case IB_QPS_RTS:
		return "RTS";
	case IB_QPS_SQD:
		return "SQD";
	case IB_QPS_SQE:
		return "SQE";
	case IB_QPS_ERR:
		return "ERR";
	default:
		/* Fixed typo: message previously read "UNKOWN STATE". */
		return "UNKNOWN STATE";
	}
}
/* Emit the column header row matching usnic_ib_qp_grp_dump_rows(). */
int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz)
{
	static const char hdr[] = "|QPN\t|State\t|PID\t|VF Idx\t|Fil ID";

	return scnprintf(buf, buf_sz, "%s", hdr);
}
/*
 * Dump one row describing @obj (a usnic_ib_qp_grp) in the column order
 * produced by usnic_ib_qp_grp_dump_hdr().  A NULL @obj emits an N/A row.
 */
int usnic_ib_qp_grp_dump_rows(void *obj, char *buf, int buf_sz)
{
	struct usnic_ib_qp_grp *qp_grp = obj;
	struct usnic_ib_qp_grp_flow *default_flow;
	if (obj) {
		/*
		 * NOTE(review): assumes flows_lst is non-empty and does not
		 * take qp_grp->lock -- confirm callers guarantee both.
		 */
		default_flow = list_first_entry(&qp_grp->flows_lst,
					struct usnic_ib_qp_grp_flow, link);
		return scnprintf(buf, buf_sz, "|%d\t|%s\t|%d\t|%hu\t|%d",
					qp_grp->ibqp.qp_num,
					usnic_ib_qp_grp_state_to_string(
						qp_grp->state),
					qp_grp->owner_pid,
					usnic_vnic_get_index(qp_grp->vf->vnic),
					default_flow->flow->flow_id);
	} else {
		return scnprintf(buf, buf_sz, "|N/A\t|N/A\t|N/A\t|N/A\t|N/A");
	}
}
/*
 * Return the resource chunk whose entries define the QP indices for
 * @qp_grp.  Caller must hold qp_grp->lock.
 */
static struct usnic_vnic_res_chunk *
get_qp_res_chunk(struct usnic_ib_qp_grp *qp_grp)
{
	lockdep_assert_held(&qp_grp->lock);
	/*
	 * The QP res chunk, used to derive qp indices,
	 * are just indices of the RQs
	 */
	return usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
}
/*
 * Ask the forwarding device to enable every RQ backing @qp_grp on its VF.
 * On failure, rolls back by disabling any queues already enabled and
 * returns the error from the failed usnic_fwd_enable_qp() call.
 * Caller must hold qp_grp->lock.
 */
static int enable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
{
	int status;
	int i, vnic_idx;
	struct usnic_vnic_res_chunk *res_chunk;
	struct usnic_vnic_res *res;

	lockdep_assert_held(&qp_grp->lock);

	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);

	res_chunk = get_qp_res_chunk(qp_grp);
	if (IS_ERR_OR_NULL(res_chunk)) {
		usnic_err("Unable to get qp res with err %ld\n",
				PTR_ERR(res_chunk));
		return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
	}

	for (i = 0; i < res_chunk->cnt; i++) {
		res = res_chunk->res[i];
		status = usnic_fwd_enable_qp(qp_grp->ufdev, vnic_idx,
						res->vnic_idx);
		if (status) {
			/* Fixed stray "\n" in the middle of the message. */
			usnic_err("Failed to enable qp %d of %s:%d with err %d\n",
					res->vnic_idx, qp_grp->ufdev->name,
					vnic_idx, status);
			goto out_err;
		}
	}

	return 0;

out_err:
	/* Roll back queues [0, i) that were already enabled. */
	for (i--; i >= 0; i--) {
		res = res_chunk->res[i];
		usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
					res->vnic_idx);
	}

	return status;
}
/*
 * Disable every RQ backing @qp_grp.  Attempts all queues even if one
 * fails; the status of the last failing call (or 0) is returned.
 * Caller must hold qp_grp->lock.
 */
static int disable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
{
	int i, vnic_idx;
	struct usnic_vnic_res_chunk *res_chunk;
	struct usnic_vnic_res *res;
	int status = 0;

	lockdep_assert_held(&qp_grp->lock);
	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);

	res_chunk = get_qp_res_chunk(qp_grp);
	if (IS_ERR_OR_NULL(res_chunk)) {
		usnic_err("Unable to get qp res with err %ld\n",
				PTR_ERR(res_chunk));
		return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
	}

	for (i = 0; i < res_chunk->cnt; i++) {
		res = res_chunk->res[i];
		status = usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
						res->vnic_idx);
		if (status) {
			/* Fixed stray "\n" in the middle of the message. */
			usnic_err("Failed to disable rq %d of %s:%d with err %d\n",
					res->vnic_idx,
					qp_grp->ufdev->name,
					vnic_idx, status);
		}
	}

	return status;
}
/*
 * Fill @uaction so that matching packets are steered to the default RQ
 * of @qp_grp's VF.  Returns 0 on success or a negative errno.
 */
static int init_filter_action(struct usnic_ib_qp_grp *qp_grp,
				struct usnic_filter_action *uaction)
{
	struct usnic_vnic_res_chunk *res_chunk;

	res_chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
	if (IS_ERR_OR_NULL(res_chunk)) {
		usnic_err("Unable to get %s with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
			PTR_ERR(res_chunk));
		return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
	}

	/* Steer to the first RQ in the chunk (DFLT_RQ_IDX == 0). */
	uaction->vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
	uaction->action.type = FILTER_ACTION_RQ_STEERING;
	uaction->action.u.rq_idx = res_chunk->res[DFLT_RQ_IDX]->vnic_idx;

	return 0;
}
/*
 * Reserve a usnic ROCE port and install a hardware flow steering matching
 * traffic for that port to @qp_grp's default RQ.  Returns the new qp_flow
 * handle, or an ERR_PTR; all partial work is undone on failure.
 */
static struct usnic_ib_qp_grp_flow*
create_roce_custom_flow(struct usnic_ib_qp_grp *qp_grp,
			struct usnic_transport_spec *trans_spec)
{
	uint16_t port_num;
	int err;
	struct filter filter;
	struct usnic_filter_action uaction;
	struct usnic_ib_qp_grp_flow *qp_flow;
	struct usnic_fwd_flow *flow;
	enum usnic_transport_type trans_type;

	trans_type = trans_spec->trans_type;
	port_num = trans_spec->usnic_roce.port_num;

	/* Reserve Port */
	port_num = usnic_transport_rsrv_port(trans_type, port_num);
	if (port_num == 0)
		return ERR_PTR(-EINVAL);

	/* Create Flow */
	usnic_fwd_init_usnic_filter(&filter, port_num);
	err = init_filter_action(qp_grp, &uaction);
	if (err)
		goto out_unreserve_port;

	flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
	if (IS_ERR_OR_NULL(flow)) {
		usnic_err("Unable to alloc flow failed with err %ld\n",
				PTR_ERR(flow));
		err = flow ? PTR_ERR(flow) : -EFAULT;
		goto out_unreserve_port;
	}

	/* Create Flow Handle */
	/* NOTE(review): kzalloc never returns ERR_PTR; a plain NULL check
	 * would suffice here. */
	qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
	if (IS_ERR_OR_NULL(qp_flow)) {
		err = qp_flow ? PTR_ERR(qp_flow) : -ENOMEM;
		goto out_dealloc_flow;
	}
	qp_flow->flow = flow;
	qp_flow->trans_type = trans_type;
	qp_flow->usnic_roce.port_num = port_num;
	qp_flow->qp_grp = qp_grp;
	return qp_flow;

out_dealloc_flow:
	usnic_fwd_dealloc_flow(flow);
out_unreserve_port:
	usnic_transport_unrsrv_port(trans_type, port_num);
	return ERR_PTR(err);
}
/* Tear down a ROCE-custom flow: hw flow, port reservation, then handle. */
static void release_roce_custom_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_fwd_dealloc_flow(qp_flow->flow);
	usnic_transport_unrsrv_port(qp_flow->trans_type,
					qp_flow->usnic_roce.port_num);
	kfree(qp_flow);
}
/*
 * Build a flow from a user-supplied UDP socket fd: validate the socket is
 * UDP, read its bound address/port, and install a hardware flow steering
 * that 4-tuple to @qp_grp's default RQ.  Holds a reference on the socket
 * for the lifetime of the flow.  Returns the qp_flow handle or ERR_PTR.
 */
static struct usnic_ib_qp_grp_flow*
create_udp_flow(struct usnic_ib_qp_grp *qp_grp,
		struct usnic_transport_spec *trans_spec)
{
	struct socket *sock;
	int sock_fd;
	int err;
	struct filter filter;
	struct usnic_filter_action uaction;
	struct usnic_ib_qp_grp_flow *qp_flow;
	struct usnic_fwd_flow *flow;
	enum usnic_transport_type trans_type;
	uint32_t addr;
	uint16_t port_num;
	int proto;

	trans_type = trans_spec->trans_type;
	sock_fd = trans_spec->udp.sock_fd;

	/* Get and check socket */
	sock = usnic_transport_get_socket(sock_fd);
	if (IS_ERR_OR_NULL(sock))
		return ERR_CAST(sock);

	err = usnic_transport_sock_get_addr(sock, &proto, &addr, &port_num);
	if (err)
		goto out_put_sock;

	if (proto != IPPROTO_UDP) {
		usnic_err("Protocol for fd %d is not UDP", sock_fd);
		err = -EPERM;
		goto out_put_sock;
	}

	/* Create flow */
	usnic_fwd_init_udp_filter(&filter, addr, port_num);
	err = init_filter_action(qp_grp, &uaction);
	if (err)
		goto out_put_sock;

	flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
	if (IS_ERR_OR_NULL(flow)) {
		usnic_err("Unable to alloc flow failed with err %ld\n",
				PTR_ERR(flow));
		err = flow ? PTR_ERR(flow) : -EFAULT;
		goto out_put_sock;
	}

	/* Create qp_flow */
	/* NOTE(review): kzalloc never returns ERR_PTR; a NULL check suffices. */
	qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
	if (IS_ERR_OR_NULL(qp_flow)) {
		err = qp_flow ? PTR_ERR(qp_flow) : -ENOMEM;
		goto out_dealloc_flow;
	}
	qp_flow->flow = flow;
	qp_flow->trans_type = trans_type;
	qp_flow->udp.sock = sock;
	qp_flow->qp_grp = qp_grp;
	return qp_flow;

out_dealloc_flow:
	usnic_fwd_dealloc_flow(flow);
out_put_sock:
	usnic_transport_put_socket(sock);
	return ERR_PTR(err);
}
/* Tear down a UDP flow: hw flow, socket reference, then the handle. */
static void release_udp_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_fwd_dealloc_flow(qp_flow->flow);
	usnic_transport_put_socket(qp_flow->udp.sock);
	kfree(qp_flow);
}
/*
 * Dispatch on the transport type to create a flow, then link it into
 * @qp_grp->flows_lst and expose it in debugfs.  Returns the flow handle
 * or an ERR_PTR for unsupported transports / creation failure.
 */
static struct usnic_ib_qp_grp_flow*
create_and_add_flow(struct usnic_ib_qp_grp *qp_grp,
			struct usnic_transport_spec *trans_spec)
{
	struct usnic_ib_qp_grp_flow *qp_flow;
	enum usnic_transport_type trans_type;

	trans_type = trans_spec->trans_type;
	switch (trans_type) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		qp_flow = create_roce_custom_flow(qp_grp, trans_spec);
		break;
	case USNIC_TRANSPORT_IPV4_UDP:
		qp_flow = create_udp_flow(qp_grp, trans_spec);
		break;
	default:
		usnic_err("Unsupported transport %u\n",
				trans_spec->trans_type);
		return ERR_PTR(-EINVAL);
	}

	if (!IS_ERR_OR_NULL(qp_flow)) {
		list_add_tail(&qp_flow->link, &qp_grp->flows_lst);
		usnic_debugfs_flow_add(qp_flow);
	}

	return qp_flow;
}
/*
 * Inverse of create_and_add_flow(): remove debugfs entry, unlink from the
 * group's flow list, then release transport-specific resources.
 */
static void release_and_remove_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_debugfs_flow_remove(qp_flow);
	list_del(&qp_flow->link);

	switch (qp_flow->trans_type) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		release_roce_custom_flow(qp_flow);
		break;
	case USNIC_TRANSPORT_IPV4_UDP:
		release_udp_flow(qp_flow);
		break;
	default:
		/* Should be unreachable: flows are only created for the
		 * transports above. */
		WARN(1, "Unsupported transport %u\n",
				qp_flow->trans_type);
		break;
	}
}
/* Release every flow attached to @qp_grp (safe iteration: entries are freed). */
static void release_and_remove_all_flows(struct usnic_ib_qp_grp *qp_grp)
{
	struct usnic_ib_qp_grp_flow *qp_flow, *tmp;
	list_for_each_entry_safe(qp_flow, tmp, &qp_grp->flows_lst, link)
		release_and_remove_flow(qp_flow);
}
/*
 * Drive the QP-group state machine for modify_qp.  @data optionally
 * carries a usnic_transport_spec when moving into INIT (to add a flow).
 * Transitions into RESET/ERR tear down flows and disable queues; RTR
 * enables them.  On success qp_grp->state is advanced to @new_state.
 * Returns 0 or -EINVAL for an unsupported transition (or the error from
 * the underlying enable/disable/flow operation).
 */
int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp,
				enum ib_qp_state new_state,
				void *data)
{
	int status = 0;
	int vnic_idx;
	struct ib_event ib_event;
	enum ib_qp_state old_state;
	struct usnic_transport_spec *trans_spec;
	struct usnic_ib_qp_grp_flow *qp_flow;

	/*
	 * NOTE(review): old_state is sampled before qp_grp->lock is taken;
	 * confirm callers serialize modify operations.
	 */
	old_state = qp_grp->state;
	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
	trans_spec = (struct usnic_transport_spec *) data;

	spin_lock(&qp_grp->lock);
	switch (new_state) {
	case IB_QPS_RESET:
		switch (old_state) {
		case IB_QPS_RESET:
			/* NO-OP */
			break;
		case IB_QPS_INIT:
			release_and_remove_all_flows(qp_grp);
			status = 0;
			break;
		case IB_QPS_RTR:
		case IB_QPS_RTS:
		case IB_QPS_ERR:
			status = disable_qp_grp(qp_grp);
			release_and_remove_all_flows(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_INIT:
		switch (old_state) {
		case IB_QPS_RESET:
			if (trans_spec) {
				qp_flow = create_and_add_flow(qp_grp,
								trans_spec);
				if (IS_ERR_OR_NULL(qp_flow)) {
					status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
					break;
				}
			} else {
				/*
				 * Optional to specify filters.
				 */
				status = 0;
			}
			break;
		case IB_QPS_INIT:
			if (trans_spec) {
				qp_flow = create_and_add_flow(qp_grp,
								trans_spec);
				if (IS_ERR_OR_NULL(qp_flow)) {
					status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
					break;
				}
			} else {
				/*
				 * Doesn't make sense to go into INIT state
				 * from INIT state w/o adding filters.
				 */
				status = -EINVAL;
			}
			break;
		case IB_QPS_RTR:
			status = disable_qp_grp(qp_grp);
			break;
		case IB_QPS_RTS:
			status = disable_qp_grp(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_RTR:
		switch (old_state) {
		case IB_QPS_INIT:
			status = enable_qp_grp(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_RTS:
		switch (old_state) {
		case IB_QPS_RTR:
			/* NO-OP FOR NOW */
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_ERR:
		/* Notify the consumer before tearing anything down. */
		ib_event.device = &qp_grp->vf->pf->ib_dev;
		ib_event.element.qp = &qp_grp->ibqp;
		ib_event.event = IB_EVENT_QP_FATAL;

		switch (old_state) {
		case IB_QPS_RESET:
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		case IB_QPS_INIT:
			release_and_remove_all_flows(qp_grp);
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		case IB_QPS_RTR:
		case IB_QPS_RTS:
			status = disable_qp_grp(qp_grp);
			release_and_remove_all_flows(qp_grp);
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		default:
			status = -EINVAL;
		}
		break;
	default:
		status = -EINVAL;
	}
	spin_unlock(&qp_grp->lock);

	if (!status) {
		qp_grp->state = new_state;
		/* Fixed typo: was "Transistioned". */
		usnic_info("Transitioned %u from %s to %s",
				qp_grp->grp_id,
				usnic_ib_qp_grp_state_to_string(old_state),
				usnic_ib_qp_grp_state_to_string(new_state));
	} else {
		/* Fixed typo: was "transistion". */
		usnic_err("Failed to transition %u from %s to %s",
				qp_grp->grp_id,
				usnic_ib_qp_grp_state_to_string(old_state),
				usnic_ib_qp_grp_state_to_string(new_state));
	}

	return status;
}
/*
 * Allocate a NULL-terminated array of resource chunks from @vnic, one per
 * entry in @res_spec (terminated by USNIC_VNIC_RES_TYPE_EOL).  On failure,
 * releases every chunk acquired so far and returns an ERR_PTR.
 */
static struct usnic_vnic_res_chunk**
alloc_res_chunk_list(struct usnic_vnic *vnic,
			struct usnic_vnic_res_spec *res_spec, void *owner_obj)
{
	enum usnic_vnic_res_type res_type;
	struct usnic_vnic_res_chunk **res_chunk_list;
	int err, i, res_cnt, res_lst_sz;

	/* Count spec entries so we can allocate the (NULL-terminated) list. */
	for (res_lst_sz = 0;
		res_spec->resources[res_lst_sz].type != USNIC_VNIC_RES_TYPE_EOL;
		res_lst_sz++) {
		/* Do Nothing */
	}

	res_chunk_list = kzalloc(sizeof(*res_chunk_list)*(res_lst_sz+1),
					GFP_ATOMIC);
	if (!res_chunk_list)
		return ERR_PTR(-ENOMEM);

	for (i = 0; res_spec->resources[i].type != USNIC_VNIC_RES_TYPE_EOL;
		i++) {
		res_type = res_spec->resources[i].type;
		res_cnt = res_spec->resources[i].cnt;

		res_chunk_list[i] = usnic_vnic_get_resources(vnic, res_type,
					res_cnt, owner_obj);
		if (IS_ERR_OR_NULL(res_chunk_list[i])) {
			err = res_chunk_list[i] ?
				PTR_ERR(res_chunk_list[i]) : -ENOMEM;
			usnic_err("Failed to get %s from %s with err %d\n",
				usnic_vnic_res_type_to_str(res_type),
				usnic_vnic_pci_name(vnic),
				err);
			goto out_free_res;
		}
	}

	return res_chunk_list;

out_free_res:
	/*
	 * Bug fix: was "i > 0", which skipped index 0 and leaked the first
	 * chunk on error (compare enable_qp_grp()'s rollback loop).
	 */
	for (i--; i >= 0; i--)
		usnic_vnic_put_resources(res_chunk_list[i]);
	kfree(res_chunk_list);
	return ERR_PTR(err);
}
/* Release every chunk in the NULL-terminated list, then the list itself. */
static void free_qp_grp_res(struct usnic_vnic_res_chunk **res_chunk_list)
{
	struct usnic_vnic_res_chunk **chunk;

	for (chunk = res_chunk_list; *chunk; chunk++)
		usnic_vnic_put_resources(*chunk);
	kfree(res_chunk_list);
}
/*
 * Bind @qp_grp to @vf, attaching the VF's PCI device to @pd's IOMMU
 * domain on the first binding.  A VF serves only one PD at a time
 * (WARN if a different PD is already bound).  Caller must hold vf->lock.
 */
static int qp_grp_and_vf_bind(struct usnic_ib_vf *vf,
				struct usnic_ib_pd *pd,
				struct usnic_ib_qp_grp *qp_grp)
{
	int err;
	struct pci_dev *pdev;

	lockdep_assert_held(&vf->lock);

	pdev = usnic_vnic_get_pdev(vf->vnic);
	if (vf->qp_grp_ref_cnt == 0) {
		/* First QP group on this VF: attach device to the PD. */
		err = usnic_uiom_attach_dev_to_pd(pd->umem_pd, &pdev->dev);
		if (err) {
			usnic_err("Failed to attach %s to domain\n",
					pci_name(pdev));
			return err;
		}
		vf->pd = pd;
	}
	vf->qp_grp_ref_cnt++;

	WARN_ON(vf->pd != pd);
	qp_grp->vf = vf;

	return 0;
}
/*
 * Inverse of qp_grp_and_vf_bind(): drop the VF's QP-group refcount and,
 * when it hits zero, detach the VF's PCI device from the PD's IOMMU
 * domain.  Caller must hold the VF's lock.
 */
static void qp_grp_and_vf_unbind(struct usnic_ib_qp_grp *qp_grp)
{
	struct pci_dev *pdev;
	struct usnic_ib_pd *pd;

	lockdep_assert_held(&qp_grp->vf->lock);

	pd = qp_grp->vf->pd;
	pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
	if (--qp_grp->vf->qp_grp_ref_cnt == 0) {
		qp_grp->vf->pd = NULL;
		usnic_uiom_detach_dev_from_pd(pd->umem_pd, &pdev->dev);
	}
	qp_grp->vf = NULL;
}
/* Debug-log a resource spec (512-byte stack buffer; dump is truncated to fit). */
static void log_spec(struct usnic_vnic_res_spec *res_spec)
{
	char buf[512];
	usnic_vnic_spec_dump(buf, sizeof(buf), res_spec);
	usnic_dbg("%s\n", buf);
}
/*
 * Derive the QP-group id (used as the QPN) from a flow: the reserved
 * ROCE port, or the UDP socket's bound port.  Returns 0 or an error.
 */
static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
				uint32_t *id)
{
	enum usnic_transport_type trans_type = qp_flow->trans_type;
	int err;

	switch (trans_type) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		*id = qp_flow->usnic_roce.port_num;
		break;
	case USNIC_TRANSPORT_IPV4_UDP:
		/*
		 * NOTE(review): writes only 16 bits of *id via the cast;
		 * relies on the caller's id being zero-initialized, and on
		 * little-endian layout -- TODO confirm on big-endian.
		 */
		err = usnic_transport_sock_get_addr(qp_flow->udp.sock,
							NULL, NULL,
							(uint16_t *) id);
		if (err)
			return err;
		break;
	default:
		usnic_err("Unsupported transport %u\n", trans_type);
		return -EINVAL;
	}

	return 0;
}
/*
 * Allocate and wire up a QP group: verify @res_spec meets the transport's
 * minimum, grab vnic resources, bind the group to @vf/@pd, install the
 * initial flow from @transport_spec, derive the QPN from that flow, and
 * expose the group in sysfs.  Caller must hold vf->lock.
 *
 * Returns the group, or ERR_PTR on most failures.
 * NOTE(review): the kzalloc-failure path returns NULL rather than an
 * ERR_PTR, unlike every other error path -- confirm callers use
 * IS_ERR_OR_NULL.
 */
struct usnic_ib_qp_grp *
usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
			struct usnic_ib_pd *pd,
			struct usnic_vnic_res_spec *res_spec,
			struct usnic_transport_spec *transport_spec)
{
	struct usnic_ib_qp_grp *qp_grp;
	int err;
	enum usnic_transport_type transport = transport_spec->trans_type;
	struct usnic_ib_qp_grp_flow *qp_flow;

	lockdep_assert_held(&vf->lock);

	err = usnic_vnic_res_spec_satisfied(&min_transport_spec[transport],
						res_spec);
	if (err) {
		usnic_err("Spec does not meet miniumum req for transport %d\n",
				transport);
		log_spec(res_spec);
		return ERR_PTR(err);
	}

	qp_grp = kzalloc(sizeof(*qp_grp), GFP_ATOMIC);
	if (!qp_grp) {
		usnic_err("Unable to alloc qp_grp - Out of memory\n");
		return NULL;
	}

	qp_grp->res_chunk_list = alloc_res_chunk_list(vf->vnic, res_spec,
							qp_grp);
	if (IS_ERR_OR_NULL(qp_grp->res_chunk_list)) {
		err = qp_grp->res_chunk_list ?
				PTR_ERR(qp_grp->res_chunk_list) : -ENOMEM;
		usnic_err("Unable to alloc res for %d with err %d\n",
				qp_grp->grp_id, err);
		goto out_free_qp_grp;
	}

	err = qp_grp_and_vf_bind(vf, pd, qp_grp);
	if (err)
		goto out_free_res;

	INIT_LIST_HEAD(&qp_grp->flows_lst);
	spin_lock_init(&qp_grp->lock);
	qp_grp->ufdev = ufdev;
	qp_grp->state = IB_QPS_RESET;
	qp_grp->owner_pid = current->pid;

	qp_flow = create_and_add_flow(qp_grp, transport_spec);
	if (IS_ERR_OR_NULL(qp_flow)) {
		usnic_err("Unable to create and add flow with err %ld\n",
				PTR_ERR(qp_flow));
		err = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
		goto out_qp_grp_vf_unbind;
	}

	/* The group id doubles as the QP number reported to consumers. */
	err = qp_grp_id_from_flow(qp_flow, &qp_grp->grp_id);
	if (err)
		goto out_release_flow;
	qp_grp->ibqp.qp_num = qp_grp->grp_id;

	usnic_ib_sysfs_qpn_add(qp_grp);

	return qp_grp;

out_release_flow:
	release_and_remove_flow(qp_flow);
out_qp_grp_vf_unbind:
	qp_grp_and_vf_unbind(qp_grp);
out_free_res:
	free_qp_grp_res(qp_grp->res_chunk_list);
out_free_qp_grp:
	kfree(qp_grp);

	return ERR_PTR(err);
}
/*
 * Inverse of usnic_ib_qp_grp_create().  The group must already be in
 * RESET (WARNs otherwise) and the caller must hold the VF's lock.
 */
void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
{

	WARN_ON(qp_grp->state != IB_QPS_RESET);
	lockdep_assert_held(&qp_grp->vf->lock);

	release_and_remove_all_flows(qp_grp);
	usnic_ib_sysfs_qpn_remove(qp_grp);
	qp_grp_and_vf_unbind(qp_grp);
	free_qp_grp_res(qp_grp->res_chunk_list);
	kfree(qp_grp);
}
struct usnic_vnic_res_chunk*
usnic_ib_qp_grp_get_chunk(struct usnic_ib_qp_grp *qp_grp,
enum usnic_vnic_res_type res_type)
{
int i;
for (i = 0; qp_grp->res_chunk_list[i]; i++) {
if (qp_grp->res_chunk_list[i]->type == res_type)
return qp_grp->res_chunk_list[i];
}
return ERR_PTR(-EINVAL);
}
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef USNIC_IB_QP_GRP_H_
#define USNIC_IB_QP_GRP_H_
#include <linux/debugfs.h>
#include <rdma/ib_verbs.h>
#include "usnic_ib.h"
#include "usnic_abi.h"
#include "usnic_fwd.h"
#include "usnic_vnic.h"
/*
* The qp group struct represents all the hw resources needed to present a ib_qp
*/
/* All hardware resources backing a single ib_qp presented to consumers. */
struct usnic_ib_qp_grp {
	struct ib_qp				ibqp;
	enum ib_qp_state			state;		/* driver-tracked QP state */
	int					grp_id;		/* doubles as the QPN */

	struct usnic_fwd_dev			*ufdev;
	struct usnic_ib_ucontext		*ctx;
	struct list_head			flows_lst;	/* attached steering flows */

	struct usnic_vnic_res_chunk		**res_chunk_list;	/* NULL-terminated */

	pid_t					owner_pid;	/* creating process */
	struct usnic_ib_vf			*vf;		/* VF this group is bound to */
	struct list_head			link;

	spinlock_t				lock;		/* guards state transitions */

	struct kobject				kobj;		/* sysfs qpn/<id> entry */
};
/* One installed steering flow of a QP group, plus its transport identity. */
struct usnic_ib_qp_grp_flow {
	struct usnic_fwd_flow		*flow;
	enum usnic_transport_type	trans_type;	/* selects the union member */
	union {
		struct {
			uint16_t	port_num;	/* reserved usnic ROCE port */
		} usnic_roce;
		struct {
			struct socket	*sock;		/* referenced user UDP socket */
		} udp;
	};
	struct usnic_ib_qp_grp		*qp_grp;	/* owning group */
	struct list_head		link;		/* entry in qp_grp->flows_lst */
	/* Debug FS */
	struct dentry			*dbgfs_dentry;
	char				dentry_name[32];
};
/*
 * Minimum vnic resources each transport needs; checked by
 * usnic_ib_qp_grp_create() via usnic_vnic_res_spec_satisfied().
 * NOTE(review): static const in a header gives each including TU its own
 * copy -- presumably intentional, but verify it is only used read-only.
 */
static const struct
usnic_vnic_res_spec min_transport_spec[USNIC_TRANSPORT_MAX] = {
	{ /*USNIC_TRANSPORT_UNKNOWN*/
		.resources = {
			{.type = USNIC_VNIC_RES_TYPE_EOL,	.cnt = 0,},
		},
	},
	{ /*USNIC_TRANSPORT_ROCE_CUSTOM*/
		.resources = {
			{.type = USNIC_VNIC_RES_TYPE_WQ,	.cnt = 1,},
			{.type = USNIC_VNIC_RES_TYPE_RQ,	.cnt = 1,},
			{.type = USNIC_VNIC_RES_TYPE_CQ,	.cnt = 1,},
			{.type = USNIC_VNIC_RES_TYPE_EOL,	.cnt = 0,},
		},
	},
	{ /*USNIC_TRANSPORT_IPV4_UDP*/
		.resources = {
			{.type = USNIC_VNIC_RES_TYPE_WQ,	.cnt = 1,},
			{.type = USNIC_VNIC_RES_TYPE_RQ,	.cnt = 1,},
			{.type = USNIC_VNIC_RES_TYPE_CQ,	.cnt = 1,},
			{.type = USNIC_VNIC_RES_TYPE_EOL,	.cnt = 0,},
		},
	},
};
const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state);
int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz);
int usnic_ib_qp_grp_dump_rows(void *obj, char *buf, int buf_sz);
struct usnic_ib_qp_grp *
usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
struct usnic_ib_pd *pd,
struct usnic_vnic_res_spec *res_spec,
struct usnic_transport_spec *trans_spec);
void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp);
int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp,
enum ib_qp_state new_state,
void *data);
struct usnic_vnic_res_chunk
*usnic_ib_qp_grp_get_chunk(struct usnic_ib_qp_grp *qp_grp,
enum usnic_vnic_res_type type);
static inline
struct usnic_ib_qp_grp *to_uqp_grp(struct ib_qp *ibqp)
{
	struct usnic_ib_qp_grp *grp;

	/* Recover the QP group that embeds this ib_qp. */
	grp = container_of(ibqp, struct usnic_ib_qp_grp, ibqp);
	return grp;
}
#endif /* USNIC_IB_QP_GRP_H_ */
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include "usnic_common_util.h"
#include "usnic_ib.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_vnic.h"
#include "usnic_ib_verbs.h"
#include "usnic_log.h"
/*
 * sysfs fw_ver: report the firmware version via the netdev's ethtool ops.
 * NOTE(review): assumes ethtool_ops and get_drvinfo are non-NULL for this
 * netdev -- confirm the enic driver always provides them.
 */
static ssize_t usnic_ib_show_fw_ver(struct device *device,
					struct device_attribute *attr,
					char *buf)
{
	struct usnic_ib_dev *us_ibdev =
		container_of(device, struct usnic_ib_dev, ib_dev.dev);
	struct ethtool_drvinfo info;

	mutex_lock(&us_ibdev->usdev_lock);
	us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
	mutex_unlock(&us_ibdev->usdev_lock);

	return scnprintf(buf, PAGE_SIZE, "%s\n", info.fw_version);
}
/* sysfs board_id: report the PCI subsystem device id of the PF. */
static ssize_t usnic_ib_show_board(struct device *device,
					struct device_attribute *attr,
					char *buf)
{
	struct usnic_ib_dev *us_ibdev =
		container_of(device, struct usnic_ib_dev, ib_dev.dev);
	unsigned short subsystem_device_id;

	mutex_lock(&us_ibdev->usdev_lock);
	subsystem_device_id = us_ibdev->pdev->subsystem_device;
	mutex_unlock(&us_ibdev->usdev_lock);

	return scnprintf(buf, PAGE_SIZE, "%hu\n", subsystem_device_id);
}
/*
* Report the configuration for this PF
*/
/*
 * sysfs config: one-line summary of the PF (bus, function, netdev, MAC,
 * VF count) followed by per-VF resource counts, bounded to one page.
 */
static ssize_t
usnic_ib_show_config(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct usnic_ib_dev *us_ibdev;
	char *ptr;
	unsigned left;
	unsigned n;
	enum usnic_vnic_res_type res_type;

	us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);

	/* Buffer space limit is 1 page */
	ptr = buf;
	left = PAGE_SIZE;

	mutex_lock(&us_ibdev->usdev_lock);
	if (atomic_read(&us_ibdev->vf_cnt.refcount) > 0) {
		char *busname;

		/*
		 * bus name seems to come with annoying prefix.
		 * Remove it if it is predictable
		 */
		busname = us_ibdev->pdev->bus->name;
		if (strncmp(busname, "PCI Bus ", 8) == 0)
			busname += 8;

		n = scnprintf(ptr, left,
			"%s: %s:%d.%d, %s, %pM, %u VFs\n Per VF:",
			us_ibdev->ib_dev.name,
			busname,
			PCI_SLOT(us_ibdev->pdev->devfn),
			PCI_FUNC(us_ibdev->pdev->devfn),
			netdev_name(us_ibdev->netdev),
			us_ibdev->ufdev->mac,
			atomic_read(&us_ibdev->vf_cnt.refcount));
		UPDATE_PTR_LEFT(n, ptr, left);

		/*
		 * NOTE(review): iteration starts at USNIC_VNIC_RES_TYPE_EOL;
		 * presumably EOL is the first enumerator and its count is 0,
		 * so it is skipped by the continue below -- confirm.
		 */
		for (res_type = USNIC_VNIC_RES_TYPE_EOL;
				res_type < USNIC_VNIC_RES_TYPE_MAX;
				res_type++) {
			if (us_ibdev->vf_res_cnt[res_type] == 0)
				continue;
			n = scnprintf(ptr, left, " %d %s%s",
				us_ibdev->vf_res_cnt[res_type],
				usnic_vnic_res_type_to_str(res_type),
				(res_type < (USNIC_VNIC_RES_TYPE_MAX - 1)) ?
				 "," : "");
			UPDATE_PTR_LEFT(n, ptr, left);
		}
		n = scnprintf(ptr, left, "\n");
		UPDATE_PTR_LEFT(n, ptr, left);
	} else {
		n = scnprintf(ptr, left, "%s: no VFs\n",
				us_ibdev->ib_dev.name);
		UPDATE_PTR_LEFT(n, ptr, left);
	}
	mutex_unlock(&us_ibdev->usdev_lock);

	return ptr - buf;
}
/* sysfs iface: report the name of the netdev backing this IB device. */
static ssize_t
usnic_ib_show_iface(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct usnic_ib_dev *us_ibdev =
		container_of(device, struct usnic_ib_dev, ib_dev.dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n",
			netdev_name(us_ibdev->netdev));
}
/*
 * sysfs max_vf: report the number of VFs under this PF.
 * NOTE(review): reads the kref's internal refcount directly -- relies on
 * vf_cnt being used purely as a counter.
 */
static ssize_t
usnic_ib_show_max_vf(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct usnic_ib_dev *us_ibdev;

	us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);

	return scnprintf(buf, PAGE_SIZE, "%u\n",
			atomic_read(&us_ibdev->vf_cnt.refcount));
}
/* sysfs qp_per_vf: QPs a VF can support = max of its WQ and RQ counts. */
static ssize_t
usnic_ib_show_qp_per_vf(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct usnic_ib_dev *us_ibdev;
	int qp_per_vf;

	us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);
	qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ],
			us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]);

	return scnprintf(buf, PAGE_SIZE,
				"%d\n", qp_per_vf);
}
/* sysfs cq_per_vf: number of CQs provisioned per VF. */
static ssize_t
usnic_ib_show_cq_per_vf(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct usnic_ib_dev *us_ibdev;

	us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n",
			us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ]);
}
static DEVICE_ATTR(fw_ver, S_IRUGO, usnic_ib_show_fw_ver, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, usnic_ib_show_board, NULL);
static DEVICE_ATTR(config, S_IRUGO, usnic_ib_show_config, NULL);
static DEVICE_ATTR(iface, S_IRUGO, usnic_ib_show_iface, NULL);
static DEVICE_ATTR(max_vf, S_IRUGO, usnic_ib_show_max_vf, NULL);
static DEVICE_ATTR(qp_per_vf, S_IRUGO, usnic_ib_show_qp_per_vf, NULL);
static DEVICE_ATTR(cq_per_vf, S_IRUGO, usnic_ib_show_cq_per_vf, NULL);
static struct device_attribute *usnic_class_attributes[] = {
&dev_attr_fw_ver,
&dev_attr_board_id,
&dev_attr_config,
&dev_attr_iface,
&dev_attr_max_vf,
&dev_attr_qp_per_vf,
&dev_attr_cq_per_vf,
};
/* A sysfs attribute of a qpn/<id> kobject, with a QP-group-aware show op. */
struct qpn_attribute {
	struct attribute attr;
	ssize_t (*show)(struct usnic_ib_qp_grp *, char *buf);
};
/*
* Definitions for supporting QPN entries in sysfs
*/
/* Generic sysfs show for qpn entries: resolve the group and dispatch. */
static ssize_t
usnic_ib_qpn_attr_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct usnic_ib_qp_grp *grp;
	struct qpn_attribute *qattr;

	grp = container_of(kobj, struct usnic_ib_qp_grp, kobj);
	qattr = container_of(attr, struct qpn_attribute, attr);

	return qattr->show(grp, buf);
}
static const struct sysfs_ops usnic_ib_qpn_sysfs_ops = {
.show = usnic_ib_qpn_attr_show
};
#define QPN_ATTR_RO(NAME) \
struct qpn_attribute qpn_attr_##NAME = __ATTR_RO(NAME)
/* qpn/<id>/context: the ucontext pointer owning this QP group (debug aid). */
static ssize_t context_show(struct usnic_ib_qp_grp *qp_grp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "0x%p\n", qp_grp->ctx);
}
/*
 * qpn/<id>/summary: one line with QPN, state, owner PID, VF index, and
 * every vnic resource the group holds, bounded to one page.
 * NOTE(review): reads qp_grp fields without taking qp_grp->lock.
 */
static ssize_t summary_show(struct usnic_ib_qp_grp *qp_grp, char *buf)
{
	int i, j, n;
	int left;
	char *ptr;
	struct usnic_vnic_res_chunk *res_chunk;
	struct usnic_vnic_res *vnic_res;

	left = PAGE_SIZE;
	ptr = buf;

	n = scnprintf(ptr, left,
			"QPN: %d State: (%s) PID: %u VF Idx: %hu ",
			qp_grp->ibqp.qp_num,
			usnic_ib_qp_grp_state_to_string(qp_grp->state),
			qp_grp->owner_pid,
			usnic_vnic_get_index(qp_grp->vf->vnic));
	UPDATE_PTR_LEFT(n, ptr, left);

	for (i = 0; qp_grp->res_chunk_list[i]; i++) {
		res_chunk = qp_grp->res_chunk_list[i];
		for (j = 0; j < res_chunk->cnt; j++) {
			vnic_res = res_chunk->res[j];
			n = scnprintf(ptr, left, "%s[%d] ",
				usnic_vnic_res_type_to_str(vnic_res->type),
				vnic_res->vnic_idx);
			UPDATE_PTR_LEFT(n, ptr, left);
		}
	}

	n = scnprintf(ptr, left, "\n");
	UPDATE_PTR_LEFT(n, ptr, left);

	return ptr - buf;
}
static QPN_ATTR_RO(context);
static QPN_ATTR_RO(summary);
static struct attribute *usnic_ib_qpn_default_attrs[] = {
&qpn_attr_context.attr,
&qpn_attr_summary.attr,
NULL
};
static struct kobj_type usnic_ib_qpn_type = {
.sysfs_ops = &usnic_ib_qpn_sysfs_ops,
.default_attrs = usnic_ib_qpn_default_attrs
};
/*
 * Create this device's class attribute files and the "qpn" kobject
 * directory.  Fixes vs. the previous version: on failure, device files
 * already created are now removed (they were leaked), the real error code
 * is returned instead of a blanket -EINVAL, and the "eith" log typo is
 * corrected.  Returns 0 on success or a negative errno.
 */
int usnic_ib_sysfs_register_usdev(struct usnic_ib_dev *us_ibdev)
{
	int i;
	int err;

	for (i = 0; i < ARRAY_SIZE(usnic_class_attributes); ++i) {
		err = device_create_file(&us_ibdev->ib_dev.dev,
						usnic_class_attributes[i]);
		if (err) {
			usnic_err("Failed to create device file %d for %s with err %d",
					i, us_ibdev->ib_dev.name, err);
			goto out_remove_files;
		}
	}

	/* create kernel object for looking at individual QPs */
	kobject_get(&us_ibdev->ib_dev.dev.kobj);
	us_ibdev->qpn_kobj = kobject_create_and_add("qpn",
			&us_ibdev->ib_dev.dev.kobj);
	if (us_ibdev->qpn_kobj == NULL) {
		kobject_put(&us_ibdev->ib_dev.dev.kobj);
		err = -ENOMEM;
		goto out_remove_files;
	}

	return 0;

out_remove_files:
	/* Undo the files created before the failure (indices [0, i)). */
	while (--i >= 0)
		device_remove_file(&us_ibdev->ib_dev.dev,
					usnic_class_attributes[i]);
	return err;
}
/* Remove all class attribute files, then drop the qpn kobject. */
void usnic_ib_sysfs_unregister_usdev(struct usnic_ib_dev *us_ibdev)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(usnic_class_attributes); idx++)
		device_remove_file(&us_ibdev->ib_dev.dev,
					usnic_class_attributes[idx]);

	kobject_put(us_ibdev->qpn_kobj);
}
/*
 * Create the sysfs qpn/<grp_id> entry for a new QP group.  Takes a
 * reference on the parent qpn kobject, dropped here on failure or in
 * usnic_ib_sysfs_qpn_remove() otherwise.  Errors are silently ignored.
 */
void usnic_ib_sysfs_qpn_add(struct usnic_ib_qp_grp *qp_grp)
{
	struct usnic_ib_dev *us_ibdev;
	int err;

	us_ibdev = qp_grp->vf->pf;

	err = kobject_init_and_add(&qp_grp->kobj, &usnic_ib_qpn_type,
			kobject_get(us_ibdev->qpn_kobj),
			"%d", qp_grp->grp_id);
	if (err) {
		/*
		 * NOTE(review): kobject documentation says the caller should
		 * also kobject_put() the kobject itself when
		 * kobject_init_and_add() fails -- confirm whether the missing
		 * put matters given kobj is embedded in qp_grp.
		 */
		kobject_put(us_ibdev->qpn_kobj);
		return;
	}
}
/*
 * Remove the sysfs qpn/<grp_id> entry and drop the parent reference
 * taken in usnic_ib_sysfs_qpn_add().
 */
void usnic_ib_sysfs_qpn_remove(struct usnic_ib_qp_grp *qp_grp)
{
	struct usnic_ib_dev *us_ibdev;

	us_ibdev = qp_grp->vf->pf;

	kobject_put(&qp_grp->kobj);
	kobject_put(us_ibdev->qpn_kobj);
}
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef USNIC_IB_SYSFS_H_
#define USNIC_IB_SYSFS_H_
#include "usnic_ib.h"
int usnic_ib_sysfs_register_usdev(struct usnic_ib_dev *us_ibdev);
void usnic_ib_sysfs_unregister_usdev(struct usnic_ib_dev *us_ibdev);
void usnic_ib_sysfs_qpn_add(struct usnic_ib_qp_grp *qp_grp);
void usnic_ib_sysfs_qpn_remove(struct usnic_ib_qp_grp *qp_grp);
#endif /* !USNIC_IB_SYSFS_H_ */
This diff has been collapsed.
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef USNIC_IB_VERBS_H_
#define USNIC_IB_VERBS_H_

#include "usnic_ib.h"

/*
 * Prototypes for the usNIC implementation of the InfiniBand verbs
 * interface.  These are wired into struct ib_device by the driver's
 * device-registration code.
 */

enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device,
						u8 port_num);
/* Device / port / QP / GID / pkey query entry points. */
int usnic_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props);
int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
				struct ib_port_attr *props);
int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
				int qp_attr_mask,
				struct ib_qp_init_attr *qp_init_attr);
int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
				union ib_gid *gid);
int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
				u16 *pkey);
/* Protection-domain lifecycle. */
struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int usnic_ib_dealloc_pd(struct ib_pd *pd);
/* Queue-pair lifecycle and modification. */
struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
					struct ib_qp_init_attr *init_attr,
					struct ib_udata *udata);
int usnic_ib_destroy_qp(struct ib_qp *qp);
int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
				int attr_mask, struct ib_udata *udata);
/* Completion-queue lifecycle. */
struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev, int entries,
					int vector, struct ib_ucontext *context,
					struct ib_udata *udata);
int usnic_ib_destroy_cq(struct ib_cq *cq);
/* Memory-region registration. */
struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
				u64 virt_addr, int access_flags,
				struct ib_udata *udata);
int usnic_ib_dereg_mr(struct ib_mr *ibmr);
/* User-context lifecycle and mmap support. */
struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev,
						struct ib_udata *udata);
int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext);
int usnic_ib_mmap(struct ib_ucontext *context,
			struct vm_area_struct *vma);
/* Address handles and data-path entry points. */
struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
					struct ib_ah_attr *ah_attr);
int usnic_ib_destroy_ah(struct ib_ah *ah);
int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			struct ib_send_wr **bad_wr);
int usnic_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			struct ib_recv_wr **bad_wr);
int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries,
			struct ib_wc *wc);
int usnic_ib_req_notify_cq(struct ib_cq *cq,
				enum ib_cq_notify_flags flags);
struct ib_mr *usnic_ib_get_dma_mr(struct ib_pd *pd, int acc);

#endif /* !USNIC_IB_VERBS_H */
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef USNIC_LOG_H_
#define USNIC_LOG_H_

#include "usnic.h"

/* Runtime verbosity knob; messages are emitted when their level is <= this. */
extern unsigned int usnic_log_lvl;

#define USNIC_LOG_LVL_NONE		(0)
#define USNIC_LOG_LVL_ERR		(1)
#define USNIC_LOG_LVL_INFO		(2)
#define USNIC_LOG_LVL_DBG		(3)

/*
 * Prefix every message with "driver:function:line: ".
 * NOTE(review): this issues two separate printk() calls, so the prefix
 * and the message body can interleave with concurrent loggers — confirm
 * this is acceptable before relying on log-line atomicity.
 */
#define usnic_printk(lvl, args...) \
	do { \
		printk(lvl "%s:%s:%d: ", DRV_NAME, __func__, \
			__LINE__); \
		printk(args); \
	} while (0)

/* Debug messages; unlikely() keeps the common (quiet) path cheap. */
#define usnic_dbg(args...) \
	do { \
		if (unlikely(usnic_log_lvl >= USNIC_LOG_LVL_DBG)) { \
			usnic_printk(KERN_INFO, args); \
		} \
	} while (0)

#define usnic_info(args...) \
	do { \
		if (usnic_log_lvl >= USNIC_LOG_LVL_INFO) { \
			usnic_printk(KERN_INFO, args); \
		} \
	} while (0)

#define usnic_err(args...) \
	do { \
		if (usnic_log_lvl >= USNIC_LOG_LVL_ERR) { \
			usnic_printk(KERN_ERR, args); \
		} \
	} while (0)

#endif /* !USNIC_LOG_H_ */
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/bitmap.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <net/inet_sock.h>
#include "usnic_transport.h"
#include "usnic_log.h"
/* ROCE */
/* One bit per ROCE custom port number; bit set == port reserved. */
static unsigned long *roce_bitmap;
/* Next starting point for automatic port allocation; cycles 1..4096. */
static u16 roce_next_port = 1;
/*
 * NOTE(review): this value (65536 / 8 == 8192) is passed to kzalloc()
 * as a byte count but to the bitmap_* helpers as a bit count, so the
 * bitmap is allocated 8x larger than the range actually used.  Safe,
 * but worth confirming the intended unit.
 */
#define ROCE_BITMAP_SZ ((1 << (8 /*CHAR_BIT*/ * sizeof(u16)))/8 /*CHAR BIT*/)
/* Serializes all access to roce_bitmap and roce_next_port. */
static DEFINE_SPINLOCK(roce_bitmap_lock);
/* Map a transport type to a human-readable name for log messages. */
const char *usnic_transport_to_str(enum usnic_transport_type type)
{
	if (type == USNIC_TRANSPORT_UNKNOWN)
		return "Unknown";
	if (type == USNIC_TRANSPORT_ROCE_CUSTOM)
		return "roce custom";
	if (type == USNIC_TRANSPORT_IPV4_UDP)
		return "IPv4 UDP";
	if (type == USNIC_TRANSPORT_MAX)
		return "Max?";
	return "Not known";
}
/*
 * Format "Proto:<p> Addr:<a> Port:<n>" for the given socket into buf.
 * Returns the number of characters written (excluding the NUL), or 0 if
 * the socket's address could not be obtained; buf is zeroed either way.
 */
int usnic_transport_sock_to_str(char *buf, int buf_sz,
					struct socket *sock)
{
	uint32_t addr;
	uint16_t port;
	int proto;

	memset(buf, 0, buf_sz);
	if (usnic_transport_sock_get_addr(sock, &proto, &addr, &port))
		return 0;

	return scnprintf(buf, buf_sz, "Proto:%u Addr:%pI4h Port:%hu",
			proto, &addr, port);
}
/*
 * Reserve a port number.  If "0" is specified, we will try to pick one
 * starting at roce_next_port; roce_next_port cycles through the values
 * 1..4096.  Returns the reserved port on success, 0 on failure.
 */
u16 usnic_transport_rsrv_port(enum usnic_transport_type type, u16 port_num)
{
	if (type == USNIC_TRANSPORT_ROCE_CUSTOM) {
		spin_lock(&roce_bitmap_lock);
		if (!port_num) {
			port_num = bitmap_find_next_zero_area(roce_bitmap,
						ROCE_BITMAP_SZ,
						roce_next_port /* start */,
						1 /* nr */,
						0 /* align */);
			/*
			 * bitmap_find_next_zero_area() returns an index
			 * >= the bitmap size when no free area exists;
			 * setting that bit blindly would mark a slot
			 * outside the advertised range, so fail instead.
			 */
			if (port_num >= ROCE_BITMAP_SZ) {
				usnic_err("Failed to allocate port for %s\n",
						usnic_transport_to_str(type));
				spin_unlock(&roce_bitmap_lock);
				goto out_fail;
			}
			/* Keep the automatic search window within 1..4096. */
			roce_next_port = (port_num & 4095) + 1;
		} else if (test_bit(port_num, roce_bitmap)) {
			/* Caller asked for a specific port that is taken. */
			usnic_err("Failed to allocate port for %s\n",
					usnic_transport_to_str(type));
			spin_unlock(&roce_bitmap_lock);
			goto out_fail;
		}
		bitmap_set(roce_bitmap, port_num, 1);
		spin_unlock(&roce_bitmap_lock);
	} else {
		usnic_err("Failed to allocate port - transport %s unsupported\n",
				usnic_transport_to_str(type));
		goto out_fail;
	}

	usnic_dbg("Allocating port %hu for %s\n", port_num,
			usnic_transport_to_str(type));
	return port_num;

out_fail:
	return 0;
}
/*
 * Release a previously reserved port.  Attempts to free port 0 or a
 * port that was never reserved are logged and ignored.
 */
void usnic_transport_unrsrv_port(enum usnic_transport_type type, u16 port_num)
{
	if (type == USNIC_TRANSPORT_ROCE_CUSTOM) {
		spin_lock(&roce_bitmap_lock);
		if (!port_num) {
			/* Port 0 is never handed out (see transport_init). */
			usnic_err("Unreserving invalid port num 0 for %s\n",
					usnic_transport_to_str(type));
			goto out_roce_custom;
		}
		if (!test_bit(port_num, roce_bitmap)) {
			usnic_err("Unreserving invalid %hu for %s\n",
					port_num,
					usnic_transport_to_str(type));
			goto out_roce_custom;
		}
		bitmap_clear(roce_bitmap, port_num, 1);
		usnic_dbg("Freeing port %hu for %s\n", port_num,
				usnic_transport_to_str(type));
out_roce_custom:
		spin_unlock(&roce_bitmap_lock);
	} else {
		usnic_err("Freeing invalid port %hu for %d\n", port_num, type);
	}
}
/*
 * Look up the socket behind sock_fd and take a reference on it.  The
 * socket stays valid until usnic_transport_put_socket() is called.
 * Returns ERR_PTR(-ENOENT) if the fd does not name a socket.
 */
struct socket *usnic_transport_get_socket(int sock_fd)
{
	struct socket *sock;
	int err;
	/*
	 * Large enough for "Proto:%u Addr:%pI4h Port:%hu" at maximum
	 * field widths (the previous 25-byte buffer truncated the debug
	 * string); also matches usnic_transport_put_socket().
	 */
	char buf[100];

	/* sockfd_lookup will internally do a fget */
	sock = sockfd_lookup(sock_fd, &err);
	if (!sock) {
		usnic_err("Unable to lookup socket for fd %d with err %d\n",
				sock_fd, err);
		return ERR_PTR(-ENOENT);
	}

	usnic_transport_sock_to_str(buf, sizeof(buf), sock);
	usnic_dbg("Get sock %s\n", buf);

	return sock;
}
/*
 * Drop the reference taken by usnic_transport_get_socket(), logging
 * which socket is being released.
 */
void usnic_transport_put_socket(struct socket *sock)
{
	char buf[100];

	usnic_transport_sock_to_str(buf, sizeof(buf), sock);
	usnic_dbg("Put sock %s\n", buf);
	/* Pairs with the fget done inside sockfd_lookup(). */
	sockfd_put(sock);
}
/*
 * Extract the protocol, IPv4 address, and port bound to @sock.  Any of
 * the three output pointers may be NULL to skip that field.  Address
 * and port are returned in host byte order.  Returns 0 on success, a
 * negative errno from getname(), or -EINVAL for non-AF_INET sockets.
 */
int usnic_transport_sock_get_addr(struct socket *sock, int *proto,
					uint32_t *addr, uint16_t *port)
{
	int len;
	int err;
	struct sockaddr_in sock_addr;

	/* 0 == local (bound) address, not the peer address. */
	err = sock->ops->getname(sock,
				(struct sockaddr *)&sock_addr,
				&len, 0);
	if (err)
		return err;

	/* Only IPv4 sockets are supported by this transport. */
	if (sock_addr.sin_family != AF_INET)
		return -EINVAL;

	if (proto)
		*proto = sock->sk->sk_protocol;
	if (port)
		*port = ntohs(((struct sockaddr_in *)&sock_addr)->sin_port);
	if (addr)
		*addr = ntohl(((struct sockaddr_in *)
					&sock_addr)->sin_addr.s_addr);

	return 0;
}
/*
 * Allocate the ROCE port-reservation bitmap.  Returns 0 on success,
 * -ENOMEM on allocation failure.
 */
int usnic_transport_init(void)
{
	roce_bitmap = kzalloc(ROCE_BITMAP_SZ, GFP_KERNEL);
	if (!roce_bitmap) {
		/* Trailing '\n' so the log line is not merged with the next. */
		usnic_err("Failed to allocate bit map\n");
		return -ENOMEM;
	}

	/* Do not ever allocate bit 0, hence set it here */
	bitmap_set(roce_bitmap, 0, 1);
	return 0;
}
/* Release the ROCE port-reservation bitmap allocated in transport_init(). */
void usnic_transport_fini(void)
{
	kfree(roce_bitmap);
}
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef USNIC_TRANSPORT_H_
#define USNIC_TRANSPORT_H_

#include "usnic_abi.h"

/* Human-readable name for a transport type (for log messages). */
const char *usnic_transport_to_str(enum usnic_transport_type trans_type);
/*
 * Returns number of bytes written, excluding null terminator. If
 * nothing was written, the function returns 0.
 */
int usnic_transport_sock_to_str(char *buf, int buf_sz,
					struct socket *sock);
/*
 * Reserve a port. If "port_num" is set, then the function will try
 * to reserve that particular port.
 */
u16 usnic_transport_rsrv_port(enum usnic_transport_type type, u16 port_num);
void usnic_transport_unrsrv_port(enum usnic_transport_type type, u16 port_num);
/*
 * Do a fget on the socket referred to by sock_fd and returns the socket.
 * Socket will not be destroyed before usnic_transport_put_socket has
 * been called.
 */
struct socket *usnic_transport_get_socket(int sock_fd);
void usnic_transport_put_socket(struct socket *sock);
/*
 * Call usnic_transport_get_socket before calling *_sock_get_addr
 */
int usnic_transport_sock_get_addr(struct socket *sock, int *proto,
					uint32_t *addr, uint16_t *port);
/* Module-lifetime setup/teardown of transport state. */
int usnic_transport_init(void);
void usnic_transport_fini(void);

#endif /* !USNIC_TRANSPORT_H */
此差异已折叠。
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef USNIC_UIOM_H_
#define USNIC_UIOM_H_

#include <linux/list.h>
#include <linux/scatterlist.h>

#include "usnic_uiom_interval_tree.h"

/* Access-permission flags for registered memory. */
#define USNIC_UIOM_READ			(1)
#define USNIC_UIOM_WRITE		(2)

/* Hard limits on protection domains, registrations, and MR size. */
#define USNIC_UIOM_MAX_PD_CNT		(1000)
#define USNIC_UIOM_MAX_MR_CNT		(1000000)
#define USNIC_UIOM_MAX_MR_SIZE		(~0UL)
#define USNIC_UIOM_PAGE_SIZE		(PAGE_SIZE)

/* A device attached to a protection domain (linked into pd->devs). */
struct usnic_uiom_dev {
	struct device			*dev;
	struct list_head		link;
};

/* Protection domain: an IOMMU domain plus its registered intervals. */
struct usnic_uiom_pd {
	struct iommu_domain		*domain;
	spinlock_t			lock;
	/* Interval tree of registered address ranges. */
	struct rb_root			rb_root;
	struct list_head		devs;
	int				dev_cnt;
};

/* One memory registration: a pinned user VA range mapped into a PD. */
struct usnic_uiom_reg {
	struct usnic_uiom_pd		*pd;
	unsigned long			va;
	size_t				length;
	int				offset;
	int				page_size;
	int				writable;
	/* List of usnic_uiom_chunk holding the pinned pages. */
	struct list_head		chunk_list;
	struct work_struct		work;
	struct mm_struct		*mm;
	unsigned long			diff;
};

/* A batch of pinned pages; page_list is a trailing variable-size array. */
struct usnic_uiom_chunk {
	struct list_head		list;
	int				nents;
	struct scatterlist		page_list[0];
};

struct usnic_uiom_pd *usnic_uiom_alloc_pd(void);
void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd);
int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev);
void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd,
					struct device *dev);
/* Returned array is NULL-terminated; free with usnic_uiom_free_dev_list(). */
struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd);
void usnic_uiom_free_dev_list(struct device **devs);
struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
						unsigned long addr, size_t size,
						int access, int dmasync);
void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing);
int usnic_uiom_init(char *drv_name);
void usnic_uiom_fini(void);

#endif /* USNIC_UIOM_H_ */
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/list_sort.h>
#include <linux/interval_tree_generic.h>
#include "usnic_uiom_interval_tree.h"
/* Accessors used by INTERVAL_TREE_DEFINE for the node's closed range. */
#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

/*
 * Allocate an interval node; on failure set 'err' and jump to the
 * caller-supplied error label.  do/while(0) keeps multi-statement
 * macros safe inside unbraced if/else bodies.
 */
#define MAKE_NODE(node, start, end, ref_cnt, flags, err, err_out)	\
		do {							\
			node = usnic_uiom_interval_node_alloc(start,	\
					end, ref_cnt, flags);		\
				if (!node) {				\
					err = -ENOMEM;			\
					goto err_out;			\
				}					\
		} while (0)

/* Queue a node on a pending list; it is not yet in the tree. */
#define MARK_FOR_ADD(node, list) (list_add_tail(&node->link, list))

/* MAKE_NODE plus immediate queueing on 'list'. */
#define MAKE_NODE_AND_APPEND(node, start, end, ref_cnt, flags, err,	\
				err_out, list)				\
		do {							\
			MAKE_NODE(node, start, end,			\
				ref_cnt, flags, err,			\
				err_out);				\
			MARK_FOR_ADD(node, list);			\
		} while (0)

/* True when flags1 and flags2 agree on every bit selected by mask. */
#define FLAGS_EQUAL(flags1, flags2, mask)				\
	(((flags1) & (mask)) == ((flags2) & (mask)))
/*
 * Allocate and initialize one interval node covering [start, last].
 * Returns NULL on allocation failure.
 */
static struct usnic_uiom_interval_node*
usnic_uiom_interval_node_alloc(long int start, long int last, int ref_cnt,
				int flags)
{
	struct usnic_uiom_interval_node *node;

	/* GFP_ATOMIC: presumably callers may hold locks — TODO confirm. */
	node = kzalloc(sizeof(*node), GFP_ATOMIC);
	if (!node)
		return NULL;

	node->start = start;
	node->last = last;
	node->flags = flags;
	node->ref_cnt = ref_cnt;

	return node;
}
/*
 * list_sort() comparator: order interval nodes by ascending start.
 * Returns -1/0/1 explicitly rather than subtracting, since the start
 * values are longs and a difference could overflow an int.
 */
static int interval_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct usnic_uiom_interval_node *left, *right;

	left = list_entry(a, struct usnic_uiom_interval_node, link);
	right = list_entry(b, struct usnic_uiom_interval_node, link);

	if (left->start < right->start)
		return -1;
	if (left->start > right->start)
		return 1;
	return 0;
}
/*
 * Collect every tree node overlapping [start, last] onto 'list',
 * sorted by ascending start offset.  'list' is (re)initialized here.
 */
static void
find_intervals_intersection_sorted(struct rb_root *root, unsigned long start,
					unsigned long last,
					struct list_head *list)
{
	struct usnic_uiom_interval_node *node;

	INIT_LIST_HEAD(list);

	node = usnic_uiom_interval_tree_iter_first(root, start, last);
	while (node) {
		list_add_tail(&node->link, list);
		node = usnic_uiom_interval_tree_iter_next(node, start, last);
	}

	list_sort(NULL, list, interval_cmp);
}
/*
 * Compute the sub-ranges of [start, last] NOT already covered (with
 * matching flags, under flag_mask) by intervals in 'root', and place
 * newly allocated nodes for those gaps on 'diff_set'.
 *
 * Returns 0 on success; on allocation failure frees any partial result
 * and returns -ENOMEM.  Caller owns the nodes placed on diff_set.
 */
int usnic_uiom_get_intervals_diff(unsigned long start, unsigned long last,
					int flags, int flag_mask,
					struct rb_root *root,
					struct list_head *diff_set)
{
	struct usnic_uiom_interval_node *interval, *tmp;
	int err = 0;
	long int pivot = start;
	LIST_HEAD(intersection_set);

	INIT_LIST_HEAD(diff_set);

	/* Overlapping intervals, sorted by start, drive the sweep below. */
	find_intervals_intersection_sorted(root, start, last,
						&intersection_set);

	list_for_each_entry(interval, &intersection_set, link) {
		/* Gap before this interval: emit it into diff_set. */
		if (pivot < interval->start) {
			MAKE_NODE_AND_APPEND(tmp, pivot, interval->start - 1,
						1, flags, err, err_out,
						diff_set);
			pivot = interval->start;
		}

		/*
		 * Invariant: Set [start, pivot] is either in diff_set or root,
		 * but not in both.
		 */

		if (pivot > interval->last) {
			continue;
		} else if (pivot <= interval->last &&
				FLAGS_EQUAL(interval->flags, flags,
				flag_mask)) {
			/* Covered with matching flags: skip past it. */
			pivot = interval->last + 1;
		}
	}

	/* Trailing gap after the last overlapping interval. */
	if (pivot <= last)
		MAKE_NODE_AND_APPEND(tmp, pivot, last, 1, flags, err, err_out,
					diff_set);

	return 0;

err_out:
	/* Undo: free every node already queued on diff_set. */
	list_for_each_entry_safe(interval, tmp, diff_set, link) {
		list_del(&interval->link);
		kfree(interval);
	}

	return err;
}
/*
 * Free every node on an interval list produced by get_intervals_diff()
 * or remove_interval().  The list itself is discarded afterwards, so no
 * list_del() is needed before each kfree().
 */
void usnic_uiom_put_interval_set(struct list_head *intervals)
{
	struct usnic_uiom_interval_node *interval, *tmp;
	list_for_each_entry_safe(interval, tmp, intervals, link)
		kfree(interval);
}
/*
 * Insert [start, last] with 'flags' into the interval tree, merging
 * with any overlapping intervals: overlapped sub-ranges get their
 * ref_cnt incremented and flags OR'ed in, while non-overlapped pieces
 * of old intervals are preserved as-is.  Existing overlapping nodes
 * are replaced by newly built fragments.
 *
 * Returns 0 on success, -ENOMEM on allocation failure (the tree is
 * left unmodified in that case).
 */
int usnic_uiom_insert_interval(struct rb_root *root, unsigned long start,
				unsigned long last, int flags)
{
	struct usnic_uiom_interval_node *interval, *tmp;
	unsigned long istart, ilast;
	int iref_cnt, iflags;
	unsigned long lpivot = start;
	int err = 0;
	LIST_HEAD(to_add);
	LIST_HEAD(intersection_set);

	find_intervals_intersection_sorted(root, start, last,
						&intersection_set);

	list_for_each_entry(interval, &intersection_set, link) {
		/*
		 * Invariant - lpivot is the left edge of next interval to be
		 * inserted
		 */
		istart = interval->start;
		ilast = interval->last;
		iref_cnt = interval->ref_cnt;
		iflags = interval->flags;

		if (istart < lpivot) {
			/* Old interval sticks out on the left: keep that
			 * piece with its original ref_cnt/flags. */
			MAKE_NODE_AND_APPEND(tmp, istart, lpivot - 1, iref_cnt,
						iflags, err, err_out, &to_add);
		} else if (istart > lpivot) {
			/* Gap before the old interval: new range alone. */
			MAKE_NODE_AND_APPEND(tmp, lpivot, istart - 1, 1, flags,
						err, err_out, &to_add);
			lpivot = istart;
		} else {
			lpivot = istart;
		}

		if (ilast > last) {
			/* Overlapped middle (merged) + old right remainder. */
			MAKE_NODE_AND_APPEND(tmp, lpivot, last, iref_cnt + 1,
						iflags | flags, err, err_out,
						&to_add);
			MAKE_NODE_AND_APPEND(tmp, last + 1, ilast, iref_cnt,
						iflags, err, err_out, &to_add);
		} else {
			/* Old interval fully consumed: merge the overlap. */
			MAKE_NODE_AND_APPEND(tmp, lpivot, ilast, iref_cnt + 1,
						iflags | flags, err, err_out,
						&to_add);
		}

		lpivot = ilast + 1;
	}

	/* Tail of the new range beyond the last overlapping interval. */
	if (lpivot <= last)
		MAKE_NODE_AND_APPEND(tmp, lpivot, last, 1, flags, err, err_out,
					&to_add);

	/* Swap: drop the old overlapping nodes, insert the new fragments. */
	list_for_each_entry_safe(interval, tmp, &intersection_set, link) {
		usnic_uiom_interval_tree_remove(interval, root);
		kfree(interval);
	}

	list_for_each_entry(interval, &to_add, link)
		usnic_uiom_interval_tree_insert(interval, root);

	return 0;

err_out:
	/* Tree untouched so far; free the fragments built up to now. */
	list_for_each_entry_safe(interval, tmp, &to_add, link)
		kfree(interval);

	return err;
}
/*
 * Drop one reference from every interval overlapping [start, last].
 * Intervals whose ref_cnt reaches zero are unlinked from the tree and
 * appended to 'removed'; the caller frees them (usnic_uiom_put_interval_set).
 */
void usnic_uiom_remove_interval(struct rb_root *root, unsigned long start,
				unsigned long last, struct list_head *removed)
{
	struct usnic_uiom_interval_node *interval;

	for (interval = usnic_uiom_interval_tree_iter_first(root, start, last);
			interval;
			interval = usnic_uiom_interval_tree_iter_next(interval,
									start,
									last)) {
		if (--interval->ref_cnt == 0)
			list_add_tail(&interval->link, removed);
	}

	/* Unlink after the iteration so the tree walk stays valid. */
	list_for_each_entry(interval, removed, link)
		usnic_uiom_interval_tree_remove(interval, root);
}
/*
 * Instantiate the augmented-rbtree interval-tree helpers
 * (usnic_uiom_interval_tree_{insert,remove,iter_first,iter_next})
 * over usnic_uiom_interval_node, keyed by START()/LAST().
 */
INTERVAL_TREE_DEFINE(struct usnic_uiom_interval_node, rb,
			unsigned long, __subtree_last,
			START, LAST, , usnic_uiom_interval_tree)
此差异已折叠。
此差异已折叠。
此差异已折叠。
......@@ -104,6 +104,8 @@ int ipoib_open(struct net_device *dev)
ipoib_dbg(priv, "bringing up interface\n");
netif_carrier_off(dev);
set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
if (ipoib_pkey_dev_delay_open(dev))
......@@ -1366,8 +1368,6 @@ void ipoib_setup(struct net_device *dev)
memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
netif_carrier_off(dev);
priv->dev = dev;
spin_lock_init(&priv->lock);
......
......@@ -192,6 +192,9 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
if (priv->hca_caps & IB_DEVICE_BLOCK_MULTICAST_LOOPBACK)
init_attr.create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
if (priv->hca_caps & IB_DEVICE_MANAGED_FLOW_STEERING)
init_attr.create_flags |= IB_QP_CREATE_NETIF_QP;
if (dev->features & NETIF_F_SG)
init_attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
......
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册