Commit e21dd863 authored by David S. Miller

Merge branch 'mlx4'

Amir Vadai says:

====================
net/mlx4: Mellanox driver update 07-11-2013

This patchset contains some enhancements and bug fixes for the mlx4_* drivers.
Patchset was applied and tested against commit: "9bb8ca86 virtio-net: switch to
use XPS to choose txq"
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -526,7 +526,6 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
 	if (IS_ERR(mailbox))
 		return 0;
 
-	memset(mailbox->buf, 0, 256);
 	memcpy(mailbox->buf, props->node_desc, 64);
 	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
 		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
@@ -547,8 +546,6 @@ static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
 
-	memset(mailbox->buf, 0, 256);
-
 	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
 		*(u8 *) mailbox->buf = !!reset_qkey_viols << 6;
 		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
@@ -879,8 +876,6 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
 	struct mlx4_ib_dev *mdev = to_mdev(qp->device);
 	struct mlx4_cmd_mailbox *mailbox;
 	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
-	size_t rule_size = sizeof(struct mlx4_net_trans_rule_hw_ctrl) +
-			   (sizeof(struct _rule_hw) * flow_attr->num_of_specs);
 	static const u16 __mlx4_domain[] = {
 		[IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
@@ -905,7 +900,6 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
 	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
-	memset(mailbox->buf, 0, rule_size);
 	ctrl = mailbox->buf;
 
 	ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
......
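The four hunks above drop per-call-site memset() calls on freshly allocated command mailboxes. They become redundant because a later hunk in this series (mlx4_alloc_cmd_mailbox, in the cmd.c section below) zeroes the buffer once at allocation time. A minimal userspace sketch of the idiom; the names here are illustrative stand-ins, not the driver's:

	#include <stdlib.h>

	#define MAILBOX_SIZE 256	/* stand-in for the mlx4 mailbox size */

	struct mailbox { void *buf; };

	/* Zero the buffer once, centrally, at allocation time; callers no
	 * longer need their own memset() before filling in command fields. */
	static struct mailbox *mailbox_alloc(void)
	{
		struct mailbox *m = malloc(sizeof(*m));

		if (!m)
			return NULL;
		m->buf = calloc(1, MAILBOX_SIZE);	/* allocation-time zeroing */
		if (!m->buf) {
			free(m);
			return NULL;
		}
		return m;
	}

Centralizing the zeroing also removes a class of bugs where a new caller forgets its memset and leaks stale heap contents to the device.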
@@ -1539,11 +1539,6 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
 	return ret;
 }
 
-static int calculate_transition(u16 oper_vlan, u16 admin_vlan)
-{
-	return (2 * (oper_vlan == MLX4_VGT) + (admin_vlan == MLX4_VGT));
-}
-
 static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
 						   int slave, int port)
 {
@@ -1553,7 +1548,6 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
 	struct mlx4_dev *dev = &(priv->dev);
 	int err;
 	int admin_vlan_ix = NO_INDX;
-	enum mlx4_vlan_transition vlan_trans;
 
 	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
 	vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
@@ -1563,12 +1557,8 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
 	    vp_oper->state.link_state == vp_admin->link_state)
 		return 0;
 
-	vlan_trans = calculate_transition(vp_oper->state.default_vlan,
-					  vp_admin->default_vlan);
-
 	if (!(priv->mfunc.master.slave_state[slave].active &&
-	      dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP &&
-	      vlan_trans == MLX4_VLAN_TRANSITION_VST_VST)) {
+	      dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)) {
 		/* even if the UPDATE_QP command isn't supported, we still want
 		 * to set this VF link according to the admin directive
 		 */
@@ -1586,15 +1576,19 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
 		return -ENOMEM;
 
 	if (vp_oper->state.default_vlan != vp_admin->default_vlan) {
-		err = __mlx4_register_vlan(&priv->dev, port,
-					   vp_admin->default_vlan,
-					   &admin_vlan_ix);
-		if (err) {
-			kfree(work);
-			mlx4_warn((&priv->dev),
-				  "No vlan resources slave %d, port %d\n",
-				  slave, port);
-			return err;
+		if (MLX4_VGT != vp_admin->default_vlan) {
+			err = __mlx4_register_vlan(&priv->dev, port,
						   vp_admin->default_vlan,
						   &admin_vlan_ix);
+			if (err) {
+				kfree(work);
+				mlx4_warn((&priv->dev),
+					  "No vlan resources slave %d, port %d\n",
+					  slave, port);
+				return err;
+			}
+		} else {
+			admin_vlan_ix = NO_INDX;
 		}
 		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
 		mlx4_dbg((&(priv->dev)),
@@ -2199,6 +2193,8 @@ struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
 		return ERR_PTR(-ENOMEM);
 	}
 
+	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
+
 	return mailbox;
 }
 EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
......
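For context on the cmd.c hunks: the deleted calculate_transition() helper packed the (operational, admin) VGT/VST pair into a two-bit code, and the old condition only took the immediate-activation path for a VST-to-VST transition. The rewritten condition handles every transition, registering a VLAN only when the admin VLAN is not the MLX4_VGT sentinel, which makes the discriminator dead code. A sketch of what the helper computed; the enum names here are illustrative, MLX4_VGT is the sentinel VLAN id from the mlx4 headers:

	#define MLX4_VGT 4095	/* "VLAN Guest Tagging": no VLAN enforced by the PF */

	/* Two-bit transition code: bit 1 = current state is VGT,
	 * bit 0 = requested (admin) state is VGT. */
	enum vlan_transition {
		VST_VST = 0,	/* VLAN enforced now, VLAN enforced after */
		VST_VGT = 1,	/* VLAN enforced now, guest tagging after */
		VGT_VST = 2,	/* guest tagging now, VLAN enforced after */
		VGT_VGT = 3,	/* guest tagging before and after */
	};

	static int calculate_transition(unsigned short oper_vlan,
					unsigned short admin_vlan)
	{
		return 2 * (oper_vlan == MLX4_VGT) + (admin_vlan == MLX4_VGT);
	}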
@@ -128,8 +128,6 @@ int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
 		return PTR_ERR(mailbox);
 
 	cq_context = mailbox->buf;
-	memset(cq_context, 0, sizeof *cq_context);
-
 	cq_context->cq_max_count = cpu_to_be16(count);
 	cq_context->cq_period = cpu_to_be16(period);
@@ -153,8 +151,6 @@ int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
 		return PTR_ERR(mailbox);
 
 	cq_context = mailbox->buf;
-	memset(cq_context, 0, sizeof *cq_context);
-
 	cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
 	cq_context->log_page_size = mtt->page_shift - 12;
 	mtt_addr = mlx4_mtt_addr(dev, mtt);
@@ -274,8 +270,6 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
 	}
 
 	cq_context = mailbox->buf;
-	memset(cq_context, 0, sizeof *cq_context);
-
 	cq_context->flags = cpu_to_be32(!!collapsed << 18);
 	if (timestamp_en)
 		cq_context->flags |= cpu_to_be32(1 << 19);
......
@@ -44,12 +44,23 @@ static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event)
 
 int mlx4_en_create_cq(struct mlx4_en_priv *priv,
-		      struct mlx4_en_cq *cq,
-		      int entries, int ring, enum cq_type mode)
+		      struct mlx4_en_cq **pcq,
+		      int entries, int ring, enum cq_type mode,
+		      int node)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_cq *cq;
 	int err;
 
+	cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, node);
+	if (!cq) {
+		cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+		if (!cq) {
+			en_err(priv, "Failed to allocate CQ structure\n");
+			return -ENOMEM;
+		}
+	}
+
 	cq->size = entries;
 	cq->buf_size = cq->size * mdev->dev->caps.cqe_size;
@@ -57,17 +68,30 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
 	cq->is_tx = mode;
 	spin_lock_init(&cq->lock);
 
+	/* Allocate HW buffers on provided NUMA node.
+	 * dev->numa_node is used in mtt range allocation flow.
+	 */
+	set_dev_node(&mdev->dev->pdev->dev, node);
 	err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
 				cq->buf_size, 2 * PAGE_SIZE);
+	set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
 	if (err)
-		return err;
+		goto err_cq;
 
 	err = mlx4_en_map_buffer(&cq->wqres.buf);
 	if (err)
-		mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
-	else
-		cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf;
+		goto err_res;
 
+	cq->buf = (struct mlx4_cqe *)cq->wqres.buf.direct.buf;
+	*pcq = cq;
+
+	return 0;
+
+err_res:
+	mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
+err_cq:
+	kfree(cq);
+	*pcq = NULL;
 	return err;
 }
 
@@ -117,12 +141,12 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
 			struct mlx4_en_cq *rx_cq;
 
 			cq_idx = cq_idx % priv->rx_ring_num;
-			rx_cq = &priv->rx_cq[cq_idx];
+			rx_cq = priv->rx_cq[cq_idx];
 			cq->vector = rx_cq->vector;
 		}
 
 	if (!cq->is_tx)
-		cq->size = priv->rx_ring[cq->ring].actual_size;
+		cq->size = priv->rx_ring[cq->ring]->actual_size;
 
 	if ((cq->is_tx && priv->hwtstamp_config.tx_type) ||
 	    (!cq->is_tx && priv->hwtstamp_config.rx_filter))
@@ -146,9 +170,10 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
 	return 0;
 }
 
-void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_cq *cq = *pcq;
 
 	mlx4_en_unmap_buffer(&cq->wqres.buf);
 	mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
@@ -157,6 +182,8 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 	cq->vector = 0;
 	cq->buf_size = 0;
 	cq->buf = NULL;
+	kfree(cq);
+	*pcq = NULL;
 }
 
 void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
......
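The en_cq.c rework above switches the CQ from an embedded array element to an individually allocated object, placed if possible on the NUMA node passed in by the caller. The allocation idiom, repeated later in this series for the RX and TX rings, prefers the local node and falls back to an unconstrained allocation rather than failing outright; condensed into kernel C (a sketch assuming the driver's usual headers):

	/* Try the NUMA node closest to the ring's CPU first; any node
	 * beats returning -ENOMEM. */
	static struct mlx4_en_cq *cq_alloc_preferring_node(int node)
	{
		struct mlx4_en_cq *cq;

		cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, node);
		if (!cq)
			cq = kzalloc(sizeof(*cq), GFP_KERNEL);	/* remote-node fallback */
		return cq;	/* single NULL check at the caller */
	}

The fallback matters on memory-pressured nodes: a remote-node ring still works, just with higher memory-access latency.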
@@ -51,10 +51,10 @@ static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
 	int err = 0;
 
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		priv->tx_cq[i].moder_cnt = priv->tx_frames;
-		priv->tx_cq[i].moder_time = priv->tx_usecs;
+		priv->tx_cq[i]->moder_cnt = priv->tx_frames;
+		priv->tx_cq[i]->moder_time = priv->tx_usecs;
 		if (priv->port_up) {
-			err = mlx4_en_set_cq_moder(priv, &priv->tx_cq[i]);
+			err = mlx4_en_set_cq_moder(priv, priv->tx_cq[i]);
 			if (err)
 				return err;
 		}
@@ -64,11 +64,11 @@ static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
 		return 0;
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		priv->rx_cq[i].moder_cnt = priv->rx_frames;
-		priv->rx_cq[i].moder_time = priv->rx_usecs;
+		priv->rx_cq[i]->moder_cnt = priv->rx_frames;
+		priv->rx_cq[i]->moder_time = priv->rx_usecs;
 		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
 		if (priv->port_up) {
-			err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
+			err = mlx4_en_set_cq_moder(priv, priv->rx_cq[i]);
 			if (err)
 				return err;
 		}
@@ -274,16 +274,16 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
 		}
 	}
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		data[index++] = priv->tx_ring[i].packets;
-		data[index++] = priv->tx_ring[i].bytes;
+		data[index++] = priv->tx_ring[i]->packets;
+		data[index++] = priv->tx_ring[i]->bytes;
 	}
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		data[index++] = priv->rx_ring[i].packets;
-		data[index++] = priv->rx_ring[i].bytes;
+		data[index++] = priv->rx_ring[i]->packets;
+		data[index++] = priv->rx_ring[i]->bytes;
 #ifdef CONFIG_NET_RX_BUSY_POLL
-		data[index++] = priv->rx_ring[i].yields;
-		data[index++] = priv->rx_ring[i].misses;
-		data[index++] = priv->rx_ring[i].cleaned;
+		data[index++] = priv->rx_ring[i]->yields;
+		data[index++] = priv->rx_ring[i]->misses;
+		data[index++] = priv->rx_ring[i]->cleaned;
#endif
 	}
 	spin_unlock_bh(&priv->stats_lock);
@@ -510,9 +510,9 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
 	tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
 	tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
 
-	if (rx_size == (priv->port_up ? priv->rx_ring[0].actual_size :
-					priv->rx_ring[0].size) &&
-	    tx_size == priv->tx_ring[0].size)
+	if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
+					priv->rx_ring[0]->size) &&
+	    tx_size == priv->tx_ring[0]->size)
 		return 0;
 
 	mutex_lock(&mdev->state_lock);
@@ -553,8 +553,8 @@ static void mlx4_en_get_ringparam(struct net_device *dev,
 	param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
 	param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
 	param->rx_pending = priv->port_up ?
-		priv->rx_ring[0].actual_size : priv->rx_ring[0].size;
-	param->tx_pending = priv->tx_ring[0].size;
+		priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size;
+	param->tx_pending = priv->tx_ring[0]->size;
 }
 
 static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
......
@@ -75,7 +75,7 @@ static int mlx4_en_low_latency_recv(struct napi_struct *napi)
 	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
 	struct net_device *dev = cq->dev;
 	struct mlx4_en_priv *priv = netdev_priv(dev);
-	struct mlx4_en_rx_ring *rx_ring = &priv->rx_ring[cq->ring];
+	struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
 	int done;
 
 	if (!priv->port_up)
@@ -102,6 +102,7 @@ struct mlx4_en_filter {
 	struct list_head next;
 	struct work_struct work;
 
+	u8     ip_proto;
 	__be32 src_ip;
 	__be32 dst_ip;
 	__be16 src_port;
@@ -120,14 +121,26 @@ struct mlx4_en_filter {
 
 static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);
 
+static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
+{
+	switch (ip_proto) {
+	case IPPROTO_UDP:
+		return MLX4_NET_TRANS_RULE_ID_UDP;
+	case IPPROTO_TCP:
+		return MLX4_NET_TRANS_RULE_ID_TCP;
+	default:
+		return -EPROTONOSUPPORT;
+	}
+};
+
 static void mlx4_en_filter_work(struct work_struct *work)
 {
 	struct mlx4_en_filter *filter = container_of(work,
 						     struct mlx4_en_filter,
 						     work);
 	struct mlx4_en_priv *priv = filter->priv;
-	struct mlx4_spec_list spec_tcp = {
-		.id = MLX4_NET_TRANS_RULE_ID_TCP,
+	struct mlx4_spec_list spec_tcp_udp = {
+		.id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
 		{
 			.tcp_udp = {
 				.dst_port = filter->dst_port,
@@ -163,9 +176,14 @@ static void mlx4_en_filter_work(struct work_struct *work)
 	int rc;
 	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
 
+	if (spec_tcp_udp.id < 0) {
+		en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
+			filter->ip_proto);
+		goto ignore;
+	}
 	list_add_tail(&spec_eth.list, &rule.list);
 	list_add_tail(&spec_ip.list, &rule.list);
-	list_add_tail(&spec_tcp.list, &rule.list);
+	list_add_tail(&spec_tcp_udp.list, &rule.list);
 
 	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
 	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
@@ -183,6 +201,7 @@ static void mlx4_en_filter_work(struct work_struct *work)
 	if (rc)
 		en_err(priv, "Error attaching flow. err = %d\n", rc);
 
+ignore:
 	mlx4_en_filter_rfs_expire(priv);
 
 	filter->activated = 1;
@@ -206,8 +225,8 @@ filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
 
 static struct mlx4_en_filter *
 mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
-		     __be32 dst_ip, __be16 src_port, __be16 dst_port,
-		     u32 flow_id)
+		     __be32 dst_ip, u8 ip_proto, __be16 src_port,
+		     __be16 dst_port, u32 flow_id)
 {
 	struct mlx4_en_filter *filter = NULL;
@@ -221,6 +240,7 @@ mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
 	filter->src_ip = src_ip;
 	filter->dst_ip = dst_ip;
+	filter->ip_proto = ip_proto;
 	filter->src_port = src_port;
 	filter->dst_port = dst_port;
@@ -252,7 +272,7 @@ static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
 
 static inline struct mlx4_en_filter *
 mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
-		    __be16 src_port, __be16 dst_port)
+		    u8 ip_proto, __be16 src_port, __be16 dst_port)
 {
 	struct mlx4_en_filter *filter;
 	struct mlx4_en_filter *ret = NULL;
@@ -263,6 +283,7 @@ mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
 			    filter_chain) {
 		if (filter->src_ip == src_ip &&
 		    filter->dst_ip == dst_ip &&
+		    filter->ip_proto == ip_proto &&
 		    filter->src_port == src_port &&
 		    filter->dst_port == dst_port) {
 			ret = filter;
@@ -281,6 +302,7 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
 	struct mlx4_en_filter *filter;
 	const struct iphdr *ip;
 	const __be16 *ports;
+	u8 ip_proto;
 	__be32 src_ip;
 	__be32 dst_ip;
 	__be16 src_port;
@@ -295,18 +317,19 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
 	if (ip_is_fragment(ip))
 		return -EPROTONOSUPPORT;
 
+	if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
+		return -EPROTONOSUPPORT;
+
 	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
 
+	ip_proto = ip->protocol;
 	src_ip = ip->saddr;
 	dst_ip = ip->daddr;
 	src_port = ports[0];
 	dst_port = ports[1];
 
-	if (ip->protocol != IPPROTO_TCP)
-		return -EPROTONOSUPPORT;
-
 	spin_lock_bh(&priv->filters_lock);
-	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, src_port, dst_port);
+	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
+				     src_port, dst_port);
 	if (filter) {
 		if (filter->rxq_index == rxq_index)
 			goto out;
@@ -314,7 +337,7 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
 		filter->rxq_index = rxq_index;
 	} else {
 		filter = mlx4_en_filter_alloc(priv, rxq_index,
-					      src_ip, dst_ip,
+					      src_ip, dst_ip, ip_proto,
 					      src_port, dst_port, flow_id);
 		if (!filter) {
 			ret = -ENOMEM;
@@ -332,8 +355,7 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
 	return ret;
 }
 
-void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
-			     struct mlx4_en_rx_ring *rx_ring)
+void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
 {
 	struct mlx4_en_filter *filter, *tmp;
 	LIST_HEAD(del_list);
@@ -1219,7 +1241,7 @@ static void mlx4_en_netpoll(struct net_device *dev)
 	int i;
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		cq = &priv->rx_cq[i];
+		cq = priv->rx_cq[i];
 		spin_lock_irqsave(&cq->lock, flags);
 		napi_synchronize(&cq->napi);
 		mlx4_en_process_rx_cq(dev, cq, 0);
@@ -1241,8 +1263,8 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
 			continue;
 		en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
-			i, priv->tx_ring[i].qpn, priv->tx_ring[i].cqn,
-			priv->tx_ring[i].cons, priv->tx_ring[i].prod);
+			i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn,
+			priv->tx_ring[i]->cons, priv->tx_ring[i]->prod);
 	}
 
 	priv->port_stats.tx_timeout++;
@@ -1282,7 +1304,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
 	/* Setup cq moderation params */
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		cq = &priv->rx_cq[i];
+		cq = priv->rx_cq[i];
 		cq->moder_cnt = priv->rx_frames;
 		cq->moder_time = priv->rx_usecs;
 		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
@@ -1291,7 +1313,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
 	}
 
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		cq = &priv->tx_cq[i];
+		cq = priv->tx_cq[i];
 		cq->moder_cnt = priv->tx_frames;
 		cq->moder_time = priv->tx_usecs;
 	}
@@ -1325,8 +1347,8 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 
 	for (ring = 0; ring < priv->rx_ring_num; ring++) {
 		spin_lock_bh(&priv->stats_lock);
-		rx_packets = priv->rx_ring[ring].packets;
-		rx_bytes = priv->rx_ring[ring].bytes;
+		rx_packets = priv->rx_ring[ring]->packets;
+		rx_bytes = priv->rx_ring[ring]->bytes;
 		spin_unlock_bh(&priv->stats_lock);
 
 		rx_pkt_diff = ((unsigned long) (rx_packets -
@@ -1355,7 +1377,7 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 
 		if (moder_time != priv->last_moder_time[ring]) {
 			priv->last_moder_time[ring] = moder_time;
-			cq = &priv->rx_cq[ring];
+			cq = priv->rx_cq[ring];
 			cq->moder_time = moder_time;
 			cq->moder_cnt = priv->rx_frames;
 			err = mlx4_en_set_cq_moder(priv, cq);
@@ -1478,7 +1500,7 @@ int mlx4_en_start_port(struct net_device *dev)
 			return err;
 	}
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		cq = &priv->rx_cq[i];
+		cq = priv->rx_cq[i];
 
 		mlx4_en_cq_init_lock(cq);
@@ -1496,7 +1518,7 @@ int mlx4_en_start_port(struct net_device *dev)
 			goto cq_err;
 		}
 		mlx4_en_arm_cq(priv, cq);
-		priv->rx_ring[i].cqn = cq->mcq.cqn;
+		priv->rx_ring[i]->cqn = cq->mcq.cqn;
 		++rx_index;
 	}
 
@@ -1522,7 +1544,7 @@ int mlx4_en_start_port(struct net_device *dev)
 	/* Configure tx cq's and rings */
 	for (i = 0; i < priv->tx_ring_num; i++) {
 		/* Configure cq */
-		cq = &priv->tx_cq[i];
+		cq = priv->tx_cq[i];
 		err = mlx4_en_activate_cq(priv, cq, i);
 		if (err) {
 			en_err(priv, "Failed allocating Tx CQ\n");
@@ -1538,7 +1560,7 @@ int mlx4_en_start_port(struct net_device *dev)
 		cq->buf->wqe_index = cpu_to_be16(0xffff);
 
 		/* Configure ring */
-		tx_ring = &priv->tx_ring[i];
+		tx_ring = priv->tx_ring[i];
 		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
 					       i / priv->num_tx_rings_p_up);
 		if (err) {
@@ -1608,8 +1630,8 @@ int mlx4_en_start_port(struct net_device *dev)
 tx_err:
 	while (tx_index--) {
-		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
-		mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
+		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
+		mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
 	}
 	mlx4_en_destroy_drop_qp(priv);
 rss_err:
@@ -1618,9 +1640,9 @@ int mlx4_en_start_port(struct net_device *dev)
 	mlx4_en_put_qp(priv);
 cq_err:
 	while (rx_index--)
-		mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
+		mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
 	for (i = 0; i < priv->rx_ring_num; i++)
-		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
+		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
 
 	return err; /* need to close devices */
 }
@@ -1716,13 +1738,13 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 
 	/* Free TX Rings */
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
-		mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
+		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
+		mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
 	}
 	msleep(10);
 
 	for (i = 0; i < priv->tx_ring_num; i++)
-		mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);
+		mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);
 
 	/* Free RSS qps */
 	mlx4_en_release_rss_steer(priv);
@@ -1734,7 +1756,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 
 	/* Free RX Rings */
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		struct mlx4_en_cq *cq = &priv->rx_cq[i];
+		struct mlx4_en_cq *cq = priv->rx_cq[i];
 
 		local_bh_disable();
 		while (!mlx4_en_cq_lock_napi(cq)) {
@@ -1745,7 +1767,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 		while (test_bit(NAPI_STATE_SCHED, &cq->napi.state))
 			msleep(1);
-		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
+		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
 		mlx4_en_deactivate_cq(priv, cq);
 	}
 }
@@ -1783,15 +1805,15 @@ static void mlx4_en_clear_stats(struct net_device *dev)
 	memset(&priv->port_stats, 0, sizeof(priv->port_stats));
 
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		priv->tx_ring[i].bytes = 0;
-		priv->tx_ring[i].packets = 0;
-		priv->tx_ring[i].tx_csum = 0;
+		priv->tx_ring[i]->bytes = 0;
+		priv->tx_ring[i]->packets = 0;
+		priv->tx_ring[i]->tx_csum = 0;
 	}
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		priv->rx_ring[i].bytes = 0;
-		priv->rx_ring[i].packets = 0;
-		priv->rx_ring[i].csum_ok = 0;
-		priv->rx_ring[i].csum_none = 0;
+		priv->rx_ring[i]->bytes = 0;
+		priv->rx_ring[i]->packets = 0;
+		priv->rx_ring[i]->csum_ok = 0;
+		priv->rx_ring[i]->csum_none = 0;
 	}
 }
@@ -1848,17 +1870,17 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 #endif
 
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		if (priv->tx_ring[i].tx_info)
+		if (priv->tx_ring && priv->tx_ring[i])
 			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
-		if (priv->tx_cq[i].buf)
+		if (priv->tx_cq && priv->tx_cq[i])
 			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
 	}
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		if (priv->rx_ring[i].rx_info)
+		if (priv->rx_ring[i])
 			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
 				priv->prof->rx_ring_size, priv->stride);
-		if (priv->rx_cq[i].buf)
+		if (priv->rx_cq[i])
 			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
 	}
@@ -1873,6 +1895,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 	struct mlx4_en_port_profile *prof = priv->prof;
 	int i;
 	int err;
+	int node;
 
 	err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn);
 	if (err) {
@@ -1882,23 +1905,26 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 
 	/* Create tx Rings */
 	for (i = 0; i < priv->tx_ring_num; i++) {
+		node = cpu_to_node(i % num_online_cpus());
 		if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
-				      prof->tx_ring_size, i, TX))
+				      prof->tx_ring_size, i, TX, node))
 			goto err;
 
 		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
-					   prof->tx_ring_size, TXBB_SIZE))
+					   prof->tx_ring_size, TXBB_SIZE, node))
 			goto err;
 	}
 
 	/* Create rx Rings */
 	for (i = 0; i < priv->rx_ring_num; i++) {
+		node = cpu_to_node(i % num_online_cpus());
 		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
-				      prof->rx_ring_size, i, RX))
+				      prof->rx_ring_size, i, RX, node))
 			goto err;
 
 		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
-					   prof->rx_ring_size, priv->stride))
+					   prof->rx_ring_size, priv->stride,
+					   node))
 			goto err;
 	}
 
@@ -1914,6 +1940,20 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 
 err:
 	en_err(priv, "Failed to allocate NIC resources\n");
+	for (i = 0; i < priv->rx_ring_num; i++) {
+		if (priv->rx_ring[i])
+			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
+						prof->rx_ring_size,
+						priv->stride);
+		if (priv->rx_cq[i])
+			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
+	}
+	for (i = 0; i < priv->tx_ring_num; i++) {
+		if (priv->tx_ring[i])
+			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
+		if (priv->tx_cq[i])
+			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
+	}
 	return -ENOMEM;
 }
 
@@ -2207,13 +2247,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
 	priv->tx_ring_num = prof->tx_ring_num;
 
-	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring) * MAX_TX_RINGS,
+	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
 				GFP_KERNEL);
 	if (!priv->tx_ring) {
 		err = -ENOMEM;
 		goto out;
 	}
-	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * MAX_TX_RINGS,
+	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
 			      GFP_KERNEL);
 	if (!priv->tx_cq) {
 		err = -ENOMEM;
......
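The netdev changes above extend accelerated RFS from TCP-only to TCP and UDP: the filter now records ip_proto, lookup includes it in the match key, and unsupported protocols are rejected early or skipped at rule-attach time. The dispatch reduces to a small protocol-to-rule-type map; a userspace-flavoured sketch, with illustrative names in place of the mlx4 ones:

	#include <netinet/in.h>	/* IPPROTO_TCP, IPPROTO_UDP */

	enum rule_id { RULE_ID_TCP, RULE_ID_UDP };

	/* Map an IP protocol to a flow-steering rule type; a negative return
	 * tells the caller to skip hardware steering for this flow. */
	static int proto_to_rule_id(unsigned char ip_proto)
	{
		switch (ip_proto) {
		case IPPROTO_TCP:
			return RULE_ID_TCP;
		case IPPROTO_UDP:
			return RULE_ID_UDP;
		default:
			return -1;
		}
	}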
@@ -56,7 +56,6 @@ int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
 		return PTR_ERR(mailbox);
 
 	filter = mailbox->buf;
-	memset(filter, 0, sizeof(*filter));
 	for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) {
 		entry = 0;
 		for (j = 0; j < 32; j++)
@@ -81,7 +80,6 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
 	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
-	memset(mailbox->buf, 0, sizeof(*qport_context));
 	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
 			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
 			   MLX4_CMD_WRAPPED);
@@ -127,7 +125,6 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
-	memset(mailbox->buf, 0, sizeof(*mlx4_en_stats));
 	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
 			   MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
 			   MLX4_CMD_WRAPPED);
@@ -143,18 +140,18 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 	priv->port_stats.rx_chksum_good = 0;
 	priv->port_stats.rx_chksum_none = 0;
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		stats->rx_packets += priv->rx_ring[i].packets;
-		stats->rx_bytes += priv->rx_ring[i].bytes;
-		priv->port_stats.rx_chksum_good += priv->rx_ring[i].csum_ok;
-		priv->port_stats.rx_chksum_none += priv->rx_ring[i].csum_none;
+		stats->rx_packets += priv->rx_ring[i]->packets;
+		stats->rx_bytes += priv->rx_ring[i]->bytes;
+		priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
+		priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
 	}
 	stats->tx_packets = 0;
 	stats->tx_bytes = 0;
 	priv->port_stats.tx_chksum_offload = 0;
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		stats->tx_packets += priv->tx_ring[i].packets;
-		stats->tx_bytes += priv->tx_ring[i].bytes;
-		priv->port_stats.tx_chksum_offload += priv->tx_ring[i].tx_csum;
+		stats->tx_packets += priv->tx_ring[i]->packets;
+		stats->tx_bytes += priv->tx_ring[i]->bytes;
+		priv->port_stats.tx_chksum_offload += priv->tx_ring[i]->tx_csum;
 	}
 
 	stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
......
@@ -264,7 +264,7 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
 
 	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
 		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
-			ring = &priv->rx_ring[ring_ind];
+			ring = priv->rx_ring[ring_ind];
 
 			if (mlx4_en_prepare_rx_desc(priv, ring,
 						    ring->actual_size,
@@ -289,7 +289,7 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
 
 reduce_rings:
 	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
-		ring = &priv->rx_ring[ring_ind];
+		ring = priv->rx_ring[ring_ind];
 		while (ring->actual_size > new_size) {
 			ring->actual_size--;
 			ring->prod--;
@@ -319,12 +319,23 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
 }
 
 int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
-			   struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
+			   struct mlx4_en_rx_ring **pring,
+			   u32 size, u16 stride, int node)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_rx_ring *ring;
 	int err = -ENOMEM;
 	int tmp;
 
+	ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
+	if (!ring) {
+		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+		if (!ring) {
+			en_err(priv, "Failed to allocate RX ring structure\n");
+			return -ENOMEM;
+		}
+	}
+
 	ring->prod = 0;
 	ring->cons = 0;
 	ring->size = size;
@@ -335,17 +346,25 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
 					sizeof(struct mlx4_en_rx_alloc));
-	ring->rx_info = vmalloc(tmp);
-	if (!ring->rx_info)
-		return -ENOMEM;
+	ring->rx_info = vmalloc_node(tmp, node);
+	if (!ring->rx_info) {
+		ring->rx_info = vmalloc(tmp);
+		if (!ring->rx_info) {
+			err = -ENOMEM;
+			goto err_ring;
+		}
+	}
 
 	en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
 		 ring->rx_info, tmp);
 
+	/* Allocate HW buffers on provided NUMA node */
+	set_dev_node(&mdev->dev->pdev->dev, node);
 	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
 				 ring->buf_size, 2 * PAGE_SIZE);
+	set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
 	if (err)
-		goto err_ring;
+		goto err_info;
 
 	err = mlx4_en_map_buffer(&ring->wqres.buf);
 	if (err) {
@@ -356,13 +375,18 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 
 	ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;
 
+	*pring = ring;
 	return 0;
 
 err_hwq:
 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
-err_ring:
+err_info:
 	vfree(ring->rx_info);
 	ring->rx_info = NULL;
+err_ring:
+	kfree(ring);
+	*pring = NULL;
 	return err;
 }
 
@@ -376,12 +400,12 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 					DS_SIZE * priv->num_frags);
 
 	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
-		ring = &priv->rx_ring[ring_ind];
+		ring = priv->rx_ring[ring_ind];
 
 		ring->prod = 0;
 		ring->cons = 0;
 		ring->actual_size = 0;
-		ring->cqn = priv->rx_cq[ring_ind].mcq.cqn;
+		ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;
 
 		ring->stride = stride;
 		if (ring->stride <= TXBB_SIZE)
@@ -412,7 +436,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 		goto err_buffers;
 
 	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
-		ring = &priv->rx_ring[ring_ind];
+		ring = priv->rx_ring[ring_ind];
 
 		ring->size_mask = ring->actual_size - 1;
 		mlx4_en_update_rx_prod_db(ring);
@@ -422,30 +446,34 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 
 err_buffers:
 	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
-		mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]);
+		mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);
 
 	ring_ind = priv->rx_ring_num - 1;
err_allocator:
 	while (ring_ind >= 0) {
-		if (priv->rx_ring[ring_ind].stride <= TXBB_SIZE)
-			priv->rx_ring[ring_ind].buf -= TXBB_SIZE;
-		mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
+		if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE)
+			priv->rx_ring[ring_ind]->buf -= TXBB_SIZE;
+		mlx4_en_destroy_allocator(priv, priv->rx_ring[ring_ind]);
 		ring_ind--;
 	}
 	return err;
 }
 
 void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
-			     struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
+			     struct mlx4_en_rx_ring **pring,
+			     u32 size, u16 stride)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_rx_ring *ring = *pring;
 
 	mlx4_en_unmap_buffer(&ring->wqres.buf);
 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
 	vfree(ring->rx_info);
 	ring->rx_info = NULL;
+	kfree(ring);
+	*pring = NULL;
 #ifdef CONFIG_RFS_ACCEL
-	mlx4_en_cleanup_filters(priv, ring);
+	mlx4_en_cleanup_filters(priv);
 #endif
 }
 
@@ -592,7 +620,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_cqe *cqe;
-	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
+	struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
 	struct mlx4_en_rx_alloc *frags;
 	struct mlx4_en_rx_desc *rx_desc;
 	struct sk_buff *skb;
@@ -991,7 +1019,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 	for (i = 0; i < priv->rx_ring_num; i++) {
 		qpn = rss_map->base_qpn + i;
-		err = mlx4_en_config_rss_qp(priv, qpn, &priv->rx_ring[i],
+		err = mlx4_en_config_rss_qp(priv, qpn, priv->rx_ring[i],
 					    &rss_map->state[i],
 					    &rss_map->qps[i]);
 		if (err)
@@ -1008,7 +1036,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 	}
 	rss_map->indir_qp.event = mlx4_en_sqp_event;
 	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
-				priv->rx_ring[0].cqn, -1, &context);
+				priv->rx_ring[0]->cqn, -1, &context);
 
 	if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
 		rss_rings = priv->rx_ring_num;
......
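A detail worth calling out in the RX-ring hunks above: mlx4_alloc_hwq_res() allocates DMA memory against the PCI device, and the DMA allocators honour the device's NUMA node rather than the caller's. The series therefore temporarily retargets the device to the ring's node and restores dev->numa_node immediately afterwards, keeping the override scoped to the one allocation. Factored out, the bracketing looks like this (a sketch reusing the functions visible in the diff):

	/* Steer the HW queue's DMA memory to 'node', then restore the
	 * device's home node so unrelated allocations are unaffected. */
	static int mlx4_en_alloc_hwq_on_node(struct mlx4_en_dev *mdev,
					     struct mlx4_hwq_resources *wqres,
					     int size, int node)
	{
		int err;

		set_dev_node(&mdev->dev->pdev->dev, node);
		err = mlx4_alloc_hwq_res(mdev->dev, wqres, size, 2 * PAGE_SIZE);
		set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
		return err;
	}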
@@ -156,7 +156,7 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
 		 * since we turned the carrier off */
 		msleep(200);
 		for (i = 0; i < priv->tx_ring_num && carrier_ok; i++) {
-			tx_ring = &priv->tx_ring[i];
+			tx_ring = priv->tx_ring[i];
 			if (tx_ring->prod != (tx_ring->cons + tx_ring->last_nr_txbb))
 				goto retry_tx;
 		}
......
@@ -54,13 +54,23 @@ module_param_named(inline_thold, inline_thold, int, 0444);
 MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
 
 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
-			   struct mlx4_en_tx_ring *ring, int qpn, u32 size,
-			   u16 stride)
+			   struct mlx4_en_tx_ring **pring, int qpn, u32 size,
+			   u16 stride, int node)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_tx_ring *ring;
 	int tmp;
 	int err;
 
+	ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
+	if (!ring) {
+		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+		if (!ring) {
+			en_err(priv, "Failed allocating TX ring\n");
+			return -ENOMEM;
+		}
+	}
+
 	ring->size = size;
 	ring->size_mask = size - 1;
 	ring->stride = stride;
@@ -68,22 +78,33 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 	inline_thold = min(inline_thold, MAX_INLINE);
 
 	tmp = size * sizeof(struct mlx4_en_tx_info);
-	ring->tx_info = vmalloc(tmp);
-	if (!ring->tx_info)
-		return -ENOMEM;
+	ring->tx_info = vmalloc_node(tmp, node);
+	if (!ring->tx_info) {
+		ring->tx_info = vmalloc(tmp);
+		if (!ring->tx_info) {
+			err = -ENOMEM;
+			goto err_ring;
+		}
+	}
 
 	en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
 		 ring->tx_info, tmp);
 
-	ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
+	ring->bounce_buf = kmalloc_node(MAX_DESC_SIZE, GFP_KERNEL, node);
 	if (!ring->bounce_buf) {
-		err = -ENOMEM;
-		goto err_tx;
+		ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
+		if (!ring->bounce_buf) {
+			err = -ENOMEM;
+			goto err_info;
+		}
 	}
 	ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);
 
+	/* Allocate HW buffers on provided NUMA node */
+	set_dev_node(&mdev->dev->pdev->dev, node);
 	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
 				 2 * PAGE_SIZE);
+	set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
 	if (err) {
 		en_err(priv, "Failed allocating hwq resources\n");
 		goto err_bounce;
@@ -109,7 +130,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 	}
 
 	ring->qp.event = mlx4_en_sqp_event;
-	err = mlx4_bf_alloc(mdev->dev, &ring->bf);
+	err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
 	if (err) {
 		en_dbg(DRV, priv, "working without blueflame (%d)", err);
 		ring->bf.uar = &mdev->priv_uar;
@@ -120,6 +141,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 	ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
 
+	*pring = ring;
 	return 0;
 
 err_map:
@@ -129,16 +151,20 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 err_bounce:
 	kfree(ring->bounce_buf);
 	ring->bounce_buf = NULL;
-err_tx:
+err_info:
 	vfree(ring->tx_info);
 	ring->tx_info = NULL;
+err_ring:
+	kfree(ring);
+	*pring = NULL;
 	return err;
 }
 
 void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
-			     struct mlx4_en_tx_ring *ring)
+			     struct mlx4_en_tx_ring **pring)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_tx_ring *ring = *pring;
 
 	en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
 
 	if (ring->bf_enabled)
@@ -151,6 +177,8 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
 	ring->bounce_buf = NULL;
 	vfree(ring->tx_info);
 	ring->tx_info = NULL;
+	kfree(ring);
+	*pring = NULL;
 }
 
 int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
@@ -330,7 +358,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_cq *mcq = &cq->mcq;
-	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
+	struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
 	struct mlx4_cqe *cqe;
 	u16 index;
 	u16 new_index, ring_index, stamp_index;
@@ -622,7 +650,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	tx_ind = skb->queue_mapping;
-	ring = &priv->tx_ring[tx_ind];
+	ring = priv->tx_ring[tx_ind];
 
 	if (vlan_tx_tag_present(skb))
 		vlan_tag = vlan_tx_tag_get(skb);
......
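The TX-ring constructor above now owns three allocations (the ring struct, tx_info, and the bounce buffer) before it ever touches hardware, and the error labels unwind them in exact reverse order, each label falling through to the next. The skeleton of the pattern, stripped of the mlx4 specifics (kernel C, assuming <linux/slab.h> and <linux/vmalloc.h>; the struct and sizes are illustrative):

	struct ring {
		void *info;
		void *bounce;
	};
	#define INFO_SIZE   4096
	#define BOUNCE_SIZE 512

	/* Staged unwinding: each err_* label releases one resource and falls
	 * through to the labels below it, so 'goto err_X' frees everything
	 * acquired up to and including stage X. */
	static int create_ring_sketch(struct ring **pring)
	{
		struct ring *ring;
		int err = -ENOMEM;

		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			return -ENOMEM;

		ring->info = vmalloc(INFO_SIZE);
		if (!ring->info)
			goto err_ring;

		ring->bounce = kmalloc(BOUNCE_SIZE, GFP_KERNEL);
		if (!ring->bounce)
			goto err_info;

		*pring = ring;
		return 0;

	err_info:
		vfree(ring->info);
	err_ring:
		kfree(ring);
		*pring = NULL;
		return err;
	}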
@@ -936,7 +936,6 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
 	if (err)
 		goto err_out_free_mtt;
 
-	memset(eq_context, 0, sizeof *eq_context);
 	eq_context->flags = cpu_to_be32(MLX4_EQ_STATUS_OK |
 					MLX4_EQ_STATE_ARMED);
 	eq_context->log_eq_size = ilog2(eq->nent);
......
@@ -159,8 +159,6 @@ int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
 		return PTR_ERR(mailbox);
 	inbox = mailbox->buf;
 
-	memset(inbox, 0, MOD_STAT_CFG_IN_SIZE);
-
 	MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
 	MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);
@@ -967,7 +965,6 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
-	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
 	pages = mailbox->buf;
 
 	for (mlx4_icm_first(icm, &iter);
@@ -1316,8 +1313,6 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 		return PTR_ERR(mailbox);
 	inbox = mailbox->buf;
 
-	memset(inbox, 0, INIT_HCA_IN_SIZE);
-
 	*((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
 	*((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
@@ -1616,8 +1611,6 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
 		return PTR_ERR(mailbox);
 	inbox = mailbox->buf;
 
-	memset(inbox, 0, INIT_PORT_IN_SIZE);
-
 	flags = 0;
 	flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
 	flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
......
@@ -93,13 +93,17 @@ void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
 	kfree(icm);
 }

-static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
+static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
+				gfp_t gfp_mask, int node)
 {
 	struct page *page;

-	page = alloc_pages(gfp_mask, order);
-	if (!page)
-		return -ENOMEM;
+	page = alloc_pages_node(node, gfp_mask, order);
+	if (!page) {
+		page = alloc_pages(gfp_mask, order);
+		if (!page)
+			return -ENOMEM;
+	}
 	sg_set_page(mem, page, PAGE_SIZE << order, 0);
 	return 0;
@@ -130,9 +134,15 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
 	/* We use sg_set_buf for coherent allocs, which assumes low memory */
 	BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));

-	icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
-	if (!icm)
-		return NULL;
+	icm = kmalloc_node(sizeof(*icm),
+			   gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN),
+			   dev->numa_node);
+	if (!icm) {
+		icm = kmalloc(sizeof(*icm),
+			      gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
+		if (!icm)
+			return NULL;
+	}
 	icm->refcount = 0;
 	INIT_LIST_HEAD(&icm->chunk_list);
@@ -141,10 +151,17 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
 	while (npages > 0) {
 		if (!chunk) {
-			chunk = kmalloc(sizeof *chunk,
-					gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
-			if (!chunk)
-				goto fail;
+			chunk = kmalloc_node(sizeof(*chunk),
+					     gfp_mask & ~(__GFP_HIGHMEM |
							  __GFP_NOWARN),
+					     dev->numa_node);
+			if (!chunk) {
+				chunk = kmalloc(sizeof(*chunk),
+						gfp_mask & ~(__GFP_HIGHMEM |
							     __GFP_NOWARN));
+				if (!chunk)
+					goto fail;
+			}

 			sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
 			chunk->npages = 0;
@@ -161,7 +178,8 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
						   cur_order, gfp_mask);
 		else
 			ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
-						   cur_order, gfp_mask);
+						   cur_order, gfp_mask,
+						   dev->numa_node);

 		if (ret) {
 			if (--cur_order < 0)
...
@@ -2191,6 +2191,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
 	mutex_init(&priv->bf_mutex);

 	dev->rev_id = pdev->revision;
+	dev->numa_node = dev_to_node(&pdev->dev);
 	/* Detect if this device is a virtual function */
 	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
 		/* When acting as pf, we normally skip vfs unless explicitly
...
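The device's home node is captured once at probe time and then threaded through the allocations shown in the icm.c hunks. Note that dev_to_node() may return NUMA_NO_NODE (-1), which kmalloc_node() and alloc_pages_node() already treat as "no preference", so the explicit any-node fallback matters mainly when the node is valid but temporarily short of memory. A hedged sketch of the capture-and-use pattern (struct mydev is a stand-in, not the driver's struct):

#include <linux/pci.h>
#include <linux/slab.h>

struct mydev {
	int numa_node;
};

/* Probe: remember which node the PCI device is attached to. */
static void mydev_capture_node(struct mydev *d, struct pci_dev *pdev)
{
	d->numa_node = dev_to_node(&pdev->dev);	/* may be NUMA_NO_NODE */
}

/* Setup paths: prefer node-local memory, degrade gracefully. */
static void *mydev_alloc(struct mydev *d, size_t size)
{
	void *p = kmalloc_node(size, GFP_KERNEL, d->numa_node);

	if (!p)
		p = kmalloc(size, GFP_KERNEL);	/* any node beats failing */
	return p;
}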
@@ -506,7 +506,6 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
 		goto out_list;
 	}
 	mgm = mailbox->buf;
-	memset(mgm, 0, sizeof *mgm);
 	members_count = 0;
 	list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
 		mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
@@ -857,7 +856,6 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
-	memset(mailbox->buf, 0, sizeof(struct mlx4_net_trans_rule_hw_ctrl));
 	trans_rule_ctrl_to_hw(rule, mailbox->buf);
 	size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
...
@@ -530,10 +530,10 @@ struct mlx4_en_priv {
 	u16 num_frags;
 	u16 log_rx_info;

-	struct mlx4_en_tx_ring *tx_ring;
-	struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
-	struct mlx4_en_cq *tx_cq;
-	struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
+	struct mlx4_en_tx_ring **tx_ring;
+	struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS];
+	struct mlx4_en_cq **tx_cq;
+	struct mlx4_en_cq *rx_cq[MAX_RX_RINGS];
 	struct mlx4_qp drop_qp;
 	struct work_struct rx_mode_task;
 	struct work_struct watchdog_task;
@@ -626,7 +626,7 @@ static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq)
 	if ((cq->state & MLX4_CQ_LOCKED)) {
 		struct net_device *dev = cq->dev;
 		struct mlx4_en_priv *priv = netdev_priv(dev);
-		struct mlx4_en_rx_ring *rx_ring = &priv->rx_ring[cq->ring];
+		struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];

 		cq->state |= MLX4_EN_CQ_STATE_POLL_YIELD;
 		rc = false;
@@ -704,9 +704,9 @@ void mlx4_en_stop_port(struct net_device *dev, int detach);
 void mlx4_en_free_resources(struct mlx4_en_priv *priv);
 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);

-int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
-		      int entries, int ring, enum cq_type mode);
-void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq,
+		      int entries, int ring, enum cq_type mode, int node);
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq);
 int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
			int cq_idx);
 void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
@@ -717,9 +717,11 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
 netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);

-int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
-			   int qpn, u32 size, u16 stride);
-void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring);
+int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
+			   struct mlx4_en_tx_ring **pring,
+			   int qpn, u32 size, u16 stride, int node);
+void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
+			     struct mlx4_en_tx_ring **pring);
 int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring,
			     int cq, int user_prio);
@@ -727,10 +729,10 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring);

 int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
-			   struct mlx4_en_rx_ring *ring,
-			   u32 size, u16 stride);
+			   struct mlx4_en_rx_ring **pring,
+			   u32 size, u16 stride, int node);
 void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
-			     struct mlx4_en_rx_ring *ring,
+			     struct mlx4_en_rx_ring **pring,
			     u32 size, u16 stride);
 int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
 void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
@@ -768,8 +770,7 @@ extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops;
 int mlx4_en_setup_tc(struct net_device *dev, u8 up);

 #ifdef CONFIG_RFS_ACCEL
-void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
-			     struct mlx4_en_rx_ring *rx_ring);
+void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv);
 #endif

 #define MLX4_EN_NUM_SELF_TEST	5
...
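With the rings and CQs held as arrays of pointers, a caller can place each one independently. As a rough illustration of how the new **pring/node prototypes above might be driven (the cpu-to-node policy and the helper itself are invented for the example, and priv->tx_ring[] is assumed to already be sized for num entries):

#include <linux/topology.h>
#include <linux/cpumask.h>
/* assumes the mlx4_en.h prototypes shown above */

static int alloc_all_tx_rings(struct mlx4_en_priv *priv, int num,
			      int qpn_base, u32 size, u16 stride)
{
	int i, err;

	for (i = 0; i < num; i++) {
		/* illustrative policy: spread rings across online CPUs */
		int node = cpu_to_node(i % num_online_cpus());

		err = mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
					     qpn_base + i, size, stride,
					     node);
		if (err)
			goto undo;
	}
	return 0;

undo:
	while (--i >= 0)
		mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
	return err;
}

Because the create/destroy pair clears the per-slot pointer, the unwind loop needs no extra state to know which slots were populated.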
@@ -480,9 +480,6 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
 		goto err_table;
 	}
 	mpt_entry = mailbox->buf;
-	memset(mpt_entry, 0, sizeof *mpt_entry);
 	mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO |
				       MLX4_MPT_FLAG_REGION |
				       mr->access);
@@ -695,8 +692,6 @@ int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw)
 	}
 	mpt_entry = mailbox->buf;
-	memset(mpt_entry, 0, sizeof(*mpt_entry));
 	/* Note that the MLX4_MPT_FLAG_REGION bit in mpt_entry->flags is turned
	 * off, thus creating a memory window and not a memory region.
	 */
...
@@ -168,7 +168,7 @@ void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar)
 }
 EXPORT_SYMBOL_GPL(mlx4_uar_free);

-int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf)
+int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_uar *uar;
@@ -186,10 +186,13 @@ int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf)
 		err = -ENOMEM;
 		goto out;
 	}
-	uar = kmalloc(sizeof *uar, GFP_KERNEL);
+	uar = kmalloc_node(sizeof(*uar), GFP_KERNEL, node);
 	if (!uar) {
-		err = -ENOMEM;
-		goto out;
+		uar = kmalloc(sizeof(*uar), GFP_KERNEL);
+		if (!uar) {
+			err = -ENOMEM;
+			goto out;
+		}
 	}
 	err = mlx4_uar_alloc(dev, uar);
 	if (err)
...
@@ -469,8 +469,6 @@ int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
 	inbuf = inmailbox->buf;
 	outbuf = outmailbox->buf;
-	memset(inbuf, 0, 256);
-	memset(outbuf, 0, 256);
 	inbuf[0] = 1;
 	inbuf[1] = 1;
 	inbuf[2] = 1;
@@ -653,8 +651,6 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz)
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
-	memset(mailbox->buf, 0, 256);
 	((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];

 	if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) {
@@ -692,8 +688,6 @@ int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
 	context = mailbox->buf;
-	memset(context, 0, sizeof *context);
 	context->flags = SET_PORT_GEN_ALL_VALID;
 	context->mtu = cpu_to_be16(mtu);
 	context->pptx = (pptx * (!pfctx)) << 7;
@@ -727,8 +721,6 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
 	context = mailbox->buf;
-	memset(context, 0, sizeof *context);
 	context->base_qpn = cpu_to_be32(base_qpn);
 	context->n_mac = dev->caps.log_num_macs;
 	context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
@@ -761,8 +753,6 @@ int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc)
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
 	context = mailbox->buf;
-	memset(context, 0, sizeof *context);
 	for (i = 0; i < MLX4_NUM_UP; i += 2)
 		context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];
@@ -788,7 +778,6 @@ int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
 	context = mailbox->buf;
-	memset(context, 0, sizeof *context);

 	for (i = 0; i < MLX4_NUM_TC; i++) {
 		struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
...
@@ -110,7 +110,14 @@ struct res_qp {
 	int			local_qpn;
 	atomic_t		ref_count;
 	u32			qpc_flags;
+	/* saved qp params before VST enforcement in order to restore on VGT */
 	u8			sched_queue;
+	__be32			param3;
+	u8			vlan_control;
+	u8			fvl_rx;
+	u8			pri_path_fl;
+	u8			vlan_index;
+	u8			feup;
 };

 enum res_mtt_states {
@@ -2568,6 +2575,12 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
 		return err;
 	qp->local_qpn = local_qpn;
 	qp->sched_queue = 0;
+	qp->param3 = 0;
+	qp->vlan_control = 0;
+	qp->fvl_rx = 0;
+	qp->pri_path_fl = 0;
+	qp->vlan_index = 0;
+	qp->feup = 0;
 	qp->qpc_flags = be32_to_cpu(qpc->flags);

 	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
@@ -3294,6 +3307,12 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
 	int qpn = vhcr->in_modifier & 0x7fffff;
 	struct res_qp *qp;
 	u8 orig_sched_queue;
+	__be32 orig_param3 = qpc->param3;
+	u8 orig_vlan_control = qpc->pri_path.vlan_control;
+	u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
+	u8 orig_pri_path_fl = qpc->pri_path.fl;
+	u8 orig_vlan_index = qpc->pri_path.vlan_index;
+	u8 orig_feup = qpc->pri_path.feup;

 	err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
 	if (err)
@@ -3321,9 +3340,15 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
	 * essentially the QOS value provided by the VF. This will be useful
	 * if we allow dynamic changes from VST back to VGT
	 */
-	if (!err)
+	if (!err) {
 		qp->sched_queue = orig_sched_queue;
+		qp->param3 = orig_param3;
+		qp->vlan_control = orig_vlan_control;
+		qp->fvl_rx = orig_fvl_rx;
+		qp->pri_path_fl = orig_pri_path_fl;
+		qp->vlan_index = orig_vlan_index;
+		qp->feup = orig_feup;
+	}

 	put_res(dev, slave, qpn, RES_QP);
 	return err;
 }
@@ -4437,13 +4462,20 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
		&tracker->slave_list[work->slave].res_list[RES_QP];
 	struct res_qp *qp;
 	struct res_qp *tmp;
-	u64 qp_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
+	u64 qp_path_mask_vlan_ctrl =
+		       ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
-		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED) |
-		       (1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
+		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
+
+	u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
+		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
+		       (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
+		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
+		       (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
+		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
 	int err;
@@ -4475,9 +4507,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
		MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;

 	upd_context = mailbox->buf;
-	upd_context->primary_addr_path_mask = cpu_to_be64(qp_mask);
-	upd_context->qp_context.pri_path.vlan_control = vlan_control;
-	upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
+	upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD);

 	spin_lock_irq(mlx4_tlock(dev));
 	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
@@ -4495,10 +4525,35 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
			spin_lock_irq(mlx4_tlock(dev));
			continue;
 		}
-		upd_context->qp_context.pri_path.sched_queue =
-			qp->sched_queue & 0xC7;
-		upd_context->qp_context.pri_path.sched_queue |=
-			((work->qos & 0x7) << 3);
+		if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
+			upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
+		else
+			upd_context->primary_addr_path_mask =
+				cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
+		if (work->vlan_id == MLX4_VGT) {
+			upd_context->qp_context.param3 = qp->param3;
+			upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
+			upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
+			upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
+			upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
+			upd_context->qp_context.pri_path.feup = qp->feup;
+			upd_context->qp_context.pri_path.sched_queue =
+				qp->sched_queue;
+		} else {
+			upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
+			upd_context->qp_context.pri_path.vlan_control = vlan_control;
+			upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
+			upd_context->qp_context.pri_path.fvl_rx =
+				qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
+			upd_context->qp_context.pri_path.fl =
+				qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
+			upd_context->qp_context.pri_path.feup =
+				qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
+			upd_context->qp_context.pri_path.sched_queue =
+				qp->sched_queue & 0xC7;
+			upd_context->qp_context.pri_path.sched_queue |=
+				((work->qos & 0x7) << 3);
+		}

 		err = mlx4_cmd(dev, mailbox->dma,
			       qp->local_qpn & 0xffffff,
...
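Two details in the handler above are worth spelling out. First, when the VF returns to VGT (work->vlan_id == MLX4_VGT), the parameters saved in struct res_qp at INIT2RTR time are written back verbatim, undoing the VST enforcement. Second, in the VST branch only bits 5:3 of sched_queue are replaced with the admin QOS value; the `& 0xC7` mask preserves bits 7:6 and 2:0. A small self-checking example of that bit packing (plain C, compiles standalone):

#include <assert.h>
#include <stdint.h>

/* Same packing as the VST branch: keep bits 7:6 and 2:0 of the saved
 * sched_queue, overwrite bits 5:3 with the 3-bit QOS value. */
static uint8_t pack_sched_queue(uint8_t sched_queue, uint8_t qos)
{
	return (uint8_t)((sched_queue & 0xC7) | ((qos & 0x7) << 3));
}

int main(void)
{
	/* 0xEF carries 0b101 in bits 5:3; a QOS of 2 replaces it with 0b010 */
	assert(pack_sched_queue(0xEF, 2) == 0xD7);
	/* bits outside 5:3 are untouched */
	assert(pack_sched_queue(0x07, 7) == 0x3F);
	return 0;
}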
@@ -189,8 +189,6 @@ int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
 	}
 	srq_context = mailbox->buf;
-	memset(srq_context, 0, sizeof *srq_context);
 	srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) |
						      srq->srqn);
 	srq_context->logstride = srq->wqe_shift - 4;
...
@@ -662,6 +662,7 @@ struct mlx4_dev {
 	u8			rev_id;
 	char			board_id[MLX4_BOARD_ID_LEN];
 	int			num_vfs;
+	int			numa_node;
 	int			oper_log_mgm_entry_size;
 	u64			regid_promisc_array[MLX4_MAX_PORTS + 1];
 	u64			regid_allmulti_array[MLX4_MAX_PORTS + 1];
@@ -834,7 +835,7 @@ void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn);
 int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar);
 void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar);

-int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf);
+int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node);
 void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf);
 int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
...