Commit e8e96081 authored by David S. Miller

Merge tag 'mlx5-fixes-2018-04-25' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
Mellanox, mlx5 fixes 2018-04-26

This pull request includes fixes for mlx5 core and netdev driver.

Please pull and let me know if there's any problem.

For -stable v4.12
    net/mlx5e: TX, Use correct counter in dma_map error flow
For -stable v4.13
    net/mlx5: Avoid cleaning flow steering table twice during error flow
For -stable v4.14
    net/mlx5e: Allow offloading ipv4 header re-write for icmp
For -stable v4.15
    net/mlx5e: DCBNL fix min inline header size for dscp
For -stable v4.16
    net/mlx5: Fix mlx5_get_vector_affinity function
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -4757,7 +4757,7 @@ mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector)
 {
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
 
-       return mlx5_get_vector_affinity(dev->mdev, comp_vector);
+       return mlx5_get_vector_affinity_hint(dev->mdev, comp_vector);
 }
 
 /* The mlx5_ib_multiport_mutex should be held when calling this function */
......
@@ -1007,12 +1007,14 @@ static void mlx5e_trust_update_sq_inline_mode(struct mlx5e_priv *priv)
 
        mutex_lock(&priv->state_lock);
 
-       if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
-               goto out;
-
        new_channels.params = priv->channels.params;
        mlx5e_trust_update_tx_min_inline_mode(priv, &new_channels.params);
 
+       if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+               priv->channels.params = new_channels.params;
+               goto out;
+       }
+
        /* Skip if tx_min_inline is the same */
        if (new_channels.params.tx_min_inline_mode ==
            priv->channels.params.tx_min_inline_mode)
......
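The en_dcbnl hunk above reorders the work so the dscp-derived tx_min_inline_mode is computed first; if the channels are not open yet, the updated parameters are simply stored so they take effect when the channels are opened, instead of being discarded by an early return. Below is a minimal userspace sketch of this staged-update pattern; all names (struct priv, update_tx_min_inline_mode, etc.) are hypothetical stand-ins, not the driver's API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for mlx5e_params and priv->channels state. */
struct params { int tx_min_inline_mode; };

struct priv {
        bool opened;          /* analogous to MLX5E_STATE_OPENED */
        struct params params; /* active parameters */
};

/* Compute the inline mode required by the current trust state. */
static void update_tx_min_inline_mode(struct params *p)
{
        p->tx_min_inline_mode = 2; /* e.g. "inline L2 headers" */
}

static void trust_update_sq_inline_mode(struct priv *priv)
{
        struct params new_params = priv->params;

        update_tx_min_inline_mode(&new_params);

        if (!priv->opened) {
                /* Not running: record the parameters now so they are
                 * used when the channels are opened later. */
                priv->params = new_params;
                return;
        }

        if (new_params.tx_min_inline_mode == priv->params.tx_min_inline_mode)
                return; /* nothing changed */

        /* Running and the mode changed: switch to the new parameters
         * (the real driver rebuilds its channels at this point). */
        priv->params = new_params;
}

int main(void)
{
        struct priv p = { .opened = false, .params = { .tx_min_inline_mode = 0 } };

        trust_update_sq_inline_mode(&p);
        printf("staged inline mode: %d\n", p.params.tx_min_inline_mode);
        return 0;
}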
@@ -877,13 +877,14 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
 };
 
 static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
-                                  struct mlx5e_params *params)
+                                  struct mlx5e_params *params, u16 mtu)
 {
        u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
                                        MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
                                        MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
 
        params->hard_mtu    = MLX5E_ETH_HARD_MTU;
+       params->sw_mtu      = mtu;
        params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE;
        params->rq_wq_type  = MLX5_WQ_TYPE_LINKED_LIST;
        params->log_rq_mtu_frames = MLX5E_REP_PARAMS_LOG_RQ_SIZE;
@@ -931,7 +932,7 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
 
        priv->channels.params.num_channels = profile->max_nch(mdev);
 
-       mlx5e_build_rep_params(mdev, &priv->channels.params);
+       mlx5e_build_rep_params(mdev, &priv->channels.params, netdev->mtu);
        mlx5e_build_rep_netdev(netdev);
 
        mlx5e_timestamp_init(priv);
......
@@ -1864,7 +1864,8 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
        }
 
        ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
-       if (modify_ip_header && ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
+       if (modify_ip_header && ip_proto != IPPROTO_TCP &&
+           ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
                pr_info("can't offload re-write of ip proto %d\n", ip_proto);
                return false;
        }
......
@@ -255,7 +255,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
                                          DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
-                       return -ENOMEM;
+                       goto dma_unmap_wqe_err;
 
                dseg->addr = cpu_to_be64(dma_addr);
                dseg->lkey = sq->mkey_be;
@@ -273,7 +273,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
                                            DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
-                       return -ENOMEM;
+                       goto dma_unmap_wqe_err;
 
                dseg->addr = cpu_to_be64(dma_addr);
                dseg->lkey = sq->mkey_be;
@@ -285,6 +285,10 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
        }
 
        return num_dma;
+
+dma_unmap_wqe_err:
+       mlx5e_dma_unmap_wqe_err(sq, num_dma);
+       return -ENOMEM;
 }
 
 static inline void
@@ -380,17 +384,15 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
        num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
                                          (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
        if (unlikely(num_dma < 0))
-               goto dma_unmap_wqe_err;
+               goto err_drop;
 
        mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
                             num_bytes, num_dma, wi, cseg);
 
        return NETDEV_TX_OK;
 
-dma_unmap_wqe_err:
+err_drop:
        sq->stats.dropped++;
-       mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);
-
        dev_kfree_skb_any(skb);
 
        return NETDEV_TX_OK;
@@ -645,17 +647,15 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
        num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
                                          (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
        if (unlikely(num_dma < 0))
-               goto dma_unmap_wqe_err;
+               goto err_drop;
 
        mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
                             num_bytes, num_dma, wi, cseg);
 
        return NETDEV_TX_OK;
 
-dma_unmap_wqe_err:
+err_drop:
        sq->stats.dropped++;
-       mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);
-
        dev_kfree_skb_any(skb);
 
        return NETDEV_TX_OK;
......
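The en_tx hunks above move the unwind of already-mapped DMA segments into mlx5e_txwqe_build_dsegs itself, which knows exactly how many segments (its local num_dma) were mapped before the failure; the callers' error path now only counts the drop and frees the skb. Below is a small self-contained C sketch of the same "unwind only what was set up" pattern; map_seg/unmap_seg are hypothetical helpers, not driver code.

#include <stdio.h>

#define NSEGS 4

/* Hypothetical resource setup that can fail partway through. */
static int map_seg(int i)
{
        if (i == 2)
                return -1; /* simulate a mapping failure on the third segment */
        printf("mapped segment %d\n", i);
        return 0;
}

static void unmap_seg(int i)
{
        printf("unmapped segment %d\n", i);
}

/* Map all segments; on failure, unwind exactly the ones already mapped
 * before reporting the error, so the caller never has to guess. */
static int build_dsegs(void)
{
        int num_mapped = 0;
        int i;

        for (i = 0; i < NSEGS; i++) {
                if (map_seg(i))
                        goto unmap_err;
                num_mapped++;
        }
        return num_mapped;

unmap_err:
        while (num_mapped--)
                unmap_seg(num_mapped);
        return -1;
}

int main(void)
{
        if (build_dsegs() < 0)
                fprintf(stderr, "build failed, nothing left mapped\n");
        return 0;
}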
@@ -187,6 +187,7 @@ static void del_sw_ns(struct fs_node *node);
 static void del_sw_hw_rule(struct fs_node *node);
 static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
                                struct mlx5_flow_destination *d2);
+static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
 static struct mlx5_flow_rule *
 find_flow_rule(struct fs_fte *fte,
               struct mlx5_flow_destination *dest);
@@ -481,7 +482,8 @@ static void del_sw_hw_rule(struct fs_node *node)
 
        if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
            --fte->dests_size) {
-               modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
+               modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
+                             BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
                fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
                update_fte = true;
                goto out;
@@ -2351,23 +2353,27 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
 
 static int init_root_ns(struct mlx5_flow_steering *steering)
 {
+       int err;
+
        steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
        if (!steering->root_ns)
-               goto cleanup;
+               return -ENOMEM;
 
-       if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node))
-               goto cleanup;
+       err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
+       if (err)
+               goto out_err;
 
        set_prio_attrs(steering->root_ns);
-       if (create_anchor_flow_table(steering))
-               goto cleanup;
+
+       err = create_anchor_flow_table(steering);
+       if (err)
+               goto out_err;
 
        return 0;
 
-cleanup:
-       mlx5_cleanup_fs(steering->dev);
-       return -ENOMEM;
+out_err:
+       cleanup_root_ns(steering->root_ns);
+       steering->root_ns = NULL;
+       return err;
 }
 
 static void clean_tree(struct fs_node *node)
......
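In the fs_core hunk above, a failing init_root_ns now unwinds only the root namespace it built itself (cleanup_root_ns) and clears steering->root_ns, instead of calling the full mlx5_cleanup_fs teardown, which the caller's error path runs again later and would otherwise clean the same table twice. Here is a minimal sketch of that pattern, assuming hypothetical create/cleanup helpers rather than the real flow-steering API.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the flow-steering objects. */
struct root_ns { int dummy; };
struct steering { struct root_ns *root_ns; };

static struct root_ns *create_root_ns(void)
{
        return calloc(1, sizeof(struct root_ns));
}

static void cleanup_root_ns(struct root_ns *ns)
{
        free(ns);
}

static int init_tree(struct root_ns *ns)
{
        (void)ns;
        return -ENOMEM; /* simulate a failure after partial init */
}

static int init_root_ns(struct steering *s)
{
        int err;

        s->root_ns = create_root_ns();
        if (!s->root_ns)
                return -ENOMEM;

        err = init_tree(s->root_ns);
        if (err)
                goto out_err;

        return 0;

out_err:
        /* Undo only what this function created and leave the pointer NULL,
         * so a later full teardown does not free it a second time. */
        cleanup_root_ns(s->root_ns);
        s->root_ns = NULL;
        return err;
}

int main(void)
{
        struct steering s = { 0 };

        if (init_root_ns(&s))
                printf("init failed, root_ns is %p\n", (void *)s.root_ns);
        return 0;
}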
@@ -1284,25 +1284,19 @@ enum {
 };
 
 static inline const struct cpumask *
-mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector)
+mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector)
 {
-       const struct cpumask *mask;
        struct irq_desc *desc;
        unsigned int irq;
        int eqn;
        int err;
 
-       err = mlx5_vector2eqn(dev, MLX5_EQ_VEC_COMP_BASE + vector, &eqn, &irq);
+       err = mlx5_vector2eqn(dev, vector, &eqn, &irq);
        if (err)
                return NULL;
 
        desc = irq_to_desc(irq);
-#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
-       mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
-#else
-       mask = desc->irq_common_data.affinity;
-#endif
-       return mask;
+       return desc->affinity_hint;
 }
 
 #endif /* MLX5_DRIVER_H */
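The driver.h hunk renames the helper to mlx5_get_vector_affinity_hint, returns the IRQ's affinity_hint rather than the (effective) affinity mask, and stops adding MLX5_EQ_VEC_COMP_BASE to the vector before calling mlx5_vector2eqn(). Callers such as the mlx5_ib hunk at the top must still handle a NULL return (vector lookup failed or no hint recorded). Below is a userspace analogue of that contract; the names, the hint table, and the mask representation are all hypothetical and only illustrate the NULL-handling expected of callers.

#include <stdio.h>

#define NVEC 4

/* Hypothetical per-vector CPU hint table; 0 means "no hint recorded". */
static unsigned long vector_affinity_hint[NVEC] = { 0x1, 0x2, 0x0, 0x8 };

/* Return the hint mask for a completion vector, or NULL if the vector
 * is out of range or no hint was set (mirrors the NULL return above). */
static const unsigned long *get_vector_affinity_hint(int vector)
{
        if (vector < 0 || vector >= NVEC || !vector_affinity_hint[vector])
                return NULL;
        return &vector_affinity_hint[vector];
}

int main(void)
{
        for (int v = 0; v < NVEC + 1; v++) {
                const unsigned long *hint = get_vector_affinity_hint(v);

                if (!hint) {
                        /* Fall back when there is no usable hint. */
                        printf("vector %d: no hint, using default CPU\n", v);
                        continue;
                }
                printf("vector %d: hint mask 0x%lx\n", v, *hint);
        }
        return 0;
}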