Commit 722943a5 authored by David S. Miller

Merge tag 'mlx5-fixes-2020-01-24' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
Mellanox, mlx5 fixes 2020-01-24

This series introduces some fixes to mlx5 driver.

Please pull and let me know if there is any problem.

Merge conflict: once merged with net-next, a contextual conflict will
appear in drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
since the code moved in net-next.
To resolve, just delete ALL of the conflicting hunk from net.
So sorry for the small mess ..

For -stable v5.4:
 ('net/mlx5: Update the list of the PCI supported devices')
 ('net/mlx5: Fix lowest FDB pool size')
 ('net/mlx5e: kTLS, Fix corner-case checks in TX resync flow')
 ('net/mlx5e: kTLS, Do not send decrypted-marked SKBs via non-accel path')
 ('net/mlx5: Eswitch, Prevent ingress rate configuration of uplink rep')
 ('net/mlx5e: kTLS, Remove redundant posts in TX resync flow')
 ('net/mlx5: DR, Enable counter on non-fwd-dest objects')
 ('net/mlx5: DR, use non preemptible call to get the current cpu number')
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -180,7 +180,7 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
 struct tx_sync_info {
         u64 rcd_sn;
-        s32 sync_len;
+        u32 sync_len;
         int nr_frags;
         skb_frag_t frags[MAX_SKB_FRAGS];
 };
@@ -193,13 +193,14 @@ enum mlx5e_ktls_sync_retval {
 static enum mlx5e_ktls_sync_retval
 tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
-                 u32 tcp_seq, struct tx_sync_info *info)
+                 u32 tcp_seq, int datalen, struct tx_sync_info *info)
 {
         struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
         enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
         struct tls_record_info *record;
         int remaining, i = 0;
         unsigned long flags;
+        bool ends_before;

         spin_lock_irqsave(&tx_ctx->lock, flags);
         record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);
@@ -209,9 +210,21 @@ tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
                 goto out;
         }

         if (unlikely(tcp_seq < tls_record_start_seq(record))) {
-                ret = tls_record_is_start_marker(record) ?
-                      MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
+                /* There are the following cases:
+                 * 1. packet ends before start marker: bypass offload.
+                 * 2. packet starts before start marker and ends after it: drop,
+                 *    not supported, breaks contract with kernel.
+                 * 3. packet ends before tls record info starts: drop,
+                 *    this packet was already acknowledged and its record info
+                 *    was released.
+                 */
+                ends_before = before(tcp_seq + datalen, tls_record_start_seq(record));
+
+                if (unlikely(tls_record_is_start_marker(record))) {
+                        ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
+                        goto out;
+                } else if (ends_before) {
+                        ret = MLX5E_KTLS_SYNC_FAIL;
+                        goto out;
+                }
         }
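The classification above leans on the kernel's wraparound-safe TCP sequence comparison: before(a, b) takes a signed 32-bit difference, so it stays correct when sequence numbers wrap past 2^32. A minimal userspace sketch of the corner cases (the before() definition mirrors include/net/tcp.h; the sequence values are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe "seq1 is before seq2", as in include/net/tcp.h. */
static int before(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) < 0;
}

int main(void)
{
        uint32_t record_start = 1000; /* tls_record_start_seq(record) */

        /* Case 1: packet [900, 950) ends before the marker: bypass offload. */
        printf("ends_before=%d\n", before(900 + 50, record_start));  /* 1 */

        /* Case 2: packet [900, 1100) straddles the marker: must be dropped. */
        printf("ends_before=%d\n", before(900 + 200, record_start)); /* 0 */

        /* The comparison survives sequence-number wraparound. */
        printf("ends_before=%d\n", before(4294967290u + 4, 2));      /* 1 */

        return 0;
}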
@@ -337,7 +350,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
         u8 num_wqebbs;
         int i = 0;

-        ret = tx_sync_info_get(priv_tx, seq, &info);
+        ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
         if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
                 if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
                         stats->tls_skip_no_sync_data++;
@@ -351,14 +364,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
                 goto err_out;
         }

-        if (unlikely(info.sync_len < 0)) {
-                if (likely(datalen <= -info.sync_len))
-                        return MLX5E_KTLS_SYNC_DONE;
-
-                stats->tls_drop_bypass_req++;
-                goto err_out;
-        }
-
         stats->tls_ooo++;

         tx_post_resync_params(sq, priv_tx, info.rcd_sn);
@@ -378,8 +383,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
         if (unlikely(contig_wqebbs_room < num_wqebbs))
                 mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);

-        tx_post_resync_params(sq, priv_tx, info.rcd_sn);
-
         for (; i < info.nr_frags; i++) {
                 unsigned int orig_fsz, frag_offset = 0, n = 0;
                 skb_frag_t *f = &info.frags[i];
@@ -455,12 +458,18 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
                 enum mlx5e_ktls_sync_retval ret =
                         mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);

-                if (likely(ret == MLX5E_KTLS_SYNC_DONE))
+                switch (ret) {
+                case MLX5E_KTLS_SYNC_DONE:
                         *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
-                else if (ret == MLX5E_KTLS_SYNC_FAIL)
+                        break;
+                case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
+                        if (likely(!skb->decrypted))
+                                goto out;
+                        WARN_ON_ONCE(1);
+                        /* fall-through */
+                default: /* MLX5E_KTLS_SYNC_FAIL */
                         goto err_out;
-                else /* ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA */
-                        goto out;
+                }
         }

         priv_tx->expected_seq = seq + datalen;
......
@@ -4036,6 +4036,13 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
         u32 rate_mbps;
         int err;

+        vport_num = rpriv->rep->vport;
+        if (vport_num >= MLX5_VPORT_ECPF) {
+                NL_SET_ERR_MSG_MOD(extack,
+                                   "Ingress rate limit is supported only for Eswitch ports connected to VFs");
+                return -EOPNOTSUPP;
+        }
+
         esw = priv->mdev->priv.eswitch;
         /* rate is given in bytes/sec.
          * First convert to bits/sec and then round to the nearest mbit/secs.
@@ -4044,8 +4051,6 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
          * 1 mbit/sec.
          */
         rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
-        vport_num = rpriv->rep->vport;
-
         err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
         if (err)
                 NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
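For reference, the conversion in apply_police_params() turns a bytes/sec policer rate into whole mbits/sec: multiply by 8 for bits, add 500000 to round to the nearest megabit, and never let a non-zero request collapse to zero. A standalone sketch of the same u32 arithmetic (to_mbps() is an illustrative name, not a driver function):

#include <stdint.h>
#include <stdio.h>

/* bytes/sec -> mbit/sec, rounded to nearest, clamped to >= 1 when non-zero. */
static uint32_t to_mbps(uint32_t rate)
{
        uint32_t mbps;

        if (!rate)
                return 0;
        mbps = (rate * 8 + 500000) / 1000000;
        return mbps > 1 ? mbps : 1;
}

int main(void)
{
        printf("%u\n", to_mbps(0));         /* 0: rate limiting disabled */
        printf("%u\n", to_mbps(1000));      /* 1: 8 kbit/s clamps up to 1 */
        printf("%u\n", to_mbps(125000000)); /* 1000: 1 Gbit/s */
        return 0;
}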
......
@@ -1928,8 +1928,10 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
         struct mlx5_vport *vport;
         int i;

-        mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
+        mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
                 memset(&vport->info, 0, sizeof(vport->info));
+                vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
+        }
 }

 /* Public E-Switch API */
......
@@ -866,7 +866,7 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
  */
 #define ESW_SIZE (16 * 1024 * 1024)
 const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
-                                    64 * 1024, 4 * 1024 };
+                                    64 * 1024, 128 };

 static int
 get_sz_from_pool(struct mlx5_eswitch *esw)
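The fix makes the last, smallest FDB size bucket genuinely small: table sizes are drawn from ESW_POOLS, listed from large to small, so a 128-entry bucket keeps tiny flow tables from consuming 4K-entry chunks of the 16M ESW_SIZE budget. A hedged sketch of one way such bucket bookkeeping can work (pool_left[] and pick_pool_size() are illustrative, not the driver's code):

#include <stdio.h>

static const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
                                           64 * 1024, 128 };
static unsigned int pool_left[4] = { 0, 0, 0, 4 }; /* illustrative capacities */

/* Take the first (largest) bucket that still has capacity left. */
static unsigned int pick_pool_size(void)
{
        unsigned int i;

        for (i = 0; i < 4; i++) {
                if (pool_left[i]) {
                        pool_left[i]--;
                        return ESW_POOLS[i];
                }
        }
        return 0; /* no capacity left in any bucket */
}

int main(void)
{
        /* With only the lowest bucket free, a small table now costs 128
         * entries rather than 4 * 1024. */
        printf("allocated table size: %u\n", pick_pool_size());
        return 0;
}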
@@ -1377,7 +1377,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
                 return -EINVAL;
         }

-        mlx5_eswitch_disable(esw, false);
+        mlx5_eswitch_disable(esw, true);
         mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
         err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
         if (err) {
@@ -2220,7 +2220,8 @@ int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type
 int esw_offloads_enable(struct mlx5_eswitch *esw)
 {
-        int err;
+        struct mlx5_vport *vport;
+        int err, i;

         if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
             MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
@@ -2237,6 +2238,10 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
         if (err)
                 goto err_vport_metadata;

+        /* Representor will control the vport link state */
+        mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
+                vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
+
         err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
         if (err)
                 goto err_vports;
@@ -2266,7 +2271,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
 {
         int err, err1;

-        mlx5_eswitch_disable(esw, false);
+        mlx5_eswitch_disable(esw, true);
         err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
         if (err) {
                 NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
......
@@ -1563,6 +1563,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
         { PCI_VDEVICE(MELLANOX, 0x101d) },                    /* ConnectX-6 Dx */
         { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */
         { PCI_VDEVICE(MELLANOX, 0x101f) },                    /* ConnectX-6 LX */
+        { PCI_VDEVICE(MELLANOX, 0x1021) },                    /* ConnectX-7 */
         { PCI_VDEVICE(MELLANOX, 0xa2d2) },                    /* BlueField integrated ConnectX-5 network controller */
         { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
         { PCI_VDEVICE(MELLANOX, 0xa2d6) },                    /* BlueField-2 integrated ConnectX-6 Dx network controller */
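One table line is all the driver needs to claim a new device: PCI_VDEVICE(MELLANOX, 0x1021) expands to Mellanox's PCI vendor ID (0x15b3) plus the device ID, with the subsystem IDs wildcarded. A userspace sketch of that match logic (struct dev_id and its values are illustrative stand-ins for struct pci_device_id):

#include <stdint.h>
#include <stdio.h>

#define ANY_ID 0xffffffffu

struct dev_id {
        uint32_t vendor, device, subvendor, subdevice;
};

/* Mirrors the effect of PCI_VDEVICE(): match on vendor + device,
 * wildcard the subsystem IDs. */
static const struct dev_id table[] = {
        { 0x15b3, 0x101f, ANY_ID, ANY_ID }, /* ConnectX-6 LX */
        { 0x15b3, 0x1021, ANY_ID, ANY_ID }, /* ConnectX-7, the new entry */
};

int main(void)
{
        uint32_t vendor = 0x15b3, device = 0x1021; /* a ConnectX-7 appears */
        unsigned int i;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if (table[i].vendor == vendor && table[i].device == device)
                        printf("entry %u matches: driver will probe\n", i);
        return 0;
}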
......
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 /* Copyright (c) 2019 Mellanox Technologies. */

+#include <linux/smp.h>
 #include "dr_types.h"

 #define QUEUE_SIZE 128
@@ -729,7 +730,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
         if (!in)
                 goto err_cqwq;

-        vector = smp_processor_id() % mlx5_comp_vectors_count(mdev);
+        vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev);
         err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn);
         if (err) {
                 kvfree(in);
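smp_processor_id() insists the caller cannot migrate (it warns under CONFIG_DEBUG_PREEMPT when called from preemptible context), while raw_smp_processor_id() skips that check. dr_create_cq() runs preemptible, and the CPU number is only used to spread CQs across completion vectors, so a possibly-stale value is acceptable. A userspace analogue using sched_getcpu() (the vector count is an assumed example value):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
        int ncomp_vectors = 8; /* stand-in for mlx5_comp_vectors_count() */
        int cpu = sched_getcpu();

        if (cpu < 0)
                cpu = 0; /* sched_getcpu() can fail; any value is fine here */

        /* The thread may migrate right after sched_getcpu() returns; the
         * chosen vector is then merely arbitrary, which is acceptable for
         * load spreading. */
        printf("using completion vector %d\n", cpu % ncomp_vectors);
        return 0;
}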
......
@@ -352,26 +352,16 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
         if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
                 list_for_each_entry(dst, &fte->node.children, node.list) {
                         enum mlx5_flow_destination_type type = dst->dest_attr.type;
-                        u32 id;

                         if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
                                 err = -ENOSPC;
                                 goto free_actions;
                         }

-                        switch (type) {
-                        case MLX5_FLOW_DESTINATION_TYPE_COUNTER:
-                                id = dst->dest_attr.counter_id;
+                        if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+                                continue;

-                                tmp_action =
-                                        mlx5dr_action_create_flow_counter(id);
-                                if (!tmp_action) {
-                                        err = -ENOMEM;
-                                        goto free_actions;
-                                }
-                                fs_dr_actions[fs_dr_num_actions++] = tmp_action;
-                                actions[num_actions++] = tmp_action;
-                                break;
+                        switch (type) {
                         case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
                                 tmp_action = create_ft_action(dev, dst);
                                 if (!tmp_action) {
@@ -397,6 +387,32 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
                 }
         }

+        if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+                list_for_each_entry(dst, &fte->node.children, node.list) {
+                        u32 id;
+
+                        if (dst->dest_attr.type !=
+                            MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+                                continue;
+
+                        if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+                                err = -ENOSPC;
+                                goto free_actions;
+                        }
+
+                        id = dst->dest_attr.counter_id;
+                        tmp_action =
+                                mlx5dr_action_create_flow_counter(id);
+                        if (!tmp_action) {
+                                err = -ENOMEM;
+                                goto free_actions;
+                        }
+
+                        fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+                        actions[num_actions++] = tmp_action;
+                }
+        }
+
         params.match_sz = match_sz;
         params.match_buf = (u64 *)fte->val;
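The restructuring above turns action collection into two passes over the rule's destination list: the FWD_DEST pass now skips counters entirely, and a second pass, gated on the COUNT action bit rather than on forwarding, creates the counter actions. That way a rule that only counts, with no forward destination, still gets its counter. A compact sketch of the two-pass pattern (the flag values and destination list are invented for illustration):

#include <stdio.h>

#define ACTION_FWD_DEST (1u << 0) /* illustrative flag values */
#define ACTION_COUNT    (1u << 1)

enum dest_type { DEST_VPORT, DEST_COUNTER };

struct dest {
        enum dest_type type;
        unsigned int id;
};

int main(void)
{
        /* A count-only rule: no forward destinations, one counter. */
        unsigned int action = ACTION_COUNT;
        struct dest dests[] = { { DEST_COUNTER, 7 } };
        unsigned int i, n = sizeof(dests) / sizeof(dests[0]);

        if (action & ACTION_FWD_DEST)      /* pass 1: forwarding dests only */
                for (i = 0; i < n; i++) {
                        if (dests[i].type == DEST_COUNTER)
                                continue; /* counters handled in pass 2 */
                        printf("fwd action for dest %u\n", dests[i].id);
                }

        if (action & ACTION_COUNT)         /* pass 2: counters only */
                for (i = 0; i < n; i++) {
                        if (dests[i].type != DEST_COUNTER)
                                continue;
                        printf("counter action for id %u\n", dests[i].id);
                }
        return 0;
}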
......