Commit cb010083 authored by Aviad Yehezkel, committed by Saeed Mahameed

net/mlx5: IPSec, Add support for ESN

Currently, ESN is not supported with IPsec device offload.

This patch adds ESN support to IPsec device offload. It implements a
new xfrm device operation to synchronize the offloading device's ESN
with the SN received by xfrm, and a new QP command to update the SA
state at the points marked in the following diagram:

           ESN 1                    ESN 2                  ESN 3
|-----------*-----------|-----------*-----------|-----------*
^           ^           ^           ^           ^           ^

^ - marks where the QP command is invoked to update the SA ESN state
    machine.
| - marks the start of the ESN scope (0 to 2^32-1). At this point the
    SA ESN overlap bit is moved to zero and the ESN is incremented.
* - marks the middle of the ESN scope (2^31). At this point the SA
    ESN overlap bit is moved to one.
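
For illustration only, here is a minimal, self-contained C sketch of the
half-window update described by the diagram above. The names esn_state and
esn_update are hypothetical, not the driver's API; the driver's actual logic
is mlx5e_ipsec_update_esn_state() in the diff below.

/*
 * Editor's sketch (not part of the commit): a stand-alone model of the
 * half-window ESN update drawn above.
 */
#include <stdbool.h>
#include <stdint.h>

#define ESN_SCOPE_MID 0x80000000u  /* 2^31, middle of the 32-bit SN scope */

struct esn_state {
	uint32_t esn;   /* high 32 bits of the 64-bit extended sequence number */
	bool overlap;   /* set while the replay window sits in the upper half */
};

/* Returns true when the device SA needs a new ESN/overlap update command. */
bool esn_update(struct esn_state *st, uint32_t seq_bottom)
{
	if (st->overlap && seq_bottom < ESN_SCOPE_MID) {
		/* Window wrapped past 2^32-1: bump ESN, clear overlap ("|" marks). */
		st->esn++;
		st->overlap = false;
		return true;
	}
	if (!st->overlap && seq_bottom >= ESN_SCOPE_MID) {
		/* Window crossed the middle of the scope: set overlap ("*" marks). */
		st->overlap = true;
		return true;
	}
	return false;   /* still within the same half; no device update needed */
}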
Signed-off-by: Aviad Yehezkel <aviadye@mellanox.com>
Signed-off-by: Yossef Efraim <yossefe@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Parent 75ef3f55
@@ -38,18 +38,9 @@
#include <linux/module.h>
#include "en.h"
#include "accel/ipsec.h"
#include "en_accel/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
struct mlx5e_ipsec_sa_entry {
struct hlist_node hlist; /* Item in SADB_RX hashtable */
unsigned int handle; /* Handle in SADB_RX */
struct xfrm_state *x;
struct mlx5e_ipsec *ipsec;
struct mlx5_accel_esp_xfrm *xfrm;
void *hw_context;
};
static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x)
{
@@ -121,6 +112,40 @@ static void mlx5e_ipsec_sadb_rx_free(struct mlx5e_ipsec_sa_entry *sa_entry)
ida_simple_remove(&ipsec->halloc, sa_entry->handle);
}
static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct xfrm_replay_state_esn *replay_esn;
u32 seq_bottom;
u8 overlap;
u32 *esn;
if (!(sa_entry->x->props.flags & XFRM_STATE_ESN)) {
sa_entry->esn_state.trigger = 0;
return false;
}
replay_esn = sa_entry->x->replay_esn;
seq_bottom = replay_esn->seq - replay_esn->replay_window + 1;
overlap = sa_entry->esn_state.overlap;
sa_entry->esn_state.esn = xfrm_replay_seqhi(sa_entry->x,
htonl(seq_bottom));
esn = &sa_entry->esn_state.esn;
sa_entry->esn_state.trigger = 1;
if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) {
++(*esn);
sa_entry->esn_state.overlap = 0;
return true;
} else if (unlikely(!overlap &&
(seq_bottom >= MLX5E_IPSEC_ESN_SCOPE_MID))) {
sa_entry->esn_state.overlap = 1;
return true;
}
return false;
}
static void
mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_accel_esp_xfrm_attrs *attrs)
@@ -152,6 +177,14 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
/* iv len */
aes_gcm->icv_len = x->aead->alg_icv_len;
/* esn */
if (sa_entry->esn_state.trigger) {
attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED;
attrs->esn = sa_entry->esn_state.esn;
if (sa_entry->esn_state.overlap)
attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
}
/* rx handle */
attrs->sa_handle = sa_entry->handle;
@@ -187,7 +220,9 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
netdev_info(netdev, "Cannot offload compressed xfrm states\n");
return -EINVAL;
}
if (x->props.flags & XFRM_STATE_ESN) {
if (x->props.flags & XFRM_STATE_ESN &&
!(mlx5_accel_ipsec_device_caps(priv->mdev) &
MLX5_ACCEL_IPSEC_CAP_ESN)) {
netdev_info(netdev, "Cannot offload ESN xfrm states\n");
return -EINVAL;
}
@@ -277,8 +312,14 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
netdev_info(netdev, "Failed adding to SADB_RX: %d\n", err);
goto err_entry;
}
} else {
sa_entry->set_iv_op = (x->props.flags & XFRM_STATE_ESN) ?
mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv;
}
/* check esn */
mlx5e_ipsec_update_esn_state(sa_entry);
/* create xfrm */
mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);
sa_entry->xfrm =
@@ -344,6 +385,7 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x)
return;
if (sa_entry->hw_context) {
flush_workqueue(sa_entry->ipsec->wq);
mlx5_accel_esp_free_hw_context(sa_entry->hw_context);
mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
}
@@ -374,6 +416,12 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv)
ipsec->en_priv->ipsec = ipsec;
ipsec->no_trailer = !!(mlx5_accel_ipsec_device_caps(priv->mdev) &
MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER);
ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0,
priv->netdev->name);
if (!ipsec->wq) {
kfree(ipsec);
return -ENOMEM;
}
netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
return 0;
}
@@ -385,6 +433,9 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
if (!ipsec)
return;
drain_workqueue(ipsec->wq);
destroy_workqueue(ipsec->wq);
ida_destroy(&ipsec->halloc);
kfree(ipsec);
priv->ipsec = NULL;
@@ -405,11 +456,58 @@ static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
return true;
}
struct mlx5e_ipsec_modify_state_work {
struct work_struct work;
struct mlx5_accel_esp_xfrm_attrs attrs;
struct mlx5e_ipsec_sa_entry *sa_entry;
};
static void _update_xfrm_state(struct work_struct *work)
{
int ret;
struct mlx5e_ipsec_modify_state_work *modify_work =
container_of(work, struct mlx5e_ipsec_modify_state_work, work);
struct mlx5e_ipsec_sa_entry *sa_entry = modify_work->sa_entry;
ret = mlx5_accel_esp_modify_xfrm(sa_entry->xfrm,
&modify_work->attrs);
if (ret)
netdev_warn(sa_entry->ipsec->en_priv->netdev,
"Not an IPSec offload device\n");
kfree(modify_work);
}
static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
struct mlx5e_ipsec_modify_state_work *modify_work;
bool need_update;
if (!sa_entry)
return;
need_update = mlx5e_ipsec_update_esn_state(sa_entry);
if (!need_update)
return;
modify_work = kzalloc(sizeof(*modify_work), GFP_ATOMIC);
if (!modify_work)
return;
mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &modify_work->attrs);
modify_work->sa_entry = sa_entry;
INIT_WORK(&modify_work->work, _update_xfrm_state);
WARN_ON(!queue_work(sa_entry->ipsec->wq, &modify_work->work));
}
static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
.xdo_dev_state_add = mlx5e_xfrm_add_state,
.xdo_dev_state_delete = mlx5e_xfrm_del_state,
.xdo_dev_state_free = mlx5e_xfrm_free_state,
.xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
};
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
......
@@ -40,7 +40,11 @@
#include <net/xfrm.h>
#include <linux/idr.h>
#include "accel/ipsec.h"
#define MLX5E_IPSEC_SADB_RX_BITS 10
#define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L
#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
#define MLX5E_METADATA_ETHER_LEN 8
@@ -82,6 +86,25 @@ struct mlx5e_ipsec {
struct ida halloc;
struct mlx5e_ipsec_sw_stats sw_stats;
struct mlx5e_ipsec_stats stats;
struct workqueue_struct *wq;
};
struct mlx5e_ipsec_esn_state {
u32 esn;
u8 trigger: 1;
u8 overlap: 1;
};
struct mlx5e_ipsec_sa_entry {
struct hlist_node hlist; /* Item in SADB_RX hashtable */
struct mlx5e_ipsec_esn_state esn_state;
unsigned int handle; /* Handle in SADB_RX */
struct xfrm_state *x;
struct mlx5e_ipsec *ipsec;
struct mlx5_accel_esp_xfrm *xfrm;
void *hw_context;
void (*set_iv_op)(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo);
};
void mlx5e_ipsec_build_inverse_table(void);
......
@@ -176,7 +176,30 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
}
}
static void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_offload *xo)
void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo)
{
struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
__u32 oseq = replay_esn->oseq;
int iv_offset;
__be64 seqno;
u32 seq_hi;
if (unlikely(skb_is_gso(skb) && oseq < MLX5E_IPSEC_ESN_SCOPE_MID &&
MLX5E_IPSEC_ESN_SCOPE_MID < (oseq - skb_shinfo(skb)->gso_segs))) {
seq_hi = xo->seq.hi - 1;
} else {
seq_hi = xo->seq.hi;
}
/* Place the SN in the IV field */
seqno = cpu_to_be64(xo->seq.low + ((u64)seq_hi << 32));
iv_offset = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr);
skb_store_bits(skb, iv_offset, &seqno, 8);
}
void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo)
{
int iv_offset;
__be64 seqno;
@@ -228,6 +251,7 @@ struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
struct xfrm_offload *xo = xfrm_offload(skb);
struct mlx5e_ipsec_metadata *mdata;
struct mlx5e_ipsec_sa_entry *sa_entry;
struct xfrm_state *x;
if (!xo)
@@ -262,7 +286,8 @@ struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
goto drop;
}
mlx5e_ipsec_set_swp(skb, &wqe->eth, x->props.mode, xo);
mlx5e_ipsec_set_iv(skb, xo);
sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
sa_entry->set_iv_op(skb, x, xo);
mlx5e_ipsec_set_metadata(skb, mdata, xo);
return skb;
......
@@ -37,6 +37,7 @@
#ifdef CONFIG_MLX5_EN_IPSEC
#include <linux/skbuff.h>
#include <net/xfrm.h>
#include "en.h"
struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
@@ -46,6 +47,10 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_ipsec_inverse_table_init(void);
bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
netdev_features_t features);
void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo);
void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo);
struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
struct mlx5e_tx_wqe *wqe,
struct sk_buff *skb);
......
@@ -347,6 +347,11 @@ u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, rx_no_trailer))
ret |= MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER;
if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esn)) {
ret |= MLX5_ACCEL_IPSEC_CAP_ESN;
ret |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN;
}
return ret;
}
@@ -470,6 +475,23 @@ mlx5_fpga_ipsec_build_hw_xfrm(struct mlx5_core_dev *mdev,
memcpy(&hw_sa->ipsec_sa_v1.gcm.salt, &aes_gcm->salt,
sizeof(aes_gcm->salt));
/* esn */
if (xfrm_attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) {
hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_ESN_EN;
hw_sa->ipsec_sa_v1.flags |=
(xfrm_attrs->flags &
MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ?
MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0;
hw_sa->esn = htonl(xfrm_attrs->esn);
} else {
hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_ESN_EN;
hw_sa->ipsec_sa_v1.flags &=
~(xfrm_attrs->flags &
MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ?
MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0;
hw_sa->esn = 0;
}
/* rx handle */
hw_sa->ipsec_sa_v1.sw_sa_handle = htonl(xfrm_attrs->sa_handle);
......
@@ -110,6 +110,8 @@ enum mlx5_accel_ipsec_cap {
MLX5_ACCEL_IPSEC_CAP_IPV6 = 1 << 3,
MLX5_ACCEL_IPSEC_CAP_LSO = 1 << 4,
MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER = 1 << 5,
MLX5_ACCEL_IPSEC_CAP_ESN = 1 << 6,
MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN = 1 << 7,
};
#ifdef CONFIG_MLX5_ACCEL
......
@@ -468,6 +468,8 @@ enum mlx5_ifc_fpga_ipsec_sa_flags {
} __packed;
enum mlx5_ifc_fpga_ipsec_sa_flags {
MLX5_FPGA_IPSEC_SA_ESN_EN = BIT(0),
MLX5_FPGA_IPSEC_SA_ESN_OVERLAP = BIT(1),
MLX5_FPGA_IPSEC_SA_IPV6 = BIT(2),
MLX5_FPGA_IPSEC_SA_DIR_SX = BIT(3),
MLX5_FPGA_IPSEC_SA_SPI_EN = BIT(4),
......