Commit 23f30c41 authored by David S. Miller

Merge branch 'mlx5-TLS-TX-HW-offload-support'

Tariq Toukan says:

====================
mlx5 TLS TX HW offload support

This series, from Eran and me, adds TLS TX HW offload support to
the mlx5 driver.

This offloads the kTLS encryption process from the kernel to the
Mellanox NIC, saving CPU cycles and improving utilization.

Upon a new TLS connection request, the driver is responsible for
creating a dedicated HW context and configuring it according to the
crypto info, so the HW can do the encryption itself.
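
For context, the offload is requested from user space through the
standard kTLS socket API; below is a minimal sketch (not part of this
series; key/iv/salt/rec_seq are assumed to come from a user-space
TLS 1.2 handshake) of the call sequence that ultimately reaches the
driver's tls_dev_add() callback:

#include <string.h>
#include <sys/socket.h>
#include <netinet/tcp.h>
#include <linux/tls.h>

#ifndef SOL_TLS
#define SOL_TLS 282	/* from include/linux/socket.h */
#endif

/* fd is a connected TCP socket */
struct tls12_crypto_info_aes_gcm_128 ci = {
	.info = {
		.version = TLS_1_2_VERSION,
		.cipher_type = TLS_CIPHER_AES_GCM_128,
	},
};

memcpy(ci.key, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
memcpy(ci.iv, iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
memcpy(ci.salt, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
memcpy(ci.rec_seq, rec_seq, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);

/* attach the TLS ULP, then install the TX key; with NETIF_F_HW_TLS_TX
 * set, the TLS core offers the connection to the netdev
 */
setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));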

When the HW context gets out of sync (e.g. due to packet
retransmission), the driver is responsible for the re-sync process.
This is done by posting special resync descriptors to the HW.
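
In code terms, the TX path detects the out-of-sync condition by
comparing the packet's TCP sequence number against the expected one;
a condensed sketch of the logic in mlx5e_ktls_handle_tx_skb() further
below (not the verbatim driver code):

	seq = ntohl(tcp_hdr(skb)->seq);
	if (unlikely(priv_tx->expected_seq != seq)) {
		/* out of sync, e.g. a retransmission: post resync
		 * params and DUMP WQEs before the actual data
		 */
		skb = mlx5e_ktls_tx_handle_ooo(priv_tx, sq, skb, seq);
	}
	priv_tx->expected_seq = seq + datalen;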

The feature is supported on Mellanox ConnectX-6 Dx and newer NICs.
The series was tested on the SimX simulator.

The series was generated against net-next commit [1], with Saeed's pull request [2] merged in:

[1] c4cde580 Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
[2] git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux.git tags/mlx5-updates-2019-07-04-v2

Changes from the last pull request:
Addressed review comments from Jakub:
Patch 4:
- Replace the zeroing memset with a call to memzero_explicit().
Patch 11:
- Fix stats counters names.
- Drop TLS SKBs with a non-matching netdev.
====================
Acked-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
......@@ -97,26 +97,60 @@ config MLX5_CORE_IPOIB
---help---
MLX5 IPoIB offloads & acceleration support.
config MLX5_FPGA_IPSEC
bool "Mellanox Technologies IPsec Innova support"
depends on MLX5_CORE
depends on MLX5_FPGA
default n
help
Build IPsec support for the Innova family of network cards by Mellanox
Technologies. Innova network cards are comprised of a ConnectX chip
and an FPGA chip on one board. If you select this option, the
mlx5_core driver will include the Innova FPGA core and allow building
sandbox-specific client drivers.
config MLX5_EN_IPSEC
bool "IPSec XFRM cryptography-offload accelaration"
depends on MLX5_ACCEL
depends on MLX5_CORE_EN
depends on XFRM_OFFLOAD
depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
depends on MLX5_FPGA_IPSEC
default n
---help---
help
Build support for IPsec cryptography-offload acceleration in the NIC.
Note: Support for hardware with this capability needs to be selected
for this option to become available.
config MLX5_EN_TLS
bool "TLS cryptography-offload accelaration"
config MLX5_FPGA_TLS
bool "Mellanox Technologies TLS Innova support"
depends on TLS_DEVICE
depends on TLS=y || MLX5_CORE=m
depends on MLX5_FPGA
default n
help
Build TLS support for the Innova family of network cards by Mellanox
Technologies. Innova network cards are comprised of a ConnectX chip
and an FPGA chip on one board. If you select this option, the
mlx5_core driver will include the Innova FPGA core and allow building
sandbox-specific client drivers.
config MLX5_TLS
bool "Mellanox Technologies TLS Connect-X support"
depends on MLX5_CORE_EN
depends on TLS_DEVICE
depends on TLS=y || MLX5_CORE=m
depends on MLX5_ACCEL
select MLX5_ACCEL
default n
---help---
Build support for TLS cryptography-offload acceleration in the NIC.
Note: Support for hardware with this capability needs to be selected
for this option to become available.
help
Build TLS support for the Connect-X family of network cards by Mellanox
Technologies.
config MLX5_EN_TLS
bool "TLS cryptography-offload accelaration"
depends on MLX5_CORE_EN
depends on MLX5_FPGA_TLS || MLX5_TLS
default y
help
Build support for TLS cryptography-offload acceleration in the NIC.
Note: Support for hardware with this capability needs to be selected
for this option to become available.
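
For reference, a minimal .config fragment that enables the new
Connect-X kTLS TX path would look like the following (a sketch derived
from the dependencies above; substitute CONFIG_MLX5_FPGA_TLS for
CONFIG_MLX5_TLS on Innova hardware):

CONFIG_TLS=y
CONFIG_TLS_DEVICE=y
CONFIG_MLX5_CORE=m
CONFIG_MLX5_CORE_EN=y
CONFIG_MLX5_TLS=y
CONFIG_MLX5_EN_TLS=y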
......@@ -53,12 +53,14 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib
#
# Accelerations & FPGA
#
mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o accel/tls.o
mlx5_core-$(CONFIG_MLX5_FPGA_IPSEC) += fpga/ipsec.o
mlx5_core-$(CONFIG_MLX5_FPGA_TLS) += fpga/tls.o
mlx5_core-$(CONFIG_MLX5_ACCEL) += lib/crypto.o accel/tls.o accel/ipsec.o
mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \
fpga/ipsec.o fpga/tls.o
mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o
mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
en_accel/ipsec_stats.o
mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o
mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o \
en_accel/ktls.o en_accel/ktls_tx.o
......@@ -31,6 +31,8 @@
*
*/
#ifdef CONFIG_MLX5_FPGA_IPSEC
#include <linux/mlx5/device.h>
#include "accel/ipsec.h"
......@@ -74,6 +76,11 @@ int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
return mlx5_fpga_ipsec_init(mdev);
}
void mlx5_accel_ipsec_build_fs_cmds(void)
{
mlx5_fpga_ipsec_build_fs_cmds();
}
void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev)
{
mlx5_fpga_ipsec_cleanup(mdev);
......@@ -107,3 +114,5 @@ int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
return mlx5_fpga_esp_modify_xfrm(xfrm, attrs);
}
EXPORT_SYMBOL_GPL(mlx5_accel_esp_modify_xfrm);
#endif
......@@ -37,7 +37,7 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/accel.h>
#ifdef CONFIG_MLX5_ACCEL
#ifdef CONFIG_MLX5_FPGA_IPSEC
#define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \
MLX5_ACCEL_IPSEC_CAP_DEVICE)
......@@ -54,6 +54,7 @@ void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
void mlx5_accel_esp_free_hw_context(void *context);
int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev);
void mlx5_accel_ipsec_build_fs_cmds(void);
void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev);
#else
......@@ -79,6 +80,10 @@ static inline int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
return 0;
}
static inline void mlx5_accel_ipsec_build_fs_cmds(void)
{
}
static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev)
{
}
......
......@@ -35,6 +35,9 @@
#include "accel/tls.h"
#include "mlx5_core.h"
#include "lib/mlx5.h"
#ifdef CONFIG_MLX5_FPGA_TLS
#include "fpga/tls.h"
int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
......@@ -61,7 +64,8 @@ int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
{
return mlx5_fpga_is_tls_device(mdev);
return mlx5_fpga_is_tls_device(mdev) ||
mlx5_accel_is_ktls_device(mdev);
}
u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev)
......@@ -78,3 +82,42 @@ void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev)
{
mlx5_fpga_tls_cleanup(mdev);
}
#endif
#ifdef CONFIG_MLX5_TLS
int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
struct tls_crypto_info *crypto_info,
u32 *p_key_id)
{
u32 sz_bytes;
void *key;
switch (crypto_info->cipher_type) {
case TLS_CIPHER_AES_GCM_128: {
struct tls12_crypto_info_aes_gcm_128 *info =
(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
key = info->key;
sz_bytes = sizeof(info->key);
break;
}
case TLS_CIPHER_AES_GCM_256: {
struct tls12_crypto_info_aes_gcm_256 *info =
(struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
key = info->key;
sz_bytes = sizeof(info->key);
break;
}
default:
return -EINVAL;
}
return mlx5_create_encryption_key(mdev, key, sz_bytes, p_key_id);
}
void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id)
{
mlx5_destroy_encryption_key(mdev, key_id);
}
#endif
......@@ -37,8 +37,51 @@
#include <linux/mlx5/driver.h>
#include <linux/tls.h>
#ifdef CONFIG_MLX5_ACCEL
#ifdef CONFIG_MLX5_TLS
int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
struct tls_crypto_info *crypto_info,
u32 *p_key_id);
void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id);
static inline bool mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev)
{
if (!MLX5_CAP_GEN(mdev, tls))
return false;
if (!MLX5_CAP_GEN(mdev, log_max_dek))
return false;
return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
}
static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
struct tls_crypto_info *crypto_info)
{
switch (crypto_info->cipher_type) {
case TLS_CIPHER_AES_GCM_128:
if (crypto_info->version == TLS_1_2_VERSION)
return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
break;
}
return false;
}
#else
static inline int
mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
struct tls_crypto_info *crypto_info,
u32 *p_key_id) { return -ENOTSUPP; }
static inline void
mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id) {}
static inline bool
mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev) { return false; }
static inline bool
mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
struct tls_crypto_info *crypto_info) { return false; }
#endif
#ifdef CONFIG_MLX5_FPGA_TLS
enum {
MLX5_ACCEL_TLS_TX = BIT(0),
MLX5_ACCEL_TLS_RX = BIT(1),
......@@ -84,11 +127,13 @@ static inline void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
bool direction_sx) { }
static inline int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle,
u32 seq, u64 rcd_sn) { return 0; }
static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev) { return false; }
static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
{
return mlx5_accel_is_ktls_device(mdev);
}
static inline u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) { return 0; }
static inline int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) { return 0; }
static inline void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev) { }
#endif
#endif /* __MLX5_ACCEL_TLS_H__ */
......@@ -209,7 +209,10 @@ struct mlx5e_umr_wqe {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_umr_ctrl_seg uctrl;
struct mlx5_mkey_seg mkc;
struct mlx5_mtt inline_mtts[0];
union {
struct mlx5_mtt inline_mtts[0];
u8 tls_static_params_ctx[0];
};
};
extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];
......@@ -333,6 +336,9 @@ struct mlx5e_tx_wqe_info {
u32 num_bytes;
u8 num_wqebbs;
u8 num_dma;
#ifdef CONFIG_MLX5_EN_TLS
skb_frag_t *resync_dump_frag;
#endif
};
enum mlx5e_dma_map_type {
......@@ -390,6 +396,7 @@ struct mlx5e_txqsq {
void __iomem *uar_map;
struct netdev_queue *txq;
u32 sqn;
u16 stop_room;
u8 min_inline_mode;
struct device *pdev;
__be32 mkey_be;
......@@ -549,12 +556,6 @@ struct mlx5e_icosq {
struct mlx5e_channel *channel;
} ____cacheline_aligned_in_smp;
static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{
return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
}
struct mlx5e_wqe_frag_info {
struct mlx5e_dma_info *di;
u32 offset;
......@@ -1023,102 +1024,6 @@ static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
MLX5_CAP_ETH(mdev, swp_csum) && MLX5_CAP_ETH(mdev, swp_lso);
}
struct mlx5e_swp_spec {
__be16 l3_proto;
u8 l4_proto;
u8 is_tun;
__be16 tun_l3_proto;
u8 tun_l4_proto;
};
static inline void
mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
struct mlx5e_swp_spec *swp_spec)
{
/* SWP offsets are in 2-bytes words */
eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
if (swp_spec->l3_proto == htons(ETH_P_IPV6))
eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
if (swp_spec->l4_proto) {
eseg->swp_outer_l4_offset = skb_transport_offset(skb) / 2;
if (swp_spec->l4_proto == IPPROTO_UDP)
eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
}
if (swp_spec->is_tun) {
eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
if (swp_spec->tun_l3_proto == htons(ETH_P_IPV6))
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
} else { /* typically for ipsec when xfrm mode != XFRM_MODE_TUNNEL */
eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
if (swp_spec->l3_proto == htons(ETH_P_IPV6))
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
}
switch (swp_spec->tun_l4_proto) {
case IPPROTO_UDP:
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
/* fall through */
case IPPROTO_TCP:
eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
break;
}
}
static inline void mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe **wqe,
u16 *pi)
{
struct mlx5_wq_cyc *wq = &sq->wq;
*pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
*wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
memset(*wqe, 0, sizeof(**wqe));
}
static inline
struct mlx5e_tx_wqe *mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
memset(cseg, 0, sizeof(*cseg));
cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
cseg->qpn_ds = cpu_to_be32((sqn << 8) | 0x01);
(*pc)++;
return wqe;
}
static inline
void mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc,
void __iomem *uar_map,
struct mlx5_wqe_ctrl_seg *ctrl)
{
ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
/* ensure wqe is visible to device before updating doorbell record */
dma_wmb();
*wq->db = cpu_to_be32(pc);
/* ensure doorbell record is visible to device before ringing the
* doorbell
*/
wmb();
mlx5_write64((__be32 *)ctrl, uar_map);
}
static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
struct mlx5_core_cq *mcq;
mcq = &cq->mcq;
mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
}
extern const struct ethtool_ops mlx5e_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
......@@ -1154,8 +1059,7 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
u32 underlay_qpn, u32 *tisn);
int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
int mlx5e_create_tises(struct mlx5e_priv *priv);
......
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */
#ifndef __MLX5_EN_TXRX_H___
#define __MLX5_EN_TXRX_H___
#include "en.h"
#define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
MLX5E_SQ_NOPS_ROOM)
#ifndef CONFIG_MLX5_EN_TLS
#define MLX5E_SQ_TLS_ROOM (0)
#else
/* TLS offload requires additional stop_room for:
* - a resync SKB.
* kTLS offload requires additional stop_room for:
* - static params WQE,
* - progress params WQE, and
* - resync DUMP per frag.
*/
#define MLX5E_SQ_TLS_ROOM \
(MLX5_SEND_WQE_MAX_WQEBBS + \
MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS + \
MAX_SKB_FRAGS * MLX5E_KTLS_MAX_DUMP_WQEBBS)
#endif
#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{
return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
}
static inline void *
mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq, size_t size, u16 *pi)
{
struct mlx5_wq_cyc *wq = &sq->wq;
void *wqe;
*pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
memset(wqe, 0, size);
return wqe;
}
static inline struct mlx5e_tx_wqe *
mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
memset(cseg, 0, sizeof(*cseg));
cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
cseg->qpn_ds = cpu_to_be32((sqn << 8) | 0x01);
(*pc)++;
return wqe;
}
static inline struct mlx5e_tx_wqe *
mlx5e_post_nop_fence(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
memset(cseg, 0, sizeof(*cseg));
cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
cseg->qpn_ds = cpu_to_be32((sqn << 8) | 0x01);
cseg->fm_ce_se = MLX5_FENCE_MODE_INITIATOR_SMALL;
(*pc)++;
return wqe;
}
static inline void
mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq, struct mlx5_wq_cyc *wq,
u16 pi, u16 nnops)
{
struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];
edge_wi = wi + nnops;
/* fill sq frag edge with nops to avoid wqe wrapping two pages */
for (; wi < edge_wi; wi++) {
wi->skb = NULL;
wi->num_wqebbs = 1;
mlx5e_post_nop(wq, sq->sqn, &sq->pc);
}
sq->stats->nop += nnops;
}
static inline void
mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
struct mlx5_wqe_ctrl_seg *ctrl)
{
ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
/* ensure wqe is visible to device before updating doorbell record */
dma_wmb();
*wq->db = cpu_to_be32(pc);
/* ensure doorbell record is visible to device before ringing the
* doorbell
*/
wmb();
mlx5_write64((__be32 *)ctrl, uar_map);
}
static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5e_tx_wqe *wqe)
{
return !!wqe->ctrl.tisn;
}
static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
struct mlx5_core_cq *mcq;
mcq = &cq->mcq;
mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
}
static inline struct mlx5e_sq_dma *
mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}
static inline void
mlx5e_dma_push(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size,
enum mlx5e_dma_map_type map_type)
{
struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);
dma->addr = addr;
dma->size = size;
dma->type = map_type;
}
static inline void
mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
{
switch (dma->type) {
case MLX5E_DMA_MAP_SINGLE:
dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
break;
case MLX5E_DMA_MAP_PAGE:
dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
break;
default:
WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
}
}
/* SW parser related functions */
struct mlx5e_swp_spec {
__be16 l3_proto;
u8 l4_proto;
u8 is_tun;
__be16 tun_l3_proto;
u8 tun_l4_proto;
};
static inline void
mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
struct mlx5e_swp_spec *swp_spec)
{
/* SWP offsets are in 2-bytes words */
eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
if (swp_spec->l3_proto == htons(ETH_P_IPV6))
eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
if (swp_spec->l4_proto) {
eseg->swp_outer_l4_offset = skb_transport_offset(skb) / 2;
if (swp_spec->l4_proto == IPPROTO_UDP)
eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
}
if (swp_spec->is_tun) {
eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
if (swp_spec->tun_l3_proto == htons(ETH_P_IPV6))
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
} else { /* typically for ipsec when xfrm mode != XFRM_MODE_TUNNEL */
eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
if (swp_spec->l3_proto == htons(ETH_P_IPV6))
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
}
switch (swp_spec->tun_l4_proto) {
case IPPROTO_UDP:
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
/* fall through */
case IPPROTO_TCP:
eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
break;
}
}
#endif
......@@ -33,6 +33,7 @@
#define __MLX5_EN_XDP_H__
#include "en.h"
#include "en/txrx.h"
#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
#define MLX5E_XDP_TX_EMPTY_DS_COUNT \
......
......@@ -39,6 +39,7 @@
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/tls_rxtx.h"
#include "en.h"
#include "en/txrx.h"
#if IS_ENABLED(CONFIG_GENEVE)
static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
......
......@@ -39,6 +39,7 @@
#include <linux/skbuff.h>
#include <net/xfrm.h>
#include "en.h"
#include "en/txrx.h"
struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb, u32 *cqe_bcnt);
......
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.
#include "en.h"
#include "en_accel/ktls.h"
static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn)
{
u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
void *tisc;
tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
MLX5_SET(tisc, tisc, tls_en, 1);
return mlx5e_create_tis(mdev, in, tisn);
}
static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk,
enum tls_offload_ctx_dir direction,
struct tls_crypto_info *crypto_info,
u32 start_offload_tcp_sn)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_ktls_offload_context_tx *tx_priv;
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct mlx5_core_dev *mdev = priv->mdev;
int err;
if (WARN_ON(direction != TLS_OFFLOAD_CTX_DIR_TX))
return -EINVAL;
if (WARN_ON(!mlx5e_ktls_type_check(mdev, crypto_info)))
return -EOPNOTSUPP;
tx_priv = kvzalloc(sizeof(*tx_priv), GFP_KERNEL);
if (!tx_priv)
return -ENOMEM;
tx_priv->expected_seq = start_offload_tcp_sn;
tx_priv->crypto_info = crypto_info;
mlx5e_set_ktls_tx_priv_ctx(tls_ctx, tx_priv);
/* tc and underlay_qpn values are not in use for tls tis */
err = mlx5e_ktls_create_tis(mdev, &tx_priv->tisn);
if (err)
goto create_tis_fail;
err = mlx5_ktls_create_key(mdev, crypto_info, &tx_priv->key_id);
if (err)
goto encryption_key_create_fail;
mlx5e_ktls_tx_offload_set_pending(tx_priv);
return 0;
encryption_key_create_fail:
mlx5e_destroy_tis(priv->mdev, tx_priv->tisn);
create_tis_fail:
kvfree(tx_priv);
return err;
}
static void mlx5e_ktls_del(struct net_device *netdev,
struct tls_context *tls_ctx,
enum tls_offload_ctx_dir direction)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_ktls_offload_context_tx *tx_priv =
mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
mlx5_ktls_destroy_key(priv->mdev, tx_priv->key_id);
mlx5e_destroy_tis(priv->mdev, tx_priv->tisn);
kvfree(tx_priv);
}
static const struct tlsdev_ops mlx5e_ktls_ops = {
.tls_dev_add = mlx5e_ktls_add,
.tls_dev_del = mlx5e_ktls_del,
};
void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
{
struct net_device *netdev = priv->netdev;
if (!mlx5_accel_is_ktls_device(priv->mdev))
return;
netdev->hw_features |= NETIF_F_HW_TLS_TX;
netdev->features |= NETIF_F_HW_TLS_TX;
netdev->tlsdev_ops = &mlx5e_ktls_ops;
}
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */
#ifndef __MLX5E_KTLS_H__
#define __MLX5E_KTLS_H__
#include "en.h"
#ifdef CONFIG_MLX5_EN_TLS
#include <net/tls.h>
#include "accel/tls.h"
#define MLX5E_KTLS_STATIC_UMR_WQE_SZ \
(sizeof(struct mlx5e_umr_wqe) + MLX5_ST_SZ_BYTES(tls_static_params))
#define MLX5E_KTLS_STATIC_WQEBBS \
(DIV_ROUND_UP(MLX5E_KTLS_STATIC_UMR_WQE_SZ, MLX5_SEND_WQE_BB))
#define MLX5E_KTLS_PROGRESS_WQE_SZ \
(sizeof(struct mlx5e_tx_wqe) + MLX5_ST_SZ_BYTES(tls_progress_params))
#define MLX5E_KTLS_PROGRESS_WQEBBS \
(DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_BB))
#define MLX5E_KTLS_MAX_DUMP_WQEBBS 2
enum {
MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD = 0,
MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_OFFLOAD = 1,
MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_AUTHENTICATION = 2,
};
enum {
MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START = 0,
MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_SEARCHING = 1,
MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING = 2,
};
struct mlx5e_ktls_offload_context_tx {
struct tls_offload_context_tx *tx_ctx;
struct tls_crypto_info *crypto_info;
u32 expected_seq;
u32 tisn;
u32 key_id;
bool ctx_post_pending;
};
struct mlx5e_ktls_offload_context_tx_shadow {
struct tls_offload_context_tx tx_ctx;
struct mlx5e_ktls_offload_context_tx *priv_tx;
};
static inline void
mlx5e_set_ktls_tx_priv_ctx(struct tls_context *tls_ctx,
struct mlx5e_ktls_offload_context_tx *priv_tx)
{
struct tls_offload_context_tx *tx_ctx = tls_offload_ctx_tx(tls_ctx);
struct mlx5e_ktls_offload_context_tx_shadow *shadow;
BUILD_BUG_ON(sizeof(*shadow) > TLS_OFFLOAD_CONTEXT_SIZE_TX);
shadow = (struct mlx5e_ktls_offload_context_tx_shadow *)tx_ctx;
shadow->priv_tx = priv_tx;
priv_tx->tx_ctx = tx_ctx;
}
static inline struct mlx5e_ktls_offload_context_tx *
mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
{
struct tls_offload_context_tx *tx_ctx = tls_offload_ctx_tx(tls_ctx);
struct mlx5e_ktls_offload_context_tx_shadow *shadow;
BUILD_BUG_ON(sizeof(*shadow) > TLS_OFFLOAD_CONTEXT_SIZE_TX);
shadow = (struct mlx5e_ktls_offload_context_tx_shadow *)tx_ctx;
return shadow->priv_tx;
}
void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv);
void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx);
struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
struct mlx5e_txqsq *sq,
struct sk_buff *skb,
struct mlx5e_tx_wqe **wqe, u16 *pi);
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe_info *wi,
struct mlx5e_sq_dma *dma);
#else
static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
{
}
#endif
#endif /* __MLX5E_KTLS_H__ */
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.
#include <linux/tls.h>
#include "en.h"
#include "en/txrx.h"
#include "en_accel/ktls.h"
enum {
MLX5E_STATIC_PARAMS_CONTEXT_TLS_1_2 = 0x2,
};
enum {
MLX5E_ENCRYPTION_STANDARD_TLS = 0x1,
};
#define EXTRACT_INFO_FIELDS do { \
salt = info->salt; \
rec_seq = info->rec_seq; \
salt_sz = sizeof(info->salt); \
rec_seq_sz = sizeof(info->rec_seq); \
} while (0)
static void
fill_static_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
{
struct tls_crypto_info *crypto_info = priv_tx->crypto_info;
char *initial_rn, *gcm_iv;
u16 salt_sz, rec_seq_sz;
char *salt, *rec_seq;
u8 tls_version;
switch (crypto_info->cipher_type) {
case TLS_CIPHER_AES_GCM_128: {
struct tls12_crypto_info_aes_gcm_128 *info =
(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
EXTRACT_INFO_FIELDS;
break;
}
default:
WARN_ON(1);
return;
}
gcm_iv = MLX5_ADDR_OF(tls_static_params, ctx, gcm_iv);
initial_rn = MLX5_ADDR_OF(tls_static_params, ctx, initial_record_number);
memcpy(gcm_iv, salt, salt_sz);
memcpy(initial_rn, rec_seq, rec_seq_sz);
tls_version = MLX5E_STATIC_PARAMS_CONTEXT_TLS_1_2;
MLX5_SET(tls_static_params, ctx, tls_version, tls_version);
MLX5_SET(tls_static_params, ctx, const_1, 1);
MLX5_SET(tls_static_params, ctx, const_2, 2);
MLX5_SET(tls_static_params, ctx, encryption_standard,
MLX5E_ENCRYPTION_STANDARD_TLS);
MLX5_SET(tls_static_params, ctx, dek_index, priv_tx->key_id);
}
static void
build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn,
struct mlx5e_ktls_offload_context_tx *priv_tx,
bool fence)
{
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
#define STATIC_PARAMS_DS_CNT \
DIV_ROUND_UP(MLX5E_KTLS_STATIC_UMR_WQE_SZ, MLX5_SEND_WQE_DS)
cseg->opmod_idx_opcode = cpu_to_be32((pc << 8) | MLX5_OPCODE_UMR |
(MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS << 24));
cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
STATIC_PARAMS_DS_CNT);
cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
cseg->imm = cpu_to_be32(priv_tx->tisn);
ucseg->flags = MLX5_UMR_INLINE;
ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16);
fill_static_params_ctx(wqe->tls_static_params_ctx, priv_tx);
}
static void
fill_progress_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
{
MLX5_SET(tls_progress_params, ctx, pd, priv_tx->tisn);
MLX5_SET(tls_progress_params, ctx, record_tracker_state,
MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START);
MLX5_SET(tls_progress_params, ctx, auth_state,
MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD);
}
static void
build_progress_params(struct mlx5e_tx_wqe *wqe, u16 pc, u32 sqn,
struct mlx5e_ktls_offload_context_tx *priv_tx,
bool fence)
{
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
#define PROGRESS_PARAMS_DS_CNT \
DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_DS)
cseg->opmod_idx_opcode =
cpu_to_be32((pc << 8) | MLX5_OPCODE_SET_PSV |
(MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS << 24));
cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
PROGRESS_PARAMS_DS_CNT);
cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
fill_progress_params_ctx(wqe->data, priv_tx);
}
static void tx_fill_wi(struct mlx5e_txqsq *sq,
u16 pi, u8 num_wqebbs,
skb_frag_t *resync_dump_frag)
{
struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
wi->skb = NULL;
wi->num_wqebbs = num_wqebbs;
wi->resync_dump_frag = resync_dump_frag;
}
void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
{
priv_tx->ctx_post_pending = true;
}
static bool
mlx5e_ktls_tx_offload_test_and_clear_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
{
bool ret = priv_tx->ctx_post_pending;
priv_tx->ctx_post_pending = false;
return ret;
}
static void
post_static_params(struct mlx5e_txqsq *sq,
struct mlx5e_ktls_offload_context_tx *priv_tx,
bool fence)
{
struct mlx5e_umr_wqe *umr_wqe;
u16 pi;
umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi);
build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL);
sq->pc += MLX5E_KTLS_STATIC_WQEBBS;
}
static void
post_progress_params(struct mlx5e_txqsq *sq,
struct mlx5e_ktls_offload_context_tx *priv_tx,
bool fence)
{
struct mlx5e_tx_wqe *wqe;
u16 pi;
wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi);
build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL);
sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS;
}
static void
mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
struct mlx5e_ktls_offload_context_tx *priv_tx,
bool skip_static_post, bool fence_first_post)
{
bool progress_fence = skip_static_post || !fence_first_post;
if (!skip_static_post)
post_static_params(sq, priv_tx, fence_first_post);
post_progress_params(sq, priv_tx, progress_fence);
}
struct tx_sync_info {
u64 rcd_sn;
s32 sync_len;
int nr_frags;
skb_frag_t *frags[MAX_SKB_FRAGS];
};
static bool tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
u32 tcp_seq, struct tx_sync_info *info)
{
struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
struct tls_record_info *record;
int remaining, i = 0;
unsigned long flags;
bool ret = true;
spin_lock_irqsave(&tx_ctx->lock, flags);
record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);
if (unlikely(!record)) {
ret = false;
goto out;
}
if (unlikely(tcp_seq < tls_record_start_seq(record))) {
if (!tls_record_is_start_marker(record))
ret = false;
goto out;
}
info->sync_len = tcp_seq - tls_record_start_seq(record);
remaining = info->sync_len;
while (remaining > 0) {
skb_frag_t *frag = &record->frags[i];
__skb_frag_ref(frag);
remaining -= skb_frag_size(frag);
info->frags[i++] = frag;
}
/* reduce the part which will be sent with the original SKB */
if (remaining < 0)
skb_frag_size_add(info->frags[i - 1], remaining);
info->nr_frags = i;
out:
spin_unlock_irqrestore(&tx_ctx->lock, flags);
return ret;
}
static void
tx_post_resync_params(struct mlx5e_txqsq *sq,
struct mlx5e_ktls_offload_context_tx *priv_tx,
u64 rcd_sn)
{
struct tls_crypto_info *crypto_info = priv_tx->crypto_info;
__be64 rn_be = cpu_to_be64(rcd_sn);
bool skip_static_post;
u16 rec_seq_sz;
char *rec_seq;
switch (crypto_info->cipher_type) {
case TLS_CIPHER_AES_GCM_128: {
struct tls12_crypto_info_aes_gcm_128 *info =
(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
rec_seq = info->rec_seq;
rec_seq_sz = sizeof(info->rec_seq);
break;
}
default:
WARN_ON(1);
}
skip_static_post = !memcmp(rec_seq, &rn_be, rec_seq_sz);
if (!skip_static_post)
memcpy(rec_seq, &rn_be, rec_seq_sz);
mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
}
static int
tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb,
skb_frag_t *frag, u32 tisn, bool first)
{
struct mlx5_wqe_ctrl_seg *cseg;
struct mlx5_wqe_eth_seg *eseg;
struct mlx5_wqe_data_seg *dseg;
struct mlx5e_tx_wqe *wqe;
dma_addr_t dma_addr = 0;
u16 ds_cnt, ds_cnt_inl;
u8 num_wqebbs;
u16 pi, ihs;
int fsz;
ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
ihs = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb));
ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
ds_cnt += ds_cnt_inl;
ds_cnt += 1; /* one frag */
wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
cseg = &wqe->ctrl;
eseg = &wqe->eth;
dseg = wqe->data;
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP);
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
cseg->imm = cpu_to_be32(tisn);
cseg->fm_ce_se = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
eseg->inline_hdr.sz = cpu_to_be16(ihs);
memcpy(eseg->inline_hdr.start, skb->data, ihs);
dseg += ds_cnt_inl;
fsz = skb_frag_size(frag);
dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
return -ENOMEM;
dseg->addr = cpu_to_be64(dma_addr);
dseg->lkey = sq->mkey_be;
dseg->byte_count = cpu_to_be32(fsz);
mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
tx_fill_wi(sq, pi, num_wqebbs, frag);
sq->pc += num_wqebbs;
WARN(num_wqebbs > MLX5E_KTLS_MAX_DUMP_WQEBBS,
"unexpected DUMP num_wqebbs, %d > %d",
num_wqebbs, MLX5E_KTLS_MAX_DUMP_WQEBBS);
return 0;
}
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe_info *wi,
struct mlx5e_sq_dma *dma)
{
struct mlx5e_sq_stats *stats = sq->stats;
mlx5e_tx_dma_unmap(sq->pdev, dma);
__skb_frag_unref(wi->resync_dump_frag);
stats->tls_dump_packets++;
stats->tls_dump_bytes += wi->num_bytes;
}
static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
{
struct mlx5_wq_cyc *wq = &sq->wq;
u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
tx_fill_wi(sq, pi, 1, NULL);
mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
}
static struct sk_buff *
mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
struct mlx5e_txqsq *sq,
struct sk_buff *skb,
u32 seq)
{
struct mlx5e_sq_stats *stats = sq->stats;
struct mlx5_wq_cyc *wq = &sq->wq;
struct tx_sync_info info = {};
u16 contig_wqebbs_room, pi;
u8 num_wqebbs;
int i;
if (!tx_sync_info_get(priv_tx, seq, &info)) {
/* We might get here if a retransmission reaches the driver
* after the relevant record is acked.
* It should be safe to drop the packet in this case
*/
stats->tls_drop_no_sync_data++;
goto err_out;
}
if (unlikely(info.sync_len < 0)) {
u32 payload;
int headln;
headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
payload = skb->len - headln;
if (likely(payload <= -info.sync_len))
return skb;
stats->tls_drop_bypass_req++;
goto err_out;
}
stats->tls_ooo++;
num_wqebbs = MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS +
(info.nr_frags ? info.nr_frags * MLX5E_KTLS_MAX_DUMP_WQEBBS : 1);
pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
if (unlikely(contig_wqebbs_room < num_wqebbs))
mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
tx_post_resync_params(sq, priv_tx, info.rcd_sn);
for (i = 0; i < info.nr_frags; i++)
if (tx_post_resync_dump(sq, skb, info.frags[i],
priv_tx->tisn, !i))
goto err_out;
/* If no dump WQE was sent, we need to have a fence NOP WQE before the
* actual data xmit.
*/
if (!info.nr_frags)
tx_post_fence_nop(sq);
return skb;
err_out:
dev_kfree_skb_any(skb);
return NULL;
}
struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
struct mlx5e_txqsq *sq,
struct sk_buff *skb,
struct mlx5e_tx_wqe **wqe, u16 *pi)
{
struct mlx5e_ktls_offload_context_tx *priv_tx;
struct mlx5e_sq_stats *stats = sq->stats;
struct mlx5_wqe_ctrl_seg *cseg;
struct tls_context *tls_ctx;
int datalen;
u32 seq;
if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
goto out;
datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
if (!datalen)
goto out;
tls_ctx = tls_get_ctx(skb->sk);
if (unlikely(tls_ctx->netdev != netdev))
goto err_out;
priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
*wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
stats->tls_ctx++;
}
seq = ntohl(tcp_hdr(skb)->seq);
if (unlikely(priv_tx->expected_seq != seq)) {
skb = mlx5e_ktls_tx_handle_ooo(priv_tx, sq, skb, seq);
if (unlikely(!skb))
goto out;
*wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
}
priv_tx->expected_seq = seq + datalen;
cseg = &(*wqe)->ctrl;
cseg->imm = cpu_to_be32(priv_tx->tisn);
stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
stats->tls_encrypted_bytes += datalen;
out:
return skb;
err_out:
dev_kfree_skb_any(skb);
return NULL;
}
......@@ -190,6 +190,11 @@ void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
struct net_device *netdev = priv->netdev;
u32 caps;
if (mlx5_accel_is_ktls_device(priv->mdev)) {
mlx5e_ktls_build_netdev(priv);
return;
}
if (!mlx5_accel_is_tls_device(priv->mdev))
return;
......
......@@ -33,8 +33,10 @@
#ifndef __MLX5E_TLS_H__
#define __MLX5E_TLS_H__
#ifdef CONFIG_MLX5_EN_TLS
#include "accel/tls.h"
#include "en_accel/ktls.h"
#ifdef CONFIG_MLX5_EN_TLS
#include <net/tls.h>
#include "en.h"
......@@ -94,7 +96,12 @@ int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data);
#else
static inline void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) { }
static inline void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
{
if (mlx5_accel_is_ktls_device(priv->mdev))
mlx5e_ktls_build_netdev(priv);
}
static inline int mlx5e_tls_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tls_cleanup(struct mlx5e_priv *priv) { }
static inline int mlx5e_tls_get_count(struct mlx5e_priv *priv) { return 0; }
......
......@@ -248,7 +248,7 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
cpu_to_be64(info.rcd_sn));
mlx5e_sq_xmit(sq, nskb, *wqe, *pi, true);
mlx5e_sq_fetch_wqe(sq, wqe, pi);
*wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
return skb;
err_out:
......@@ -269,6 +269,11 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
int datalen;
u32 skb_seq;
if (MLX5_CAP_GEN(sq->channel->mdev, tls)) {
skb = mlx5e_ktls_handle_tx_skb(netdev, sq, skb, wqe, pi);
goto out;
}
if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
goto out;
......
......@@ -38,6 +38,7 @@
#include <linux/skbuff.h>
#include "en.h"
#include "en/txrx.h"
struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
struct mlx5e_txqsq *sq,
......
......@@ -41,6 +41,7 @@
#include <net/xdp_sock.h>
#include "eswitch.h"
#include "en.h"
#include "en/txrx.h"
#include "en_tc.h"
#include "en_rep.h"
#include "en_accel/ipsec.h"
......@@ -62,6 +63,7 @@
#include "en/xsk/rx.h"
#include "en/xsk/tx.h"
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
bool striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) &&
......@@ -1124,11 +1126,14 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->uar_map = mdev->mlx5e_res.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
sq->stop_room = MLX5E_SQ_STOP_ROOM;
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
if (MLX5_IPSEC_DEV(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
if (mlx5_accel_is_tls_device(c->priv->mdev))
if (mlx5_accel_is_tls_device(c->priv->mdev)) {
set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
sq->stop_room += MLX5E_SQ_TLS_ROOM;
}
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
......@@ -3145,20 +3150,19 @@ void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
mlx5e_free_cq(&drop_rq->cq);
}
int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
u32 underlay_qpn, u32 *tisn)
int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn)
{
u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
MLX5_SET(tisc, tisc, prio, tc << 1);
MLX5_SET(tisc, tisc, underlay_qpn, underlay_qpn);
MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);
if (MLX5_GET(tisc, tisc, tls_en))
MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.pdn);
if (mlx5_lag_is_lacp_owner(mdev))
MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
return mlx5_core_create_tis(mdev, in, sizeof(in), tisn);
return mlx5_core_create_tis(mdev, in, MLX5_ST_SZ_BYTES(create_tis_in), tisn);
}
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
......@@ -3172,7 +3176,14 @@ int mlx5e_create_tises(struct mlx5e_priv *priv)
int tc;
for (tc = 0; tc < priv->profile->max_tc; tc++) {
err = mlx5e_create_tis(priv->mdev, tc, 0, &priv->tisn[tc]);
u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
void *tisc;
tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
MLX5_SET(tisc, tisc, prio, tc << 1);
err = mlx5e_create_tis(priv->mdev, in, &priv->tisn[tc]);
if (err)
goto err_close_tises;
}
......
......@@ -48,8 +48,15 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ctx) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
#endif
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
......@@ -271,8 +278,15 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->tx_csum_none += sq_stats->csum_none;
s->tx_csum_partial += sq_stats->csum_partial;
#ifdef CONFIG_MLX5_EN_TLS
s->tx_tls_ooo += sq_stats->tls_ooo;
s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes;
s->tx_tls_ctx += sq_stats->tls_ctx;
s->tx_tls_ooo += sq_stats->tls_ooo;
s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req;
s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes;
s->tx_tls_dump_packets += sq_stats->tls_dump_packets;
#endif
s->tx_cqes += sq_stats->cqes;
}
......@@ -1293,6 +1307,16 @@ static const struct counter_desc sq_stats_desc[] = {
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
#endif
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
......
......@@ -122,8 +122,15 @@ struct mlx5e_sw_stats {
u64 ch_eq_rearm;
#ifdef CONFIG_MLX5_EN_TLS
u64 tx_tls_encrypted_packets;
u64 tx_tls_encrypted_bytes;
u64 tx_tls_ctx;
u64 tx_tls_ooo;
u64 tx_tls_resync_bytes;
u64 tx_tls_drop_no_sync_data;
u64 tx_tls_drop_bypass_req;
u64 tx_tls_dump_packets;
u64 tx_tls_dump_bytes;
#endif
u64 rx_xsk_packets;
......@@ -256,8 +263,15 @@ struct mlx5e_sq_stats {
u64 added_vlan_packets;
u64 nop;
#ifdef CONFIG_MLX5_EN_TLS
u64 tls_encrypted_packets;
u64 tls_encrypted_bytes;
u64 tls_ctx;
u64 tls_ooo;
u64 tls_resync_bytes;
u64 tls_drop_no_sync_data;
u64 tls_drop_bypass_req;
u64 tls_dump_packets;
u64 tls_dump_bytes;
#endif
/* less likely accessed in data path */
u64 csum_none;
......
......@@ -35,55 +35,12 @@
#include <net/geneve.h>
#include <net/dsfield.h>
#include "en.h"
#include "en/txrx.h"
#include "ipoib/ipoib.h"
#include "en_accel/en_accel.h"
#include "en_accel/ktls.h"
#include "lib/clock.h"
#define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS
#ifndef CONFIG_MLX5_EN_TLS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
MLX5E_SQ_NOPS_ROOM)
#else
/* TLS offload requires MLX5E_SQ_STOP_ROOM to have
* enough room for a resync SKB, a normal SKB and a NOP
*/
#define MLX5E_SQ_STOP_ROOM (2 * MLX5_SEND_WQE_MAX_WQEBBS +\
MLX5E_SQ_NOPS_ROOM)
#endif
static inline void mlx5e_tx_dma_unmap(struct device *pdev,
struct mlx5e_sq_dma *dma)
{
switch (dma->type) {
case MLX5E_DMA_MAP_SINGLE:
dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
break;
case MLX5E_DMA_MAP_PAGE:
dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
break;
default:
WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
}
}
static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}
static inline void mlx5e_dma_push(struct mlx5e_txqsq *sq,
dma_addr_t addr,
u32 size,
enum mlx5e_dma_map_type map_type)
{
struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);
dma->addr = addr;
dma->size = size;
dma->type = map_type;
}
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
{
int i;
......@@ -277,23 +234,6 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
return -ENOMEM;
}
static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq,
struct mlx5_wq_cyc *wq,
u16 pi, u16 nnops)
{
struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];
edge_wi = wi + nnops;
/* fill sq frag edge with nops to avoid wqe wrapping two pages */
for (; wi < edge_wi; wi++) {
wi->skb = NULL;
wi->num_wqebbs = 1;
mlx5e_post_nop(wq, sq->sqn, &sq->pc);
}
sq->stats->nop += nnops;
}
static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
u8 opcode, u16 ds_cnt, u8 num_wqebbs, u32 num_bytes, u8 num_dma,
......@@ -315,7 +255,7 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
sq->pc += wi->num_wqebbs;
if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM))) {
if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, sq->stop_room))) {
netif_tx_stop_queue(sq->txq);
sq->stats->stopped++;
}
......@@ -326,8 +266,6 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
}
#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
{
......@@ -354,9 +292,12 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
stats->packets += skb_shinfo(skb)->gso_segs;
} else {
u8 mode = mlx5e_transport_inline_tx_wqe(wqe) ?
MLX5_INLINE_MODE_TCP_UDP : sq->min_inline_mode;
opcode = MLX5_OPCODE_SEND;
mss = 0;
ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
ihs = mlx5e_calc_min_inline(mode, skb);
num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
stats->packets++;
}
......@@ -380,11 +321,17 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
if (unlikely(contig_wqebbs_room < num_wqebbs)) {
#ifdef CONFIG_MLX5_EN_IPSEC
struct mlx5_wqe_eth_seg cur_eth = wqe->eth;
#endif
#ifdef CONFIG_MLX5_EN_TLS
struct mlx5_wqe_ctrl_seg cur_ctrl = wqe->ctrl;
#endif
mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
#ifdef CONFIG_MLX5_EN_IPSEC
wqe->eth = cur_eth;
#endif
#ifdef CONFIG_MLX5_EN_TLS
wqe->ctrl = cur_ctrl;
#endif
}
......@@ -444,7 +391,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
u16 pi;
sq = priv->txq2sq[skb_get_queue_mapping(skb)];
mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
/* might send skbs and update wqe and pi */
skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi);
......@@ -532,8 +479,16 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
wi = &sq->db.wqe_info[ci];
skb = wi->skb;
if (unlikely(!skb)) { /* nop */
sqcc++;
if (unlikely(!skb)) {
#ifdef CONFIG_MLX5_EN_TLS
if (wi->resync_dump_frag) {
struct mlx5e_sq_dma *dma =
mlx5e_dma_get(sq, dma_fifo_cc++);
mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, dma);
}
#endif
sqcc += wi->num_wqebbs;
continue;
}
......@@ -575,8 +530,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
netdev_tx_completed_queue(sq->txq, npkts, nbytes);
if (netif_tx_queue_stopped(sq->txq) &&
mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
MLX5E_SQ_STOP_ROOM) &&
mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
netif_tx_wake_queue(sq->txq);
stats->wake++;
......
......@@ -37,8 +37,6 @@
#include "accel/ipsec.h"
#include "fs_cmd.h"
#ifdef CONFIG_MLX5_FPGA
u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev);
unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev);
int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
......@@ -66,77 +64,4 @@ int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
const struct mlx5_flow_cmds *
mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type);
#else
static inline u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
return 0;
}
static inline unsigned int
mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev)
{
return 0;
}
static inline int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev,
u64 *counters)
{
return 0;
}
static inline void *
mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *accel_xfrm,
const __be32 saddr[4],
const __be32 daddr[4],
const __be32 spi, bool is_ipv6)
{
return NULL;
}
static inline void mlx5_fpga_ipsec_delete_sa_ctx(void *context)
{
}
static inline int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
{
return 0;
}
static inline void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
{
}
static inline void mlx5_fpga_ipsec_build_fs_cmds(void)
{
}
static inline struct mlx5_accel_esp_xfrm *
mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 flags)
{
return ERR_PTR(-EOPNOTSUPP);
}
static inline void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
{
}
static inline int
mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
return -EOPNOTSUPP;
}
static inline const struct mlx5_flow_cmds *
mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
{
return mlx5_fs_cmd_get_default(type);
}
#endif /* CONFIG_MLX5_FPGA */
#endif /* __MLX5_FPGA_SADB_H__ */
......@@ -239,6 +239,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
if (MLX5_CAP_GEN(dev, tls)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_TLS);
if (err)
return err;
}
return 0;
}
......
......@@ -258,6 +258,18 @@ void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *
mlx5_core_destroy_qp(mdev, qp);
}
int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn)
{
u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
void *tisc;
tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
MLX5_SET(tisc, tisc, underlay_qpn, underlay_qpn);
return mlx5e_create_tis(mdev, in, tisn);
}
static int mlx5i_init_tx(struct mlx5e_priv *priv)
{
struct mlx5i_priv *ipriv = priv->ppriv;
......@@ -269,7 +281,7 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv)
return err;
}
err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]);
err = mlx5i_create_tis(priv->mdev, ipriv->qp.qpn, &priv->tisn[0]);
if (err) {
mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
goto err_destroy_underlay_qp;
......
......@@ -59,6 +59,8 @@ struct mlx5i_priv {
char *mlx5e_priv[0];
};
int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn);
/* Underlay QP create/destroy functions */
int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp);
void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp);
......
......@@ -210,7 +210,7 @@ static int mlx5i_pkey_open(struct net_device *netdev)
goto err_unint_underlay_qp;
}
err = mlx5e_create_tis(mdev, 0 /* tc */, ipriv->qp.qpn, &epriv->tisn[0]);
err = mlx5i_create_tis(mdev, ipriv->qp.qpn, &epriv->tisn[0]);
if (err) {
mlx5_core_warn(mdev, "create child tis failed, %d\n", err);
goto err_remove_rx_uderlay_qp;
......
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.
#include "mlx5_core.h"
int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
void *key, u32 sz_bytes,
u32 *p_key_id)
{
u32 in[MLX5_ST_SZ_DW(create_encryption_key_in)] = {};
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
u32 sz_bits = sz_bytes * BITS_PER_BYTE;
u8 general_obj_key_size;
u64 general_obj_types;
void *obj, *key_p;
int err;
obj = MLX5_ADDR_OF(create_encryption_key_in, in, encryption_key_object);
key_p = MLX5_ADDR_OF(encryption_key_obj, obj, key);
general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
if (!(general_obj_types &
MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY))
return -EINVAL;
switch (sz_bits) {
case 128:
general_obj_key_size =
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128;
break;
case 256:
general_obj_key_size =
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_256;
break;
default:
return -EINVAL;
}
memcpy(key_p, key, sz_bytes);
MLX5_SET(encryption_key_obj, obj, key_size, general_obj_key_size);
MLX5_SET(encryption_key_obj, obj, key_type,
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_DEK);
MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY);
MLX5_SET(encryption_key_obj, obj, pd, mdev->mlx5e_res.pdn);
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (!err)
*p_key_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
/* avoid leaking key on the stack */
memzero_explicit(in, sizeof(in));
return err;
}
void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id)
{
u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, key_id);
mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
......@@ -79,4 +79,9 @@ struct mlx5_pme_stats {
void mlx5_get_pme_stats(struct mlx5_core_dev *dev, struct mlx5_pme_stats *stats);
int mlx5_notifier_call_chain(struct mlx5_events *events, unsigned int event, void *data);
/* Crypto */
int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
void *key, u32 sz_bytes, u32 *p_key_id);
void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id);
#endif
......@@ -1600,7 +1600,7 @@ static int __init init(void)
get_random_bytes(&sw_owner_id, sizeof(sw_owner_id));
mlx5_core_verify_params();
mlx5_fpga_ipsec_build_fs_cmds();
mlx5_accel_ipsec_build_fs_cmds();
mlx5_register_debugfs();
err = pci_register_driver(&mlx5_core_driver);
......
......@@ -114,7 +114,7 @@ enum mlx5_accel_ipsec_cap {
MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN = 1 << 7,
};
#ifdef CONFIG_MLX5_ACCEL
#ifdef CONFIG_MLX5_FPGA_IPSEC
u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev);
......