diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 353ac6daa3dc3eb3e777c3f24b3b046b6b5f6957..30cad07be2b51a6c07a5489a0cf2a967f7bd22a4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -57,24 +57,12 @@
 #define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
 
-#define MLX5E_HW2SW_MTU(priv, hwmtu) ((hwmtu) - ((priv)->hard_mtu))
-#define MLX5E_SW2HW_MTU(priv, swmtu) ((swmtu) + ((priv)->hard_mtu))
+#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
+#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))
 
 #define MLX5E_MAX_DSCP 64
 #define MLX5E_MAX_NUM_TC 8
 
-#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6
-#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa
-#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd
-
-#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x1
-#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
-#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd
-
-#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2
-#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW 0x3
-#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW 0x6
-
 #define MLX5_RX_HEADROOM NET_SKB_PAD
 #define MLX5_SKB_FRAG_SZ(len) (SKB_DATA_ALIGN(len) + \
 			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
@@ -95,11 +83,29 @@
 #define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
 
 #define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
-#define MLX5E_REQUIRED_MTTS(wqes) \
-	(wqes * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
-#define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) - 1 <= U16_MAX)
+#define MLX5E_REQUIRED_WQE_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
+#define MLX5E_LOG_ALIGNED_MPWQE_PPW (ilog2(MLX5E_REQUIRED_WQE_MTTS))
+#define MLX5E_REQUIRED_MTTS(wqes) (wqes * MLX5E_REQUIRED_WQE_MTTS)
+#define MLX5E_MAX_RQ_NUM_MTTS \
+	((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
+#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
+#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW \
+	(ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
+#define MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW \
+	(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \
+	 (MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU))
+
+#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6
+#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa
+#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd
+
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x1
+#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
+#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd, \
+					       MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW)
+
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2
 
-#define MLX5_UMR_ALIGN (2048)
 #define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (256)
 
 #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
@@ -124,8 +130,13 @@
 #define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
 #define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */
 
-#define MLX5E_ICOSQ_MAX_WQEBBS \
-	(DIV_ROUND_UP(sizeof(struct mlx5e_umr_wqe), MLX5_SEND_WQE_BB))
+#define MLX5E_UMR_WQE_INLINE_SZ \
+	(sizeof(struct mlx5e_umr_wqe) + \
+	 ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(struct mlx5_mtt), \
+	       MLX5_UMR_MTT_ALIGNMENT))
+#define MLX5E_UMR_WQEBBS \
+	(DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
+#define MLX5E_ICOSQ_MAX_WQEBBS MLX5E_UMR_WQEBBS
 
 #define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
 #define MLX5E_XDP_TX_DS_COUNT \
@@ -155,26 +166,6 @@ static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
 	}
 }
 
-static inline int mlx5_min_log_rq_size(int wq_type)
-{
-	switch (wq_type) {
-	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
-	default:
-		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
-	}
-}
-
-static inline int mlx5_max_log_rq_size(int wq_type)
-{
-	switch (wq_type) {
-	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-		return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW;
-	default:
-		return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
-	}
-}
-
 static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
 {
 	return is_kdump_kernel() ?
@@ -197,7 +188,7 @@ struct mlx5e_umr_wqe {
 	struct mlx5_wqe_ctrl_seg ctrl;
 	struct mlx5_wqe_umr_ctrl_seg uctrl;
 	struct mlx5_mkey_seg mkc;
-	struct mlx5_wqe_data_seg data;
+	struct mlx5_mtt inline_mtts[0];
 };
 
 extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];
@@ -233,7 +224,7 @@ enum mlx5e_priv_flag {
 struct mlx5e_params {
 	u8 log_sq_size;
 	u8 rq_wq_type;
-	u8 log_rq_size;
+	u8 log_rq_mtu_frames;
 	u16 num_channels;
 	u8 num_tc;
 	bool rx_cqe_compress_def;
@@ -251,6 +242,8 @@ struct mlx5e_params {
 	u32 lro_timeout;
 	u32 pflags;
 	struct bpf_prog *xdp_prog;
+	unsigned int sw_mtu;
+	int hard_mtu;
 };
 
 #ifdef CONFIG_MLX5_CORE_EN_DCB
@@ -433,7 +426,6 @@ struct mlx5e_icosq {
 	void __iomem *uar_map;
 	u32 sqn;
 	u16 edge;
-	__be32 mkey_be;
 	unsigned long state;
 
 	/* control path */
@@ -458,16 +450,13 @@ struct mlx5e_wqe_frag_info {
 };
 
 struct mlx5e_umr_dma_info {
-	__be64 *mtt;
-	dma_addr_t mtt_addr;
 	struct mlx5e_dma_info dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
-	struct mlx5e_umr_wqe wqe;
 };
 
 struct mlx5e_mpw_info {
 	struct mlx5e_umr_dma_info umr;
 	u16 consumed_strides;
-	u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
+	DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
 };
 
 /* a single cache unit is capable to serve one napi call (for non-striding rq)
@@ -484,9 +473,16 @@ struct mlx5e_page_cache {
 
 struct mlx5e_rq;
 typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
+typedef struct sk_buff *
+(*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
+			       u16 cqe_bcnt, u32 head_offset, u32 page_idx);
 typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
 typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
 
+enum mlx5e_rq_flag {
+	MLX5E_RQ_FLAG_XDP_XMIT = BIT(0),
+};
+
 struct mlx5e_rq {
 	/* data path */
 	struct mlx5_wq_ll wq;
@@ -497,12 +493,12 @@ struct mlx5e_rq {
 			u32 frag_sz; /* max possible skb frag_sz */
 			union {
 				bool page_reuse;
-				bool xdp_xmit;
 			};
 		} wqe;
 		struct {
+			struct mlx5e_umr_wqe umr_wqe;
 			struct mlx5e_mpw_info *info;
-			void *mtt_no_align;
+			mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
 			u16 num_strides;
 			u8 log_stride_sz;
 			bool umr_in_progress;
@@ -534,7 +530,9 @@ struct mlx5e_rq {
 
 	/* XDP */
 	struct bpf_prog *xdp_prog;
+	unsigned int hw_mtu;
 	struct mlx5e_xdpsq xdpsq;
+	DECLARE_BITMAP(flags, 8);
 
 	/* control */
 	struct mlx5_wq_ctrl wq_ctrl;
@@ -767,7 +765,6 @@ struct mlx5e_priv {
 	struct mlx5e_tir inner_indir_tir[MLX5E_NUM_INDIR_TIRS];
 	struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS];
 	u32 tx_rates[MLX5E_MAX_NUM_SQS];
-	int hard_mtu;
 
 	struct mlx5e_flow_steering fs;
 	struct mlx5e_vxlan_db vxlan;
@@ -846,11 +843,12 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
 void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
 void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
 void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
-
-u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
-				   struct mlx5e_params *params);
-u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
-				   struct mlx5e_params *params);
+struct sk_buff *
+mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
+				u16 cqe_bcnt, u32 head_offset, u32 page_idx);
+struct sk_buff *
+mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
+				   u16 cqe_bcnt, u32 head_offset, u32 page_idx);
 
 void mlx5e_update_stats(struct mlx5e_priv *priv);
 
@@ -981,11 +979,6 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
 	mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
 }
 
-static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
-{
-	return wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
-}
-
 extern const struct ethtool_ops mlx5e_ethtool_ops;
 #ifdef CONFIG_MLX5_CORE_EN_DCB
 extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
@@ -1111,7 +1104,7 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv);
 void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
 void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
 			    struct mlx5e_params *params,
-			    u16 max_channels);
+			    u16 max_channels, u16 mtu);
 u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
 void mlx5e_rx_dim_work(struct work_struct *work);
 #endif /* __MLX5_EN_H__ */
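For intuition, the new en.h size derivation can be checked standalone. The sketch below mirrors the macros with plain integers; it is not driver code, and it assumes 4K pages and MLX5_MPWRQ_LOG_WQE_SZ == 18 (the driver's value for 4K PAGE_SIZE) — constants assumed for the example only:

/* Standalone sketch of the MTT-budget arithmetic above. */
#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

static int ilog2_u32(unsigned int v) /* floor(log2(v)), v > 0 */
{
	int l = -1;

	while (v) {
		v >>= 1;
		l++;
	}
	return l;
}

int main(void)
{
	const int log_wqe_sz = 18;  /* assumed MLX5_MPWRQ_LOG_WQE_SZ */
	const int page_shift = 12;  /* assumed 4K pages */
	const int pages_per_wqe = 1 << (log_wqe_sz - page_shift);
	const int wqe_mtts = ALIGN_UP(pages_per_wqe, 8); /* MLX5E_REQUIRED_WQE_MTTS */
	const int max_rq_num_mtts = (1 << 16) * 2;       /* so MTT_OCTW(n) fits a u16 */
	const int max_log_rq_size_mpw = ilog2_u32(max_rq_num_mtts / wqe_mtts);

	/* Expected with these assumptions: 64 pages/WQE, 64 MTTs/WQE, max 11. */
	printf("pages/WQE=%d MTTs/WQE=%d max log_rq_size (MPW)=%d\n",
	       pages_per_wqe, wqe_mtts, max_log_rq_size_mpw);
	return 0;
}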
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index c57c929d797369c20a0d0028cddc196b003bd791..a87d46bc22991700c2a08b0e676b0a570cc00dce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -220,60 +220,12 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
 	mlx5e_ethtool_get_ethtool_stats(priv, stats, data);
 }
 
-static u32 mlx5e_rx_wqes_to_packets(struct mlx5e_priv *priv, int rq_wq_type,
-				    int num_wqe)
-{
-	int packets_per_wqe;
-	int stride_size;
-	int num_strides;
-	int wqe_size;
-
-	if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
-		return num_wqe;
-
-	stride_size = 1 << mlx5e_mpwqe_get_log_stride_size(priv->mdev, &priv->channels.params);
-	num_strides = 1 << mlx5e_mpwqe_get_log_num_strides(priv->mdev, &priv->channels.params);
-	wqe_size = stride_size * num_strides;
-
-	packets_per_wqe = wqe_size /
-			  ALIGN(ETH_DATA_LEN, stride_size);
-	return (1 << (order_base_2(num_wqe * packets_per_wqe) - 1));
-}
-
-static u32 mlx5e_packets_to_rx_wqes(struct mlx5e_priv *priv, int rq_wq_type,
-				    int num_packets)
-{
-	int packets_per_wqe;
-	int stride_size;
-	int num_strides;
-	int wqe_size;
-	int num_wqes;
-
-	if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
-		return num_packets;
-
-	stride_size = 1 << mlx5e_mpwqe_get_log_stride_size(priv->mdev, &priv->channels.params);
-	num_strides = 1 << mlx5e_mpwqe_get_log_num_strides(priv->mdev, &priv->channels.params);
-	wqe_size = stride_size * num_strides;
-
-	num_packets = (1 << order_base_2(num_packets));
-
-	packets_per_wqe = wqe_size /
-			  ALIGN(ETH_DATA_LEN, stride_size);
-	num_wqes = DIV_ROUND_UP(num_packets, packets_per_wqe);
-	return 1 << (order_base_2(num_wqes));
-}
-
 void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
 				 struct ethtool_ringparam *param)
 {
-	int rq_wq_type = priv->channels.params.rq_wq_type;
-
-	param->rx_max_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
-							 1 << mlx5_max_log_rq_size(rq_wq_type));
+	param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
 	param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
-	param->rx_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
-						     1 << priv->channels.params.log_rq_size);
+	param->rx_pending = 1 << priv->channels.params.log_rq_mtu_frames;
 	param->tx_pending = 1 << priv->channels.params.log_sq_size;
 }
 
@@ -288,13 +240,9 @@ static void mlx5e_get_ringparam(struct net_device *dev,
 int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
 				struct ethtool_ringparam *param)
 {
-	int rq_wq_type = priv->channels.params.rq_wq_type;
 	struct mlx5e_channels new_channels = {};
-	u32 rx_pending_wqes;
-	u32 min_rq_size;
 	u8 log_rq_size;
 	u8 log_sq_size;
-	u32 num_mtts;
 	int err = 0;
 
 	if (param->rx_jumbo_pending) {
@@ -308,23 +256,10 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
 		return -EINVAL;
 	}
 
-	min_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
-					       1 << mlx5_min_log_rq_size(rq_wq_type));
-	rx_pending_wqes = mlx5e_packets_to_rx_wqes(priv, rq_wq_type,
-						   param->rx_pending);
-
-	if (param->rx_pending < min_rq_size) {
+	if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) {
 		netdev_info(priv->netdev, "%s: rx_pending (%d) < min (%d)\n",
 			    __func__, param->rx_pending,
-			    min_rq_size);
-		return -EINVAL;
-	}
-
-	num_mtts = MLX5E_REQUIRED_MTTS(rx_pending_wqes);
-	if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
-	    !MLX5E_VALID_NUM_MTTS(num_mtts)) {
-		netdev_info(priv->netdev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n",
-			    __func__, param->rx_pending);
+			    1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
 		return -EINVAL;
 	}
 
@@ -335,17 +270,17 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
 		return -EINVAL;
 	}
 
-	log_rq_size = order_base_2(rx_pending_wqes);
+	log_rq_size = order_base_2(param->rx_pending);
 	log_sq_size = order_base_2(param->tx_pending);
 
-	if (log_rq_size == priv->channels.params.log_rq_size &&
+	if (log_rq_size == priv->channels.params.log_rq_mtu_frames &&
 	    log_sq_size == priv->channels.params.log_sq_size)
 		return 0;
 
 	mutex_lock(&priv->state_lock);
 
 	new_channels.params = priv->channels.params;
-	new_channels.params.log_rq_size = log_rq_size;
+	new_channels.params.log_rq_mtu_frames = log_rq_size;
 	new_channels.params.log_sq_size = log_sq_size;
 
 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
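With ring sizes now counted uniformly in MTU-sized frames, set_ringparam reduces to a bounds check plus a power-of-two round-up. A sketch of that rounding, using a local stand-in for the kernel's order_base_2() (the helper name and request value below are hypothetical):

#include <stdio.h>

/* Local stand-in for order_base_2(): smallest order with (1 << order) >= n. */
static unsigned int order_base_2_sketch(unsigned int n)
{
	unsigned int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned int rx_pending = 1000; /* hypothetical user request */
	unsigned int log_rq = order_base_2_sketch(rx_pending);

	/* 1000 is rounded up to 1024 frames (log_rq_mtu_frames = 10). */
	printf("rx_pending %u -> log_rq_mtu_frames %u (%u frames)\n",
	       rx_pending, log_rq, 1u << log_rq);
	return 0;
}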
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 1b48dec67abf5dd1e739e9caea54f86128fe9204..0339609cfa564999de1c6cee8816a2dfa006221f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -73,26 +73,89 @@ struct mlx5e_channel_param {
 
 bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
 {
-	return MLX5_CAP_GEN(mdev, striding_rq) &&
+	bool striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) &&
 		MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
 		MLX5_CAP_ETH(mdev, reg_umr_sq);
+	u16 max_wqe_sz_cap = MLX5_CAP_GEN(mdev, max_wqe_sz_sq);
+	bool inline_umr = MLX5E_UMR_WQE_INLINE_SZ <= max_wqe_sz_cap;
+
+	if (!striding_rq_umr)
+		return false;
+	if (!inline_umr) {
+		mlx5_core_warn(mdev, "Cannot support Striding RQ: UMR WQE size (%d) exceeds maximum supported (%d).\n",
+			       (int)MLX5E_UMR_WQE_INLINE_SZ, max_wqe_sz_cap);
+		return false;
+	}
+	return true;
+}
+
+static u32 mlx5e_mpwqe_get_linear_frag_sz(struct mlx5e_params *params)
+{
+	if (!params->xdp_prog) {
+		u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+		u16 rq_headroom = MLX5_RX_HEADROOM + NET_IP_ALIGN;
+
+		return MLX5_SKB_FRAG_SZ(rq_headroom + hw_mtu);
+	}
+
+	return PAGE_SIZE;
 }
 
-u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
-				   struct mlx5e_params *params)
+static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params)
 {
+	u32 linear_frag_sz = mlx5e_mpwqe_get_linear_frag_sz(params);
+
+	return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
+}
+
+static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
+					 struct mlx5e_params *params)
+{
+	u32 frag_sz = mlx5e_mpwqe_get_linear_frag_sz(params);
+	s8 signed_log_num_strides_param;
+	u8 log_num_strides;
+
+	if (params->lro_en || frag_sz > PAGE_SIZE)
+		return false;
+
+	if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
+		return true;
+
+	log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(frag_sz);
+	signed_log_num_strides_param =
+		(s8)log_num_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
+
+	return signed_log_num_strides_param >= 0;
+}
+
+static u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params)
+{
+	if (params->log_rq_mtu_frames <
+	    mlx5e_mpwqe_log_pkts_per_wqe(params) + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
+		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
+
+	return params->log_rq_mtu_frames - mlx5e_mpwqe_log_pkts_per_wqe(params);
+}
+
+static u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
+					  struct mlx5e_params *params)
+{
+	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
+		return order_base_2(mlx5e_mpwqe_get_linear_frag_sz(params));
+
 	return MLX5E_MPWQE_STRIDE_SZ(mdev,
 		MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
 }
 
-u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
-				   struct mlx5e_params *params)
+static u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
+					  struct mlx5e_params *params)
 {
 	return MLX5_MPWRQ_LOG_WQE_SZ -
 		mlx5e_mpwqe_get_log_stride_size(mdev, params);
 }
 
-static u16 mlx5e_get_rq_headroom(struct mlx5e_params *params)
+static u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
+				 struct mlx5e_params *params)
 {
 	u16 linear_rq_headroom = params->xdp_prog ?
 		XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
@@ -102,6 +165,9 @@ static u16 mlx5e_get_rq_headroom(struct mlx5e_params *params)
 	if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST)
 		return linear_rq_headroom;
 
+	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
+		return linear_rq_headroom;
+
 	return 0;
 }
 
@@ -109,25 +175,23 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
 			       struct mlx5e_params *params)
 {
 	params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+	params->log_rq_mtu_frames = is_kdump_kernel() ?
+		MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
+		MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
 
 	switch (params->rq_wq_type) {
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-		params->log_rq_size = is_kdump_kernel() ?
-			MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
-			MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
 		break;
 	default: /* MLX5_WQ_TYPE_LINKED_LIST */
-		params->log_rq_size = is_kdump_kernel() ?
-			MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
-			MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
-
 		/* Extra room needed for build_skb */
-		params->lro_wqe_sz -= mlx5e_get_rq_headroom(params) +
+		params->lro_wqe_sz -= mlx5e_get_rq_headroom(mdev, params) +
 			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 	}
 
 	mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
 		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
-		       BIT(params->log_rq_size),
+		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
+		       BIT(mlx5e_mpwqe_get_log_rq_size(params)) :
+		       BIT(params->log_rq_mtu_frames),
 		       BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params)),
 		       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
 }
@@ -136,7 +200,8 @@ bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
 				struct mlx5e_params *params)
 {
 	return mlx5e_check_fragmented_striding_rq_cap(mdev) &&
-		!params->xdp_prog && !MLX5_IPSEC_DEV(mdev);
+		!MLX5_IPSEC_DEV(mdev) &&
+		!(params->xdp_prog && !mlx5e_rx_mpwqe_is_linear_skb(mdev, params));
 }
 
 void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
@@ -239,107 +304,38 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
 	synchronize_irq(pci_irq_vector(priv->mdev->pdev, MLX5_EQ_VEC_ASYNC));
 }
 
-static inline int mlx5e_get_wqe_mtt_sz(void)
-{
-	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
-	 * To avoid copying garbage after the mtt array, we allocate
-	 * a little more.
-	 */
-	return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64),
-		     MLX5_UMR_MTT_ALIGNMENT);
-}
-
 static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
 				       struct mlx5e_icosq *sq,
-				       struct mlx5e_umr_wqe *wqe,
-				       u16 ix)
+				       struct mlx5e_umr_wqe *wqe)
 {
 	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
 	struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
-	struct mlx5_wqe_data_seg *dseg = &wqe->data;
-	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
-	u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
-	u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);
+	u8 ds_cnt = DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_DS);
 
 	cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
 				   ds_cnt);
 	cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
 	cseg->imm = rq->mkey_be;
 
-	ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
+	ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
 	ucseg->xlt_octowords =
 		cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
-	ucseg->bsf_octowords =
-		cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
 	ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
-
-	dseg->lkey = sq->mkey_be;
-	dseg->addr = cpu_to_be64(wi->umr.mtt_addr);
 }
 
 static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
 				     struct mlx5e_channel *c)
 {
 	int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
-	int mtt_sz = mlx5e_get_wqe_mtt_sz();
-	int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
-	int i;
 
 	rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
 				      GFP_KERNEL, cpu_to_node(c->cpu));
 	if (!rq->mpwqe.info)
-		goto err_out;
-
-	/* We allocate more than mtt_sz as we will align the pointer */
-	rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
-					      cpu_to_node(c->cpu));
-	if (unlikely(!rq->mpwqe.mtt_no_align))
-		goto err_free_wqe_info;
-
-	for (i = 0; i < wq_sz; i++) {
-		struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
-
-		wi->umr.mtt = PTR_ALIGN(rq->mpwqe.mtt_no_align + i * mtt_alloc,
-					MLX5_UMR_ALIGN);
-		wi->umr.mtt_addr = dma_map_single(c->pdev, wi->umr.mtt, mtt_sz,
-						  PCI_DMA_TODEVICE);
-		if (unlikely(dma_mapping_error(c->pdev, wi->umr.mtt_addr)))
-			goto err_unmap_mtts;
+		return -ENOMEM;
 
-		mlx5e_build_umr_wqe(rq, &c->icosq, &wi->umr.wqe, i);
-	}
+	mlx5e_build_umr_wqe(rq, &c->icosq, &rq->mpwqe.umr_wqe);
 
 	return 0;
-
-err_unmap_mtts:
-	while (--i >= 0) {
-		struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
-
-		dma_unmap_single(c->pdev, wi->umr.mtt_addr, mtt_sz,
-				 PCI_DMA_TODEVICE);
-	}
-	kfree(rq->mpwqe.mtt_no_align);
-err_free_wqe_info:
-	kfree(rq->mpwqe.info);
-
-err_out:
-	return -ENOMEM;
-}
-
-static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq)
-{
-	int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
-	int mtt_sz = mlx5e_get_wqe_mtt_sz();
-	int i;
-
-	for (i = 0; i < wq_sz; i++) {
-		struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
-
-		dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz,
-				 PCI_DMA_TODEVICE);
-	}
-	kfree(rq->mpwqe.mtt_no_align);
-	kfree(rq->mpwqe.info);
 }
 
 static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
@@ -351,9 +347,6 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
 	u32 *in;
 	int err;
 
-	if (!MLX5E_VALID_NUM_MTTS(npages))
-		return -EINVAL;
-
 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
@@ -386,6 +379,11 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq
 	return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
 }
 
+static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
+{
+	return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT;
+}
+
 static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 			  struct mlx5e_params *params,
 			  struct mlx5e_rq_param *rqp,
@@ -419,6 +417,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 	rq->channel = c;
 	rq->ix = c->ix;
 	rq->mdev = mdev;
+	rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
 
 	rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
 	if (IS_ERR(rq->xdp_prog)) {
@@ -432,11 +431,10 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 		goto err_rq_wq_destroy;
 
 	rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
-	rq->buff.headroom = mlx5e_get_rq_headroom(params);
+	rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params);
 
 	switch (rq->wq_type) {
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-
 		rq->post_wqes = mlx5e_post_rx_mpwqes;
 		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
 
@@ -454,6 +452,10 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 			goto err_rq_wq_destroy;
 		}
 
+		rq->mpwqe.skb_from_cqe_mpwrq =
+			mlx5e_rx_mpwqe_is_linear_skb(mdev, params) ?
+			mlx5e_skb_from_cqe_mpwrq_linear :
+			mlx5e_skb_from_cqe_mpwrq_nonlinear;
 		rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params);
 		rq->mpwqe.num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params));
 
@@ -494,7 +496,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 		byte_count = params->lro_en ?
 				params->lro_wqe_sz :
-				MLX5E_SW2HW_MTU(c->priv, c->netdev->mtu);
+				MLX5E_SW2HW_MTU(params, params->sw_mtu);
 #ifdef CONFIG_MLX5_EN_IPSEC
 		if (MLX5_IPSEC_DEV(mdev))
 			byte_count += MLX5E_METADATA_ETHER_LEN;
@@ -514,9 +516,9 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
 
 		if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
-			u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, i) << PAGE_SHIFT;
+			u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i);
 
-			wqe->data.addr = cpu_to_be64(dma_offset);
+			wqe->data.addr = cpu_to_be64(dma_offset + rq->buff.headroom);
 		}
 
 		wqe->data.byte_count = cpu_to_be32(byte_count);
@@ -562,7 +564,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
 
 	switch (rq->wq_type) {
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-		mlx5e_rq_free_mpwqe_info(rq);
+		kfree(rq->mpwqe.info);
 		mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
 		break;
 	default: /* MLX5_WQ_TYPE_LINKED_LIST */
@@ -901,7 +903,6 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
 	struct mlx5_core_dev *mdev = c->mdev;
 	int err;
 
-	sq->mkey_be = c->mkey_be;
 	sq->channel = c;
 	sq->uar_map = mdev->mlx5e_res.bfreg.map;
 
@@ -1867,18 +1868,21 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
 	switch (params->rq_wq_type) {
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
 		MLX5_SET(wq, wq, log_wqe_num_of_strides,
-			 mlx5e_mpwqe_get_log_num_strides(mdev, params) - 9);
+			 mlx5e_mpwqe_get_log_num_strides(mdev, params) -
+			 MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
 		MLX5_SET(wq, wq, log_wqe_stride_size,
-			 mlx5e_mpwqe_get_log_stride_size(mdev, params) - 6);
+			 mlx5e_mpwqe_get_log_stride_size(mdev, params) -
+			 MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
 		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
+		MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params));
 		break;
 	default: /* MLX5_WQ_TYPE_LINKED_LIST */
 		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
+		MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
 	}
 
 	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
 	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
-	MLX5_SET(wq, wq, log_wq_sz, params->log_rq_size);
 	MLX5_SET(wq, wq, pd, mdev->mlx5e_res.pdn);
 	MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
 	MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
@@ -1938,16 +1942,17 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
 				    struct mlx5e_params *params,
 				    struct mlx5e_cq_param *param)
 {
+	struct mlx5_core_dev *mdev = priv->mdev;
 	void *cqc = param->cqc;
 	u8 log_cq_size;
 
 	switch (params->rq_wq_type) {
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-		log_cq_size = params->log_rq_size +
-			mlx5e_mpwqe_get_log_num_strides(priv->mdev, params);
+		log_cq_size = mlx5e_mpwqe_get_log_rq_size(params) +
+			mlx5e_mpwqe_get_log_num_strides(mdev, params);
 		break;
 	default: /* MLX5_WQ_TYPE_LINKED_LIST */
-		log_cq_size = params->log_rq_size;
+		log_cq_size = params->log_rq_mtu_frames;
 	}
 
 	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
@@ -2498,10 +2503,10 @@ static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv,
 	mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, true);
 }
 
-static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
+static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
+			 struct mlx5e_params *params, u16 mtu)
 {
-	struct mlx5_core_dev *mdev = priv->mdev;
-	u16 hw_mtu = MLX5E_SW2HW_MTU(priv, mtu);
+	u16 hw_mtu = MLX5E_SW2HW_MTU(params, mtu);
 	int err;
 
 	err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
@@ -2513,9 +2518,9 @@ static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
 	return 0;
 }
 
-static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
+static void mlx5e_query_mtu(struct mlx5_core_dev *mdev,
+			    struct mlx5e_params *params, u16 *mtu)
 {
-	struct mlx5_core_dev *mdev = priv->mdev;
 	u16 hw_mtu = 0;
 	int err;
 
@@ -2523,25 +2528,27 @@ static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
 	if (err || !hw_mtu) /* fallback to port oper mtu */
 		mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
 
-	*mtu = MLX5E_HW2SW_MTU(priv, hw_mtu);
+	*mtu = MLX5E_HW2SW_MTU(params, hw_mtu);
 }
 
 static int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
 {
+	struct mlx5e_params *params = &priv->channels.params;
 	struct net_device *netdev = priv->netdev;
+	struct mlx5_core_dev *mdev = priv->mdev;
 	u16 mtu;
 	int err;
 
-	err = mlx5e_set_mtu(priv, netdev->mtu);
+	err = mlx5e_set_mtu(mdev, params, params->sw_mtu);
 	if (err)
 		return err;
 
-	mlx5e_query_mtu(priv, &mtu);
-	if (mtu != netdev->mtu)
+	mlx5e_query_mtu(mdev, params, &mtu);
+	if (mtu != params->sw_mtu)
 		netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
-			    __func__, mtu, netdev->mtu);
+			    __func__, mtu, params->sw_mtu);
 
-	netdev->mtu = mtu;
+	params->sw_mtu = mtu;
 	return 0;
 }
 
@@ -3222,20 +3229,28 @@ typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
 static int set_feature_lro(struct net_device *netdev, bool enable)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
 	struct mlx5e_channels new_channels = {};
+	struct mlx5e_params *old_params;
 	int err = 0;
 	bool reset;
 
 	mutex_lock(&priv->state_lock);
 
-	reset = (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST);
-	reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);
+	old_params = &priv->channels.params;
+	reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
 
-	new_channels.params = priv->channels.params;
+	new_channels.params = *old_params;
 	new_channels.params.lro_en = enable;
 
+	if (old_params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST) {
+		if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params) ==
+		    mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params))
+			reset = false;
+	}
+
 	if (!reset) {
-		priv->channels.params = new_channels.params;
+		*old_params = new_channels.params;
 		err = mlx5e_modify_tirs_lro(priv);
 		goto out;
 	}
@@ -3411,34 +3426,40 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5e_channels new_channels = {};
-	int curr_mtu;
+	struct mlx5e_params *params;
 	int err = 0;
 	bool reset;
 
 	mutex_lock(&priv->state_lock);
 
-	reset = !priv->channels.params.lro_en &&
-		(priv->channels.params.rq_wq_type !=
-		 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
+	params = &priv->channels.params;
+	reset = !params->lro_en;
 	reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);
 
-	curr_mtu = netdev->mtu;
-	netdev->mtu = new_mtu;
+	new_channels.params = *params;
+	new_channels.params.sw_mtu = new_mtu;
+
+	if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST) {
+		u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params);
+		u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params);
+
+		reset = reset && (ppw_old != ppw_new);
+	}
 
 	if (!reset) {
+		params->sw_mtu = new_mtu;
 		mlx5e_set_dev_port_mtu(priv);
+		netdev->mtu = params->sw_mtu;
 		goto out;
 	}
 
-	new_channels.params = priv->channels.params;
 	err = mlx5e_open_channels(priv, &new_channels);
-	if (err) {
-		netdev->mtu = curr_mtu;
+	if (err)
 		goto out;
-	}
 
 	mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_set_dev_port_mtu);
+	netdev->mtu = new_channels.params.sw_mtu;
 
out:
 	mutex_unlock(&priv->state_lock);
@@ -3728,21 +3749,11 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
 static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev,
 					struct mlx5e_txqsq *sq)
 {
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-	int irqn_not_used, eqn;
-	struct mlx5_eq *eq;
+	struct mlx5_eq *eq = sq->cq.mcq.eq;
 	u32 eqe_count;
 
-	if (mlx5_vector2eqn(mdev, sq->cq.mcq.vector, &eqn, &irqn_not_used))
-		return false;
-
-	eq = mlx5_eqn2eq(mdev, eqn);
-	if (IS_ERR(eq))
-		return false;
-
 	netdev_err(dev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
-		   eqn, eq->cons_index, eq->irqn);
+		   eq->eqn, eq->cons_index, eq->irqn);
 
 	eqe_count = mlx5_eq_poll_irq_disabled(eq);
 	if (!eqe_count)
@@ -4121,10 +4132,12 @@ static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeo
 
 void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
 			    struct mlx5e_params *params,
-			    u16 max_channels)
+			    u16 max_channels, u16 mtu)
 {
 	u8 cq_period_mode = 0;
 
+	params->sw_mtu = mtu;
+	params->hard_mtu = MLX5E_ETH_HARD_MTU;
 	params->num_channels = max_channels;
 	params->num_tc = 1;
 
@@ -4152,7 +4165,8 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
 	/* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */
 	if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
-		params->lro_en = !slow_pci_heuristic(mdev);
+		if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
+			params->lro_en = !slow_pci_heuristic(mdev);
 	params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
 
 	/* CQ moderation params */
@@ -4185,9 +4199,9 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
 	priv->profile = profile;
 	priv->ppriv = ppriv;
 	priv->msglevel = MLX5E_MSG_LEVEL;
-	priv->hard_mtu = MLX5E_ETH_HARD_MTU;
 
-	mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
+	mlx5e_build_nic_params(mdev, &priv->channels.params,
+			       profile->max_nch(mdev), netdev->mtu);
 
 	mutex_init(&priv->state_lock);
 
@@ -4464,7 +4478,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
 	/* MTU range: 68 - hw-specific max */
 	netdev->min_mtu = ETH_MIN_MTU;
 	mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
-	netdev->max_mtu = MLX5E_HW2SW_MTU(priv, max_mtu);
+	netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
 	mlx5e_set_dev_port_mtu(priv);
 
 	mlx5_lag_add(mdev, netdev);
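The linear-SKB decision added above hinges on whether one MTU-sized frame, plus headroom and the skb_shared_info tail, fits in a page-sized stride. A worked standalone sketch with illustrative constants (NET_SKB_PAD = 64, NET_IP_ALIGN = 2, 64-byte cache lines, 320-byte skb_shared_info, MLX5_MPWRQ_LOG_WQE_SZ = 18 — all assumed for the example, platform-dependent in reality):

#include <stdio.h>

#define SKB_DATA_ALIGN_SKETCH(x) (((x) + 63) / 64 * 64) /* assumed 64B cache lines */

static unsigned int order_base_2_sketch(unsigned int n)
{
	unsigned int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

int main(void)
{
	const unsigned int hw_mtu = 1500 + 14 + 4 + 4; /* MTU + ETH_HLEN + VLAN + FCS */
	const unsigned int headroom = 64 + 2;          /* assumed NET_SKB_PAD + NET_IP_ALIGN */
	const unsigned int shinfo = 320;               /* assumed sizeof(skb_shared_info) */
	const unsigned int page_size = 4096;
	unsigned int frag_sz, log_stride;

	frag_sz = SKB_DATA_ALIGN_SKETCH(headroom + hw_mtu) + shinfo;
	if (frag_sz > page_size) {
		printf("frag_sz %u > PAGE_SIZE: non-linear SKBs\n", frag_sz);
		return 0;
	}
	log_stride = order_base_2_sketch(frag_sz); /* stride = linear frag size */

	/* With these assumptions: frag_sz=1920, stride=2048, 128 strides/WQE. */
	printf("frag_sz=%u stride=%u strides/WQE=%u\n",
	       frag_sz, 1u << log_stride, 1u << (18 - log_stride));
	return 0;
}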
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index dd32f3e390ffea334050b985f87ed1e47ad1abf4..8e70fa9ef39a331238dede59bccb4d02ff3af12f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -877,9 +877,10 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
 					 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
 					 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
 
+	params->hard_mtu = MLX5E_ETH_HARD_MTU;
 	params->log_sq_size = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
 	params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST;
-	params->log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
+	params->log_rq_mtu_frames = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
 
 	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
 	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
@@ -926,8 +927,6 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
 
 	priv->channels.params.num_channels = profile->max_nch(mdev);
 
-	priv->hard_mtu = MLX5E_ETH_HARD_MTU;
-
 	mlx5e_build_rep_params(mdev, &priv->channels.params);
 	mlx5e_build_rep_netdev(netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 781b8f21d6d1ac7f43446a22312b907f74cd0626..176645762e491687dae7ae0fb28da282fa29c49f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -296,37 +296,28 @@ void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
 	mlx5e_free_rx_wqe(rq, wi);
 }
 
-static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
-{
-	return rq->mpwqe.num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER;
-}
-
 static inline void mlx5e_add_skb_frag_mpwqe(struct mlx5e_rq *rq,
 					    struct sk_buff *skb,
-					    struct mlx5e_mpw_info *wi,
-					    u32 page_idx, u32 frag_offset,
-					    u32 len)
+					    struct mlx5e_dma_info *di,
+					    u32 frag_offset, u32 len)
 {
 	unsigned int truesize = ALIGN(len, BIT(rq->mpwqe.log_stride_sz));
 
 	dma_sync_single_for_cpu(rq->pdev,
-				wi->umr.dma_info[page_idx].addr + frag_offset,
+				di->addr + frag_offset,
 				len, DMA_FROM_DEVICE);
-	wi->skbs_frags[page_idx]++;
+	page_ref_inc(di->page);
 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-			wi->umr.dma_info[page_idx].page, frag_offset,
-			len, truesize);
+			di->page, frag_offset, len, truesize);
 }
 
 static inline void
 mlx5e_copy_skb_header_mpwqe(struct device *pdev,
 			    struct sk_buff *skb,
-			    struct mlx5e_mpw_info *wi,
-			    u32 page_idx, u32 offset,
-			    u32 headlen)
+			    struct mlx5e_dma_info *dma_info,
+			    u32 offset, u32 headlen)
 {
 	u16 headlen_pg = min_t(u32, headlen, PAGE_SIZE - offset);
-	struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[page_idx];
 	unsigned int len;
 
 	/* Aligning len to sizeof(long) optimizes memcpy performance */
@@ -347,14 +338,49 @@ mlx5e_copy_skb_header_mpwqe(struct device *pdev,
 	}
 }
 
-static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
+void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
+{
+	const bool no_xdp_xmit =
+		bitmap_empty(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
+	struct mlx5e_dma_info *dma_info = wi->umr.dma_info;
+	int i;
+
+	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++)
+		if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
+			mlx5e_page_release(rq, &dma_info[i], true);
+}
+
+static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
+{
+	struct mlx5_wq_ll *wq = &rq->wq;
+	struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
+
+	rq->mpwqe.umr_in_progress = false;
+
+	mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
+
+	/* ensure wqes are visible to device before updating doorbell record */
+	dma_wmb();
+
+	mlx5_wq_ll_update_db_record(wq);
+}
+
+static inline u16 mlx5e_icosq_wrap_cnt(struct mlx5e_icosq *sq)
+{
+	return sq->pc >> MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
+}
+
+static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 {
 	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
+	struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0];
 	struct mlx5e_icosq *sq = &rq->channel->icosq;
 	struct mlx5_wq_cyc *wq = &sq->wq;
-	struct mlx5e_umr_wqe *wqe;
-	u8 num_wqebbs = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_BB);
+	struct mlx5e_umr_wqe *umr_wqe;
+	u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1);
+	int err;
 	u16 pi;
+	int i;
 
 	/* fill sq edge with nops to avoid wqe wrap around */
 	while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
@@ -362,90 +388,44 @@ static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
 		mlx5e_post_nop(wq, sq->sqn, &sq->pc);
 	}
 
-	wqe = mlx5_wq_cyc_get_wqe(wq, pi);
-	memcpy(wqe, &wi->umr.wqe, sizeof(*wqe));
-	wqe->ctrl.opmod_idx_opcode =
-		cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
-			    MLX5_OPCODE_UMR);
-
-	sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
-	sq->pc += num_wqebbs;
-	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
-}
-
-static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq,
-				    u16 ix)
-{
-	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
-	int pg_strides = mlx5e_mpwqe_strides_per_page(rq);
-	struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0];
-	int err;
-	int i;
+	umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+	if (unlikely(mlx5e_icosq_wrap_cnt(sq) < 2))
+		memcpy(umr_wqe, &rq->mpwqe.umr_wqe,
+		       offsetof(struct mlx5e_umr_wqe, inline_mtts));
 
 	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) {
 		err = mlx5e_page_alloc_mapped(rq, dma_info);
 		if (unlikely(err))
 			goto err_unmap;
-		wi->umr.mtt[i] = cpu_to_be64(dma_info->addr | MLX5_EN_WR);
-		page_ref_add(dma_info->page, pg_strides);
+		umr_wqe->inline_mtts[i].ptag = cpu_to_be64(dma_info->addr | MLX5_EN_WR);
 	}
 
-	memset(wi->skbs_frags, 0, sizeof(*wi->skbs_frags) * MLX5_MPWRQ_PAGES_PER_WQE);
+	bitmap_zero(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
 	wi->consumed_strides = 0;
+	rq->mpwqe.umr_in_progress = true;
+
+	umr_wqe->ctrl.opmod_idx_opcode =
+		cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
+			    MLX5_OPCODE_UMR);
+	umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset);
+
+	sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
+	sq->pc += MLX5E_UMR_WQEBBS;
+	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &umr_wqe->ctrl);
 
 	return 0;
 
 err_unmap:
 	while (--i >= 0) {
 		dma_info--;
-		page_ref_sub(dma_info->page, pg_strides);
 		mlx5e_page_release(rq, dma_info, true);
 	}
+	rq->stats.buff_alloc_err++;
 
 	return err;
 }
 
-void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
-{
-	int pg_strides = mlx5e_mpwqe_strides_per_page(rq);
-	struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0];
-	int i;
-
-	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) {
-		page_ref_sub(dma_info->page, pg_strides - wi->skbs_frags[i]);
-		mlx5e_page_release(rq, dma_info, true);
-	}
-}
-
-static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
-{
-	struct mlx5_wq_ll *wq = &rq->wq;
-	struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
-
-	rq->mpwqe.umr_in_progress = false;
-
-	mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
-
-	/* ensure wqes are visible to device before updating doorbell record */
-	dma_wmb();
-
-	mlx5_wq_ll_update_db_record(wq);
-}
-
-static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
-{
-	int err;
-
-	err = mlx5e_alloc_rx_umr_mpwqe(rq, ix);
-	if (unlikely(err)) {
-		rq->stats.buff_alloc_err++;
-		return err;
-	}
-	rq->mpwqe.umr_in_progress = true;
-	mlx5e_post_umr_wqe(rq, ix);
-	return 0;
-}
-
 void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 {
 	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
@@ -544,7 +524,7 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
 	if (!rq->mpwqe.umr_in_progress)
 		mlx5e_alloc_rx_mpwqe(rq, wq->head);
 
-	return true;
+	return false;
 }
 
 static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
@@ -766,8 +746,7 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
 
 	prefetchw(wqe);
 
-	if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE ||
-		     MLX5E_SW2HW_MTU(rq->channel->priv, rq->netdev->mtu) < dma_len)) {
+	if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || rq->hw_mtu < dma_len)) {
 		rq->stats.xdp_drop++;
 		return false;
 	}
@@ -806,7 +785,7 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
 	/* move page to reference to sq responsibility,
 	 * and mark so it's not put back in page-cache.
 	 */
-	rq->wqe.xdp_xmit = true;
+	__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
 	sq->db.di[pi] = *di;
 	sq->pc++;
 
@@ -854,6 +833,24 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
 	}
 }
 
+static inline
+struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
+				       u32 frag_size, u16 headroom,
+				       u32 cqe_bcnt)
+{
+	struct sk_buff *skb = build_skb(va, frag_size);
+
+	if (unlikely(!skb)) {
+		rq->stats.buff_alloc_err++;
+		return NULL;
+	}
+
+	skb_reserve(skb, headroom);
+	skb_put(skb, cqe_bcnt);
+
+	return skb;
+}
+
 static inline
 struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 			     struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
@@ -885,18 +882,13 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 	if (consumed)
 		return NULL; /* page/packet was consumed by XDP */
 
-	skb = build_skb(va, frag_size);
-	if (unlikely(!skb)) {
-		rq->stats.buff_alloc_err++;
+	skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt);
+	if (unlikely(!skb))
 		return NULL;
-	}
 
 	/* queue up for recycling/reuse */
 	page_ref_inc(di->page);
 
-	skb_reserve(skb, rx_headroom);
-	skb_put(skb, cqe_bcnt);
-
 	return skb;
 }
 
@@ -918,9 +910,8 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 	skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt);
 	if (!skb) {
 		/* probably for XDP */
-		if (rq->wqe.xdp_xmit) {
+		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
 			wi->di.page = NULL;
-			rq->wqe.xdp_xmit = false;
 			/* do not return page to cache, it will be returned on XDP_TX completion */
 			goto wq_ll_pop;
 		}
@@ -960,9 +951,8 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 
 	skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt);
 	if (!skb) {
-		if (rq->wqe.xdp_xmit) {
+		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
 			wi->di.page = NULL;
-			rq->wqe.xdp_xmit = false;
 			/* do not return page to cache, it will be returned on XDP_TX completion */
 			goto wq_ll_pop;
 		}
@@ -985,23 +975,28 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 }
 #endif
 
-static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq,
-					   struct mlx5_cqe64 *cqe,
-					   struct mlx5e_mpw_info *wi,
-					   u32 cqe_bcnt,
-					   struct sk_buff *skb)
+struct sk_buff *
+mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
+				   u16 cqe_bcnt, u32 head_offset, u32 page_idx)
 {
-	u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
-	u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
-	u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
-	u32 page_idx = wqe_offset >> PAGE_SHIFT;
-	u32 head_page_idx = page_idx;
 	u16 headlen = min_t(u16, MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, cqe_bcnt);
+	struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
 	u32 frag_offset = head_offset + headlen;
-	u16 byte_cnt = cqe_bcnt - headlen;
+	u32 byte_cnt = cqe_bcnt - headlen;
+	struct mlx5e_dma_info *head_di = di;
+	struct sk_buff *skb;
+
+	skb = napi_alloc_skb(rq->cq.napi,
+			     ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, sizeof(long)));
+	if (unlikely(!skb)) {
+		rq->stats.buff_alloc_err++;
+		return NULL;
+	}
+
+	prefetchw(skb->data);
 
 	if (unlikely(frag_offset >= PAGE_SIZE)) {
-		page_idx++;
+		di++;
 		frag_offset -= PAGE_SIZE;
 	}
 
@@ -1009,18 +1004,59 @@ static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq,
 		u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
 
-		mlx5e_add_skb_frag_mpwqe(rq, skb, wi, page_idx, frag_offset,
+		mlx5e_add_skb_frag_mpwqe(rq, skb, di, frag_offset,
 					 pg_consumed_bytes);
 		byte_cnt -= pg_consumed_bytes;
 		frag_offset = 0;
-		page_idx++;
+		di++;
 	}
 	/* copy header */
-	mlx5e_copy_skb_header_mpwqe(rq->pdev, skb, wi, head_page_idx,
+	mlx5e_copy_skb_header_mpwqe(rq->pdev, skb, head_di,
 				    head_offset, headlen);
 	/* skb linear part was allocated with headlen and aligned to long */
 	skb->tail += headlen;
 	skb->len += headlen;
+
+	return skb;
+}
+
+struct sk_buff *
+mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
+				u16 cqe_bcnt, u32 head_offset, u32 page_idx)
+{
+	struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
+	u16 rx_headroom = rq->buff.headroom;
+	u32 cqe_bcnt32 = cqe_bcnt;
+	struct sk_buff *skb;
+	void *va, *data;
+	u32 frag_size;
+	bool consumed;
+
+	va = page_address(di->page) + head_offset;
+	data = va + rx_headroom;
+	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
+
+	dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset,
+				      frag_size, DMA_FROM_DEVICE);
+	prefetch(data);
+
+	rcu_read_lock();
+	consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt32);
+	rcu_read_unlock();
+	if (consumed) {
+		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
+			__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
+		return NULL; /* page/packet was consumed by XDP */
+	}
+
+	skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32);
+	if (unlikely(!skb))
+		return NULL;
+
+	/* queue up for recycling/reuse */
+	page_ref_inc(di->page);
+
+	return skb;
 }
 
 void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
@@ -1028,7 +1064,11 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 	u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
 	u16 wqe_id = be16_to_cpu(cqe->wqe_id);
 	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
-	struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id);
+	u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
+	u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
+	u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
+	u32 page_idx = wqe_offset >> PAGE_SHIFT;
+	struct mlx5e_rx_wqe *wqe;
 	struct sk_buff *skb;
 	u16 cqe_bcnt;
 
@@ -1044,18 +1084,13 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 		goto mpwrq_cqe_out;
 	}
 
-	skb = napi_alloc_skb(rq->cq.napi,
-			     ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD,
-				   sizeof(long)));
-	if (unlikely(!skb)) {
-		rq->stats.buff_alloc_err++;
-		goto mpwrq_cqe_out;
-	}
-
-	prefetchw(skb->data);
 	cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
 
-	mlx5e_mpwqe_fill_rx_skb(rq, cqe, wi, cqe_bcnt, skb);
+	skb = rq->mpwqe.skb_from_cqe_mpwrq(rq, wi, cqe_bcnt, head_offset,
+					   page_idx);
+	if (!skb)
+		goto mpwrq_cqe_out;
+
 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
 	napi_gro_receive(rq->cq.napi, skb);
 
@@ -1063,6 +1098,7 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 	if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
 		return;
 
+	wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id);
 	mlx5e_free_rx_mpwqe(rq, wi);
 	mlx5_wq_ll_pop(&rq->wq, cqe->wqe_id, &wqe->next.next_wqe_index);
 }
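A back-of-envelope check of the inline-MTT UMR WQE that mlx5e_alloc_rx_mpwqe() now posts. The segment sizes below (16B ctrl, 48B UMR ctrl, 64B mkey, 8B per MTT, 64B MTT alignment) are assumptions for the example, not taken from the headers:

#include <stdio.h>

#define DIV_ROUND_UP_SKETCH(n, d) (((n) + (d) - 1) / (d))
#define ALIGN_UP(x, a) (DIV_ROUND_UP_SKETCH(x, a) * (a))

int main(void)
{
	const int ctrl_seg = 16, umr_ctrl_seg = 48, mkey_seg = 64; /* assumed sizes */
	const int pages_per_wqe = 64, mtt_sz = 8, mtt_align = 64;  /* assumed, 4K pages */
	const int wqebb = 64, wqe_ds = 16; /* MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS */
	int inline_sz = ctrl_seg + umr_ctrl_seg + mkey_seg +
			ALIGN_UP(pages_per_wqe * mtt_sz, mtt_align);

	/* Expected with these assumptions: 640 bytes -> 10 WQEBBs, ds_cnt 40;
	 * this must fit the device's max_wqe_sz_sq, else Striding RQ is disabled.
	 */
	printf("UMR WQE inline size=%d -> %d WQEBBs, ds_cnt=%d\n",
	       inline_sz, DIV_ROUND_UP_SKETCH(inline_sz, wqebb),
	       DIV_ROUND_UP_SKETCH(inline_sz, wqe_ds));
	return 0;
}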
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index d7bb10ab2173aba1b2adc1180e8d37a272f402a8..70066975f1b57c986252b79b450a00cf44cb2ae2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -245,7 +245,7 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
 
 	force_state = MLX5_GET(teardown_hca_out, out, force_state);
 	if (force_state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) {
-		mlx5_core_err(dev, "teardown with force mode failed\n");
+		mlx5_core_warn(dev, "teardown with force mode failed, doing normal teardown\n");
 		return -EIO;
 	}
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index a35608faf8d25d8296ec36b24317efeb35d65c25..af3bb2f7a5048240d615344770f3c07ca250649b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -61,11 +61,12 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
 	mlx5e_init_rq_type_params(mdev, params);
 
 	/* RQ size in ipoib by default is 512 */
-	params->log_rq_size = is_kdump_kernel() ?
+	params->log_rq_mtu_frames = is_kdump_kernel() ?
 		MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
 		MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE;
 
 	params->lro_en = false;
+	params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
 }
 
 /* Called directly after IPoIB netdevice was created to initialize SW structs */
@@ -81,10 +82,10 @@ void mlx5i_init(struct mlx5_core_dev *mdev,
 	priv->netdev = netdev;
 	priv->profile = profile;
 	priv->ppriv = ppriv;
-	priv->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
 	mutex_init(&priv->state_lock);
 
-	mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
+	mlx5e_build_nic_params(mdev, &priv->channels.params,
+			       profile->max_nch(mdev), netdev->mtu);
 	mlx5i_build_nic_params(mdev, &priv->channels.params);
 
 	mlx5e_timestamp_init(priv);
@@ -368,25 +369,27 @@ static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct mlx5e_priv *priv = mlx5i_epriv(netdev);
 	struct mlx5e_channels new_channels = {};
-	int curr_mtu;
+	struct mlx5e_params *params;
 	int err = 0;
 
 	mutex_lock(&priv->state_lock);
 
-	curr_mtu = netdev->mtu;
-	netdev->mtu = new_mtu;
+	params = &priv->channels.params;
 
-	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+		params->sw_mtu = new_mtu;
+		netdev->mtu = params->sw_mtu;
 		goto out;
+	}
 
-	new_channels.params = priv->channels.params;
+	new_channels.params = *params;
+	new_channels.params.sw_mtu = new_mtu;
 
 	err = mlx5e_open_channels(priv, &new_channels);
-	if (err) {
-		netdev->mtu = curr_mtu;
+	if (err)
 		goto out;
-	}
 
 	mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+	netdev->mtu = new_channels.params.sw_mtu;
 
out:
 	mutex_unlock(&priv->state_lock);
@@ -540,7 +543,7 @@ static int mlx5i_detach_mcast(struct net_device *netdev, struct ib_device *hca,
 
 	err = mlx5_core_detach_mcg(mdev, gid, ipriv->qp.qpn);
 	if (err)
-		mlx5_core_dbg(mdev, "failed dettaching QPN 0x%x, MGID %pI6\n",
+		mlx5_core_dbg(mdev, "failed detaching QPN 0x%x, MGID %pI6\n",
 			      ipriv->qp.qpn, gid->raw);
 
 	return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index b69e9d847a6b3af331cf776eb0dd4e56e2cfd94c..54a188f41f90ca329f1f50e591a6e8fca85731fe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -290,7 +290,7 @@ static void mlx5i_pkey_init(struct mlx5_core_dev *mdev,
 	netdev->ethtool_ops = &mlx5i_pkey_ethtool_ops;
 
 	/* Use dummy rqs */
-	priv->channels.params.log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
+	priv->channels.params.log_rq_mtu_frames = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
 }
 
 /* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index c64957b5ef47e9cd81111744379a1f9537af7361..dae1c5c5d27c5abfeac951465f8fab240bd69ff0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -354,27 +354,6 @@ int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn)
 	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 
-int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out)
-{
-	u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)] = {0};
-	void *srqc;
-	void *xrc_srqc;
-	int err;
-
-	MLX5_SET(query_xrc_srq_in, in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ);
-	MLX5_SET(query_xrc_srq_in, in, xrc_srqn, xsrqn);
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out,
-			    MLX5_ST_SZ_BYTES(query_xrc_srq_out));
-	if (!err) {
-		xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, out,
-					xrc_srq_context_entry);
-		srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
-		memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc));
-	}
-
-	return err;
-}
-
 int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm)
 {
 	u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 4b5939c78cddd25479e4eb853ab679ae2e449bff..12758595459bd379571768d9e7db17f5ffc5b95f 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -782,6 +782,9 @@ static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
 	return (u64)lo | ((u64)hi << 32);
 }
 
+#define MLX5_MPWQE_LOG_NUM_STRIDES_BASE (9)
+#define MLX5_MPWQE_LOG_STRIDE_SZ_BASE (6)
+
 struct mpwrq_cqe_bc {
 	__be16 filler_consumed_strides;
 	__be16 byte_cnt;
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index c19e611d27826165695aa5017d2bfa1e187651ba..d25011f8481504505950b80e03e4ec35906f09ec 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1038,7 +1038,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8 reserved_at_398[0x3];
 	u8 log_max_tis_per_sq[0x5];
 
-	u8 reserved_at_3a0[0x3];
+	u8 ext_stride_num_range[0x1];
+	u8 reserved_at_3a1[0x2];
 	u8 log_max_stride_sz_rq[0x5];
 	u8 reserved_at_3a8[0x3];
 	u8 log_min_stride_sz_rq[0x5];
@@ -1205,9 +1206,9 @@ struct mlx5_ifc_wq_bits {
 	u8 log_hairpin_num_packets[0x5];
 	u8 reserved_at_128[0x3];
 	u8 log_hairpin_data_sz[0x5];
-	u8 reserved_at_130[0x5];
-	u8 log_wqe_num_of_strides[0x3];
+	u8 reserved_at_130[0x4];
+	u8 log_wqe_num_of_strides[0x4];
 	u8 two_byte_shift_en[0x1];
 	u8 reserved_at_139[0x4];
 	u8 log_wqe_stride_size[0x3];
 
diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
index 80d7aa8b2831ddb8a8f8b5376ec1777c217b91b8..83a33a1873a6823be1a033dec3f0743a08732f88 100644
--- a/include/linux/mlx5/transobj.h
+++ b/include/linux/mlx5/transobj.h
@@ -67,7 +67,6 @@ int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
 int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
 			  u32 *rmpn);
 int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn);
-int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
 int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
 
 int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,