/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __MLX5_EN_H__
#define __MLX5_EN_H__

#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/crash_dump.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/transobj.h>
#include <linux/mlx5/fs.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/xdp.h>
#include <linux/net_dim.h>
#include <linux/bits.h>
#include "wq.h"
#include "mlx5_core.h"
#include "en_stats.h"
#include "en/fs.h"

extern const struct net_device_ops mlx5e_netdev_ops;
struct page_pool;

#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
#define MLX5E_METADATA_ETHER_LEN 8

#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)

#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)

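/* HW MTU counts the L2 overhead captured in params->hard_mtu (headers and
 * FCS, see MLX5E_ETH_HARD_MTU); the SW MTU the stack configures does not.
 */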
#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))

#define MLX5E_MAX_PRIORITY      8
#define MLX5E_MAX_DSCP          64
#define MLX5E_MAX_NUM_TC	8

#define MLX5_RX_HEADROOM NET_SKB_PAD
#define MLX5_SKB_FRAG_SZ(len)	(SKB_DATA_ALIGN(len) +	\
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

#define MLX5E_RX_MAX_HEAD (256)

#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
	(6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
#define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \
	max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \
	MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD))

#define MLX5_MPWRQ_LOG_WQE_SZ			18
#define MLX5_MPWRQ_WQE_PAGE_ORDER  (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
				    MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
#define MLX5_MPWRQ_PAGES_PER_WQE		BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)

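/* An MTT entry is 8 bytes; OCTW converts a page count into 16-byte octwords
 * (two entries each), with the count aligned to 8 entries as HW requires.
 */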
#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
#define MLX5E_REQUIRED_WQE_MTTS		(ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
#define MLX5E_LOG_ALIGNED_MPWQE_PPW	(ilog2(MLX5E_REQUIRED_WQE_MTTS))
#define MLX5E_REQUIRED_MTTS(wqes)	(wqes * MLX5E_REQUIRED_WQE_MTTS)
#define MLX5E_MAX_RQ_NUM_MTTS	\
	((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW	\
		(ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
#define MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW \
	(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \
	 (MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU))

#define MLX5E_MIN_SKB_FRAG_SZ		(MLX5_SKB_FRAG_SZ(MLX5_RX_HEADROOM))
#define MLX5E_LOG_MAX_RX_WQE_BULK	\
	(ilog2(PAGE_SIZE / roundup_pow_of_two(MLX5E_MIN_SKB_FRAG_SZ)))

#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE                0x6
#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE                0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE                0xd

#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE (1 + MLX5E_LOG_MAX_RX_WQE_BULK)
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE                0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd,	\
					       MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW)

#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW            0x2

#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ                 (64 * 1024)
#define MLX5E_DEFAULT_LRO_TIMEOUT                       32
#define MLX5E_LRO_TIMEOUT_ARR_SIZE                      4

#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC      0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS      0x20
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC      0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS      0x20
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES                0x80
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW            0x2

#define MLX5E_LOG_INDIR_RQT_SIZE       0x7
#define MLX5E_INDIR_RQT_SIZE           BIT(MLX5E_LOG_INDIR_RQT_SIZE)
#define MLX5E_MIN_NUM_CHANNELS         0x1
#define MLX5E_MAX_NUM_CHANNELS         (MLX5E_INDIR_RQT_SIZE >> 1)
#define MLX5E_MAX_NUM_SQS              (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET        128
#define MLX5E_SQ_RECOVER_MIN_INTERVAL  500 /* msecs */

#define MLX5E_UMR_WQE_INLINE_SZ \
	(sizeof(struct mlx5e_umr_wqe) + \
	 ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(struct mlx5_mtt), \
	       MLX5_UMR_MTT_ALIGNMENT))
#define MLX5E_UMR_WQEBBS \
	(DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))

#define MLX5E_MSG_LEVEL			NETIF_MSG_LINK

#define mlx5e_dbg(mlevel, priv, format, ...)                    \
do {                                                            \
	if (NETIF_MSG_##mlevel & (priv)->msglevel)              \
		netdev_warn(priv->netdev, format,               \
			    ##__VA_ARGS__);                     \
} while (0)

static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
{
	switch (wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW,
			     wq_size / 2);
	default:
		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES,
			     wq_size / 2);
	}
}

/* Use this function to get the max num channels (rxqs/txqs); only for netdev creation */
static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
	return is_kdump_kernel() ?
		MLX5E_MIN_NUM_CHANNELS :
		min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS);
}

/* Use this function to get max num channels after netdev was created */
static inline int mlx5e_get_netdev_max_channels(struct net_device *netdev)
{
	return min_t(unsigned int, netdev->num_rx_queues,
		     netdev->num_tx_queues);
}

struct mlx5e_tx_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_eth_seg  eth;
	struct mlx5_wqe_data_seg data[0];
};

struct mlx5e_rx_wqe_ll {
	struct mlx5_wqe_srq_next_seg  next;
	struct mlx5_wqe_data_seg      data[0];
};

struct mlx5e_rx_wqe_cyc {
	struct mlx5_wqe_data_seg      data[0];
};

struct mlx5e_umr_wqe {
	struct mlx5_wqe_ctrl_seg       ctrl;
	struct mlx5_wqe_umr_ctrl_seg   uctrl;
	struct mlx5_mkey_seg           mkc;
	struct mlx5_mtt                inline_mtts[0];
};

extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];

enum mlx5e_priv_flag {
	MLX5E_PFLAG_RX_CQE_BASED_MODER,
	MLX5E_PFLAG_TX_CQE_BASED_MODER,
	MLX5E_PFLAG_RX_CQE_COMPRESS,
	MLX5E_PFLAG_RX_STRIDING_RQ,
	MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
	MLX5E_PFLAG_XDP_TX_MPWQE,
	MLX5E_NUM_PFLAGS, /* Keep last */
};

#define MLX5E_SET_PFLAG(params, pflag, enable)			\
	do {							\
		if (enable)					\
			(params)->pflags |= BIT(pflag);		\
		else						\
			(params)->pflags &= ~(BIT(pflag));	\
	} while (0)

#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag))))

#ifdef CONFIG_MLX5_CORE_EN_DCB
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
#endif

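/* Channel configuration; most fields take effect only when the channels are
 * recreated (see mlx5e_safe_switch_channels()).
 */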
struct mlx5e_params {
	u8  log_sq_size;
	u8  rq_wq_type;
	u8  log_rq_mtu_frames;
	u16 num_channels;
	u8  num_tc;
	bool rx_cqe_compress_def;
	struct net_dim_cq_moder rx_cq_moderation;
	struct net_dim_cq_moder tx_cq_moderation;
	bool lro_en;
	u8  tx_min_inline_mode;
	bool vlan_strip_disable;
	bool scatter_fcs_en;
	bool rx_dim_enabled;
	bool tx_dim_enabled;
	u32 lro_timeout;
	u32 pflags;
	struct bpf_prog *xdp_prog;
	unsigned int sw_mtu;
	int hard_mtu;
};

#ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_cee_config {
	/* bw pct for priority group */
	u8                         pg_bw_pct[CEE_DCBX_MAX_PGS];
	u8                         prio_to_pg_map[CEE_DCBX_MAX_PRIO];
	bool                       pfc_setting[CEE_DCBX_MAX_PRIO];
	bool                       pfc_enable;
};

enum {
	MLX5_DCB_CHG_RESET,
	MLX5_DCB_NO_CHG,
	MLX5_DCB_CHG_NO_RESET,
};

struct mlx5e_dcbx {
	enum mlx5_dcbx_oper_mode   mode;
	struct mlx5e_cee_config    cee_cfg; /* pending configuration */
	u8                         dscp_app_cnt;

	/* The only setting that cannot be read from FW */
	u8                         tc_tsa[IEEE_8021QAZ_MAX_TCS];
	u8                         cap;

	/* Buffer configuration */
	bool                       manual_buffer;
	u32                        cable_len;
	u32                        xoff;
};

struct mlx5e_dcbx_dp {
	u8                         dscp2prio[MLX5E_MAX_DSCP];
	u8                         trust_state;
};
#endif

enum {
	MLX5E_RQ_STATE_ENABLED,
	MLX5E_RQ_STATE_AM,
	MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
};

struct mlx5e_cq {
	/* data path - accessed per cqe */
	struct mlx5_cqwq           wq;

	/* data path - accessed per napi poll */
	u16                        event_ctr;
	struct napi_struct        *napi;
	struct mlx5_core_cq        mcq;
	struct mlx5e_channel      *channel;

	/* control */
	struct mlx5_core_dev      *mdev;
	struct mlx5_wq_ctrl        wq_ctrl;
} ____cacheline_aligned_in_smp;

struct mlx5e_cq_decomp {
	/* cqe decompression */
	struct mlx5_cqe64          title;
	struct mlx5_mini_cqe8      mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
	u8                         mini_arr_idx;
	u16                        left;
	u16                        wqe_counter;
} ____cacheline_aligned_in_smp;

struct mlx5e_tx_wqe_info {
	struct sk_buff *skb;
	u32 num_bytes;
	u8  num_wqebbs;
	u8  num_dma;
};

enum mlx5e_dma_map_type {
	MLX5E_DMA_MAP_SINGLE,
	MLX5E_DMA_MAP_PAGE
};

struct mlx5e_sq_dma {
	dma_addr_t              addr;
	u32                     size;
	enum mlx5e_dma_map_type type;
};

enum {
	MLX5E_SQ_STATE_ENABLED,
	MLX5E_SQ_STATE_RECOVERING,
	MLX5E_SQ_STATE_IPSEC,
	MLX5E_SQ_STATE_AM,
	MLX5E_SQ_STATE_TLS,
};

struct mlx5e_sq_wqe_info {
	u8  opcode;
};
struct mlx5e_txqsq {
	/* data path */

	/* dirtied @completion */
	u16                        cc;
	u32                        dma_fifo_cc;
	struct net_dim             dim; /* Adaptive Moderation */

	/* dirtied @xmit */
	u16                        pc ____cacheline_aligned_in_smp;
	u32                        dma_fifo_pc;

	struct mlx5e_cq            cq;

	/* read only */
	struct mlx5_wq_cyc         wq;
	u32                        dma_fifo_mask;
	struct mlx5e_sq_stats     *stats;
	struct {
		struct mlx5e_sq_dma       *dma_fifo;
		struct mlx5e_tx_wqe_info  *wqe_info;
	} db;
	void __iomem              *uar_map;
	struct netdev_queue       *txq;
	u32                        sqn;
	u8                         min_inline_mode;
	struct device             *pdev;
	__be32                     mkey_be;
	unsigned long              state;
	struct hwtstamp_config    *tstamp;
	struct mlx5_clock         *clock;

	/* control path */
	struct mlx5_wq_ctrl        wq_ctrl;
	struct mlx5e_channel      *channel;
	int                        txq_ix;
	u32                        rate_limit;
	struct work_struct         recover_work;
} ____cacheline_aligned_in_smp;

struct mlx5e_dma_info {
	struct page     *page;
	dma_addr_t      addr;
};

struct mlx5e_xdp_info {
	struct xdp_frame      *xdpf;
	dma_addr_t            dma_addr;
	struct mlx5e_dma_info di;
};

struct mlx5e_xdp_info_fifo {
	struct mlx5e_xdp_info *xi;
	u32 *cc;
	u32 *pc;
	u32 mask;
};

struct mlx5e_xdp_wqe_info {
	u8 num_wqebbs;
	u8 num_ds;
};

struct mlx5e_xdp_mpwqe {
	/* Current MPWQE session */
	struct mlx5e_tx_wqe *wqe;
	u8                   ds_count;
	u8                   max_ds_count;
};

struct mlx5e_xdpsq;
typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq*,
					struct mlx5e_xdp_info*);
struct mlx5e_xdpsq {
	/* data path */

	/* dirtied @completion */
	u32                        xdpi_fifo_cc;
	u16                        cc;
	bool                       redirect_flush;

	/* dirtied @xmit */
	u32                        xdpi_fifo_pc ____cacheline_aligned_in_smp;
	u16                        pc;
	struct mlx5_wqe_ctrl_seg   *doorbell_cseg;
	struct mlx5e_xdp_mpwqe     mpwqe;

	struct mlx5e_cq            cq;

	/* read only */
	struct mlx5_wq_cyc         wq;
	struct mlx5e_xdpsq_stats  *stats;
	mlx5e_fp_xmit_xdp_frame    xmit_xdp_frame;
	struct {
		struct mlx5e_xdp_wqe_info *wqe_info;
		struct mlx5e_xdp_info_fifo xdpi_fifo;
	} db;
	void __iomem              *uar_map;
	u32                        sqn;
	struct device             *pdev;
	__be32                     mkey_be;
	u8                         min_inline_mode;
	unsigned long              state;
	unsigned int               hw_mtu;

	/* control path */
	struct mlx5_wq_ctrl        wq_ctrl;
	struct mlx5e_channel      *channel;
} ____cacheline_aligned_in_smp;

struct mlx5e_icosq {
	/* data path */

	/* dirtied @xmit */
	u16                        pc ____cacheline_aligned_in_smp;

	struct mlx5e_cq            cq;

	/* write@xmit, read@completion */
	struct {
		struct mlx5e_sq_wqe_info *ico_wqe;
	} db;

	/* read only */
	struct mlx5_wq_cyc         wq;
	void __iomem              *uar_map;
	u32                        sqn;
	unsigned long              state;

	/* control path */
	struct mlx5_wq_ctrl        wq_ctrl;
	struct mlx5e_channel      *channel;
} ____cacheline_aligned_in_smp;

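/* A cyclic WQ has room for n WQEBBs when the masked distance from pc back to
 * cc is at least n; cc == pc means the ring is empty (full room).
 */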
static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{
	return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
}

struct mlx5e_wqe_frag_info {
	struct mlx5e_dma_info *di;
	u32 offset;
	bool last_in_page;
};

struct mlx5e_umr_dma_info {
	struct mlx5e_dma_info  dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
};

struct mlx5e_mpw_info {
	struct mlx5e_umr_dma_info umr;
	u16 consumed_strides;
	DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
};

#define MLX5E_MAX_RX_FRAGS 4

/* a single cache unit can serve one napi call (for non-striding rq)
 * or one MPWQE (for striding rq).
 */
#define MLX5E_CACHE_UNIT	(MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
				 MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
#define MLX5E_CACHE_SIZE	(4 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
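/* Ring of recently released RX pages, kept with their DMA mappings so the
 * RX path can recycle them without going back to the page allocator.
 */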
struct mlx5e_page_cache {
	u32 head;
	u32 tail;
	struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
};

struct mlx5e_rq;
typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
			       u16 cqe_bcnt, u32 head_offset, u32 page_idx);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
			 struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);

enum mlx5e_rq_flag {
	MLX5E_RQ_FLAG_XDP_XMIT = BIT(0),
};

struct mlx5e_rq_frag_info {
	int frag_size;
	int frag_stride;
};

struct mlx5e_rq_frags_info {
	struct mlx5e_rq_frag_info arr[MLX5E_MAX_RX_FRAGS];
	u8 num_frags;
	u8 log_num_frags;
	u8 wqe_bulk;
};

struct mlx5e_rq {
	/* data path */
	union {
		struct {
			struct mlx5_wq_cyc          wq;
			struct mlx5e_wqe_frag_info *frags;
			struct mlx5e_dma_info      *di;
			struct mlx5e_rq_frags_info  info;
			mlx5e_fp_skb_from_cqe       skb_from_cqe;
		} wqe;
		struct {
			struct mlx5_wq_ll      wq;
			struct mlx5e_umr_wqe   umr_wqe;
			struct mlx5e_mpw_info *info;
			mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
			u16                    num_strides;
			u8                     log_stride_sz;
			bool                   umr_in_progress;
		} mpwqe;
	};
	struct {
		u16            headroom;
		u8             map_dir;   /* dma map direction */
	} buff;

	struct mlx5e_channel  *channel;
	struct device         *pdev;
	struct net_device     *netdev;
	struct mlx5e_rq_stats *stats;
	struct mlx5e_cq        cq;
	struct mlx5e_cq_decomp cqd;
	struct mlx5e_page_cache page_cache;
	struct hwtstamp_config *tstamp;
	struct mlx5_clock      *clock;

	mlx5e_fp_handle_rx_cqe handle_rx_cqe;
	mlx5e_fp_post_rx_wqes  post_wqes;
	mlx5e_fp_dealloc_wqe   dealloc_wqe;

	unsigned long          state;
	int                    ix;
	unsigned int           hw_mtu;

	struct net_dim         dim; /* Dynamic Interrupt Moderation */

	/* XDP */
	struct bpf_prog       *xdp_prog;
	struct mlx5e_xdpsq     xdpsq;
	DECLARE_BITMAP(flags, 8);
	struct page_pool      *page_pool;

	/* control */
	struct mlx5_wq_ctrl    wq_ctrl;
	__be32                 mkey_be;
	u8                     wq_type;
	u32                    rqn;
	struct mlx5_core_dev  *mdev;
	struct mlx5_core_mkey  umr_mkey;

	/* XDP read-mostly */
	struct xdp_rxq_info    xdp_rxq;
} ____cacheline_aligned_in_smp;

struct mlx5e_channel {
	/* data path */
	struct mlx5e_rq            rq;
	struct mlx5e_txqsq         sq[MLX5E_MAX_NUM_TC];
	struct mlx5e_icosq         icosq;   /* internal control operations */
	bool                       xdp;
	struct napi_struct         napi;
	struct device             *pdev;
	struct net_device         *netdev;
	__be32                     mkey_be;
	u8                         num_tc;

	/* XDP_REDIRECT */
	struct mlx5e_xdpsq         xdpsq;

	/* data path - accessed per napi poll */
	struct irq_desc *irq_desc;
	struct mlx5e_ch_stats     *stats;

	/* control */
	struct mlx5e_priv         *priv;
	struct mlx5_core_dev      *mdev;
	struct hwtstamp_config    *tstamp;
	int                        ix;
	int                        cpu;
	cpumask_var_t              xps_cpumask;
};

struct mlx5e_channels {
	struct mlx5e_channel **c;
	unsigned int           num;
	struct mlx5e_params    params;
};

struct mlx5e_channel_stats {
	struct mlx5e_ch_stats ch;
	struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
	struct mlx5e_rq_stats rq;
	struct mlx5e_xdpsq_stats rq_xdpsq;
	struct mlx5e_xdpsq_stats xdpsq;
} ____cacheline_aligned_in_smp;

enum {
	MLX5E_STATE_OPENED,
	MLX5E_STATE_DESTROYING,
	MLX5E_STATE_XDP_TX_ENABLED,
};

struct mlx5e_rqt {
	u32              rqtn;
	bool		 enabled;
};

struct mlx5e_tir {
	u32		  tirn;
	struct mlx5e_rqt  rqt;
	struct list_head  list;
};

enum {
	MLX5E_TC_PRIO = 0,
	MLX5E_NIC_PRIO
};

struct mlx5e_rss_params {
	u32	indirection_rqt[MLX5E_INDIR_RQT_SIZE];
	u32	rx_hash_fields[MLX5E_NUM_INDIR_TIRS];
	u8	toeplitz_hash_key[40];
	u8	hfunc;
};

struct mlx5e_modify_sq_param {
	int curr_state;
	int next_state;
	int rl_update;
	int rl_index;
};

struct mlx5e_priv {
	/* priv data path fields - start */
	struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
	int channel_tc2txq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
#ifdef CONFIG_MLX5_CORE_EN_DCB
	struct mlx5e_dcbx_dp       dcbx_dp;
#endif
	/* priv data path fields - end */

	u32                        msglevel;
	unsigned long              state;
	struct mutex               state_lock; /* Protects Interface state */
	struct mlx5e_rq            drop_rq;

	struct mlx5e_channels      channels;
	u32                        tisn[MLX5E_MAX_NUM_TC];
	struct mlx5e_rqt           indir_rqt;
	struct mlx5e_tir           indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_tir           inner_indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_tir           direct_tir[MLX5E_MAX_NUM_CHANNELS];
	struct mlx5e_rss_params    rss_params;
	u32                        tx_rates[MLX5E_MAX_NUM_SQS];

	struct mlx5e_flow_steering fs;

	struct workqueue_struct    *wq;
	struct work_struct         update_carrier_work;
	struct work_struct         set_rx_mode_work;
	struct work_struct         tx_timeout_work;
	struct work_struct         update_stats_work;
	struct work_struct         monitor_counters_work;
	struct mlx5_nb             monitor_counters_nb;

	struct mlx5_core_dev      *mdev;
	struct net_device         *netdev;
	struct mlx5e_stats         stats;
	struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
	u8                         max_opened_tc;
	struct hwtstamp_config     tstamp;
	u16                        q_counter;
	u16                        drop_rq_q_counter;
	struct notifier_block      events_nb;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	struct mlx5e_dcbx          dcbx;
#endif

	const struct mlx5e_profile *profile;
	void                      *ppriv;
#ifdef CONFIG_MLX5_EN_IPSEC
	struct mlx5e_ipsec        *ipsec;
#endif
#ifdef CONFIG_MLX5_EN_TLS
	struct mlx5e_tls          *tls;
#endif
	struct devlink_health_reporter *tx_reporter;
};

struct mlx5e_profile {
	int	(*init)(struct mlx5_core_dev *mdev,
			struct net_device *netdev,
			const struct mlx5e_profile *profile, void *ppriv);
	void	(*cleanup)(struct mlx5e_priv *priv);
	int	(*init_rx)(struct mlx5e_priv *priv);
	void	(*cleanup_rx)(struct mlx5e_priv *priv);
	int	(*init_tx)(struct mlx5e_priv *priv);
	void	(*cleanup_tx)(struct mlx5e_priv *priv);
	void	(*enable)(struct mlx5e_priv *priv);
	void	(*disable)(struct mlx5e_priv *priv);
	void	(*update_stats)(struct mlx5e_priv *priv);
	void	(*update_carrier)(struct mlx5e_priv *priv);
	struct {
		mlx5e_fp_handle_rx_cqe handle_rx_cqe;
		mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
	} rx_handlers;
	int	max_tc;
};

void mlx5e_build_ptys2ethtool_map(void);

u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       struct net_device *sb_dev);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			  struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);

void mlx5e_completion_event(struct mlx5_core_cq *mcq);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);

bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
				struct mlx5e_params *params);

void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info);
void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
			bool recycle);
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
				u16 cqe_bcnt, u32 head_offset, u32 page_idx);
struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
				   u16 cqe_bcnt, u32 head_offset, u32 page_idx);
struct sk_buff *
mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
			  struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
struct sk_buff *
mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
			     struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);

void mlx5e_update_stats(struct mlx5e_priv *priv);
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);

void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
		     u64 *buf);
void mlx5e_set_rx_mode_work(struct work_struct *work);

int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);

int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
			  u16 vid);
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
			   u16 vid);
void mlx5e_timestamp_init(struct mlx5e_priv *priv);

struct mlx5e_redirect_rqt_param {
	bool is_rss;
	union {
		u32 rqn; /* Direct RQN (Non-RSS) */
		struct {
			u8 hfunc;
			struct mlx5e_channels *channels;
		} rss; /* RSS data */
	};
};

int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
		       struct mlx5e_redirect_rqt_param rrp);
void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params,
				    const struct mlx5e_tirc_config *ttconfig,
				    void *tirc, bool inner);
void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen);
struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt);

int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);

int mlx5e_open_channels(struct mlx5e_priv *priv,
			struct mlx5e_channels *chs);
void mlx5e_close_channels(struct mlx5e_channels *chs);

/* Function pointer to be used to modify HW settings while
 * switching channels
 */
typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv);
int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
			       struct mlx5e_channels *new_chs,
			       mlx5e_fp_hw_modify hw_modify);
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);

void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
				   int num_channels);
void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params,
				 u8 cq_period_mode);
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
				 u8 cq_period_mode);
void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params);

int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
		    struct mlx5e_modify_sq_param *p);
void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_tx_disable_queue(struct netdev_queue *txq);

static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
{
	return (MLX5_CAP_ETH(mdev, tunnel_stateless_gre) &&
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
}

static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_ETH(mdev, swp) &&
		MLX5_CAP_ETH(mdev, swp_csum) && MLX5_CAP_ETH(mdev, swp_lso);
}

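/* Software Parser (SWP) protocols and header offsets, written into the WQE
 * eth segment so HW can locate inner/outer headers (e.g. for IPsec offload).
 */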
struct mlx5e_swp_spec {
	__be16 l3_proto;
	u8 l4_proto;
	u8 is_tun;
	__be16 tun_l3_proto;
	u8 tun_l4_proto;
};

static inline void
mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
		   struct mlx5e_swp_spec *swp_spec)
{
	/* SWP offsets are in 2-bytes words */
	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
	if (swp_spec->l3_proto == htons(ETH_P_IPV6))
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
	if (swp_spec->l4_proto) {
		eseg->swp_outer_l4_offset = skb_transport_offset(skb) / 2;
		if (swp_spec->l4_proto == IPPROTO_UDP)
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
	}

	if (swp_spec->is_tun) {
		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
		if (swp_spec->tun_l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	} else { /* typically for ipsec when xfrm mode != XFRM_MODE_TUNNEL */
		eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
		if (swp_spec->l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	}
	switch (swp_spec->tun_l4_proto) {
	case IPPROTO_UDP:
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
		/* fall through */
	case IPPROTO_TCP:
		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
		break;
	}
}

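/* Map the SQ producer counter to a WQE slot and zero it, so the caller can
 * build the descriptor in place.
 */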
static inline void mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq,
				      struct mlx5e_tx_wqe **wqe,
				      u16 *pi)
{
	struct mlx5_wq_cyc *wq = &sq->wq;

	*pi  = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	*wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
	memset(*wqe, 0, sizeof(**wqe));
}

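/* Post a single-WQEBB NOP at the current producer index, e.g. to pad the SQ
 * so a multi-WQEBB WQE does not wrap around the ring edge.
 */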
static inline
struct mlx5e_tx_wqe *mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16                         pi   = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe        *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg   *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);

	(*pc)++;

	return wqe;
}

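/* Publish posted WQEs to HW: update the doorbell record with the new
 * producer counter, then ring the doorbell through the UAR.
 */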
static inline
void mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc,
		     void __iomem *uar_map,
		     struct mlx5_wqe_ctrl_seg *ctrl)
{
	ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	*wq->db = cpu_to_be32(pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	mlx5_write64((__be32 *)ctrl, uar_map);
}

static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq;

	mcq = &cq->mcq;
	mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
}

extern const struct ethtool_ops mlx5e_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv);
void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv);
void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv);
#endif

int mlx5e_create_tir(struct mlx5_core_dev *mdev,
		     struct mlx5e_tir *tir, u32 *in, int inlen);
void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
		       struct mlx5e_tir *tir);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb);

/* common netdev helpers */
void mlx5e_create_q_counters(struct mlx5e_priv *priv);
void mlx5e_destroy_q_counters(struct mlx5e_priv *priv);
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
		       struct mlx5e_rq *drop_rq);
void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);

int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);

int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);

int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv);
int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);

int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
		     u32 underlay_qpn, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);

int mlx5e_create_tises(struct mlx5e_priv *priv);
void mlx5e_update_carrier(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);
void mlx5e_update_ndo_stats(struct mlx5e_priv *priv);

void mlx5e_queue_update_stats(struct mlx5e_priv *priv);
int mlx5e_bits_invert(unsigned long a, int size);

typedef int (*change_hw_mtu_cb)(struct mlx5e_priv *priv);
int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv);
int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
		     change_hw_mtu_cb set_mtu_cb);

/* ethtool helpers */
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
			       struct ethtool_drvinfo *drvinfo);
void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv,
			       uint32_t stringset, uint8_t *data);
int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
				     struct ethtool_stats *stats, u64 *data);
void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
				 struct ethtool_ringparam *param);
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
				struct ethtool_ringparam *param);
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
				struct ethtool_channels *ch);
int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
			       struct ethtool_channels *ch);
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
			       struct ethtool_coalesce *coal);
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
			       struct ethtool_coalesce *coal);
int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
				     struct ethtool_link_ksettings *link_ksettings);
int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
				     const struct ethtool_link_ksettings *link_ksettings);
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
			      struct ethtool_ts_info *info);
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
			       struct ethtool_flash *flash);
void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
				  struct ethtool_pauseparam *pauseparam);
int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
				 struct ethtool_pauseparam *pauseparam);

/* mlx5e generic netdev management API */
int mlx5e_netdev_init(struct net_device *netdev,
		      struct mlx5e_priv *priv,
		      struct mlx5_core_dev *mdev,
		      const struct mlx5e_profile *profile,
		      void *ppriv);
void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv);
struct net_device*
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
		    int nch, void *ppriv);
int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
			    struct mlx5e_rss_params *rss_params,
			    struct mlx5e_params *params,
			    u16 max_channels, u16 mtu);
void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params);
void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
			    u16 num_channels);
u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
void mlx5e_rx_dim_work(struct work_struct *work);
void mlx5e_tx_dim_work(struct work_struct *work);

void mlx5e_add_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti);
void mlx5e_del_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti);
netdev_features_t mlx5e_features_check(struct sk_buff *skb,
				       struct net_device *netdev,
				       netdev_features_t features);
#ifdef CONFIG_MLX5_ESWITCH
int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int max_tx_rate);
int mlx5e_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi);
int mlx5e_get_vf_stats(struct net_device *dev, int vf, struct ifla_vf_stats *vf_stats);
#endif
#endif /* __MLX5_EN_H__ */