/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"

/* General constants */
#define QM_PQ_MEM_4KB(pq_size)	(pq_size ? DIV_ROUND_UP((pq_size + 1) *	\
							QM_PQ_ELEMENT_SIZE, \
							0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size)	(pq_size ? DIV_ROUND_UP(pq_size, \
								0x100) - 1 : 0)
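
/* Worked example (a sketch, assuming QM_PQ_ELEMENT_SIZE is 4 bytes): for
 * pq_size = 4096 elements, QM_PQ_MEM_4KB() yields
 * DIV_ROUND_UP((4096 + 1) * 4, 0x1000) = 5 pages of 4KB, while
 * QM_PQ_SIZE_256B() yields DIV_ROUND_UP(4096, 0x100) - 1 = 15.
 */
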
#define QM_INVALID_PQ_ID		0xffff

/* Feature enable */
#define QM_BYPASS_EN	1
#define QM_BYTE_CRD_EN	1

/* Other PQ constants */
#define QM_OTHER_PQS_PER_PF	4

/* WFQ constants */

/* Upper bound in bytes, 10 * burst size of 1ms in 50Gbps */
#define QM_WFQ_UPPER_BOUND	62500000

/* Bit of VOQ in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_VOQ_SHIFT	0

/* Bit of PF in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_PF_E4_SHIFT	5

/* 0x9000 = 4*9*1024 */
#define QM_WFQ_INC_VAL(weight)	((weight) * 0x9000)

/* Max WFQ increment value is 0.7 * upper bound */
#define QM_WFQ_MAX_INC_VAL	43750000
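
/* Sanity check, derived from the two constants above: QM_WFQ_INC_VAL(1)
 * is 0x9000 = 36864, so any weight up to 43750000 / 36864 ~= 1186 keeps
 * the increment within QM_WFQ_MAX_INC_VAL.
 */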

/* RL constants */

/* Period in us */
#define QM_RL_PERIOD	5

/* Period in 25MHz cycles */
#define QM_RL_PERIOD_CLK_25M	(25 * QM_RL_PERIOD)

/* RL increment value - rate is specified in Mbps */
#define QM_RL_INC_VAL(rate)		max_t(u32,	\
					      (u32)(((rate ? rate : \
						      1000000) *    \
						     QM_RL_PERIOD * \
						     101) / (8 * 100)), 1)
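
/* Worked example: for rate = 25000 (25 Gbps), QM_RL_INC_VAL() yields
 * (25000 * 5 * 101) / (8 * 100) = 15781, i.e. the 15625 bytes sent in
 * one 5 us period plus the 1% margin encoded by the 101 / 100 factor.
 */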

/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
#define QM_RL_UPPER_BOUND	62500000

/* Max PF RL increment value is 0.7 * upper bound */
#define QM_RL_MAX_INC_VAL	43750000

/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF	1
#define QM_OPPOR_FW_STOP_DEF	0
#define QM_OPPOR_PQ_EMPTY_DEF	1

/* Command Queue constants */

/* Pure LB CmdQ lines (+spare) */
#define PBF_CMDQ_PURE_LB_LINES	150

#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
	(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
	 (ext_voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
		PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))

#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
	(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
	 (ext_voq) * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
		PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))

#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
	((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
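
/* The two _RT_OFFSET() helpers above locate per-VOQ runtime registers by
 * stride: reg(voq) = reg(VOQ0) + (voq) * (reg(VOQ1) - reg(VOQ0)).
 * QM_VOQ_LINE_CRD() then derives a VOQ's initial line credit from its
 * command queue line budget, tagged with the credit register sign bit.
 */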

/* BTB: blocks constants (block size = 256B) */

/* 256B blocks in 9700B packet */
#define BTB_JUMBO_PKT_BLOCKS	38

/* Headroom per-port */
#define BTB_HEADROOM_BLOCKS	BTB_JUMBO_PKT_BLOCKS
#define BTB_PURE_LB_FACTOR	10

/* Factored (hence really 0.7) */
#define BTB_PURE_LB_RATIO	7

/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH		32
#define QM_STOP_CMD_ADDR		2
#define QM_STOP_CMD_STRUCT_SIZE		2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET	0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT	0
#define QM_STOP_CMD_PAUSE_MASK_MASK	-1
#define QM_STOP_CMD_GROUP_ID_OFFSET	1
#define QM_STOP_CMD_GROUP_ID_SHIFT	16
#define QM_STOP_CMD_GROUP_ID_MASK	15
#define QM_STOP_CMD_PQ_TYPE_OFFSET	1
#define QM_STOP_CMD_PQ_TYPE_SHIFT	24
#define QM_STOP_CMD_PQ_TYPE_MASK	1
#define QM_STOP_CMD_MAX_POLL_COUNT	100
#define QM_STOP_CMD_POLL_PERIOD_US	500

/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd)	cmd ## _STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
	SET_FIELD(var[cmd ## _ ## field ## _OFFSET], \
		  cmd ## _ ## field, \
		  value)
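
/* For example, QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PAUSE_MASK, mask)
 * expands to SET_FIELD() on cmd_arr[QM_STOP_CMD_PAUSE_MASK_OFFSET], i.e.
 * dword 0 of the two-dword stop command described by the constants above.
 */
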
/* QM: VOQ macros */
#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) \
	((port) * (max_phys_tcs_per_port) + (tc))

#define LB_VOQ(port)	(MAX_PHYS_VOQS + (port))

#define VOQ(port, tc, max_phys_tcs_per_port) \
	((tc) < LB_TC ? PHYS_VOQ(port, tc, max_phys_tcs_per_port) : \
	 LB_VOQ(port))
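
/* Example VOQ layout, as a sketch: assuming 4 physical TCs per port,
 * port 1 / TC 2 maps to PHYS_VOQ = 1 * 4 + 2 = 6, while port 1's pure
 * LB traffic maps to LB_VOQ = MAX_PHYS_VOQS + 1.
 */
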
/******************** INTERNAL IMPLEMENTATION *********************/

/* Prepare PF RL enable/disable runtime init values */
static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
	if (pf_rl_en) {
		/* Enable RLs for all VOQs */
		STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
			     (1 << MAX_NUM_VOQS_E4) - 1);
		/* Write RL period */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFPERIODTIMER_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);

		/* Set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
				     QM_RL_UPPER_BOUND);
	}
}

/* Prepare PF WFQ enable/disable runtime init values */
static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);

	/* Set credit threshold for QM bypass flow */
	if (pf_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn,
			     QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}

/* Prepare VPORT RL enable/disable runtime init values */
static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
		     vport_rl_en ? 1 : 0);
	if (vport_rl_en) {
		/* Write RL period (use timer 0 only) */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);

		/* Set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
				     QM_RL_UPPER_BOUND);
	}
}

/* Prepare VPORT WFQ enable/disable runtime init values */
static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
		     vport_wfq_en ? 1 : 0);

	/* Set credit threshold for QM bypass flow */
	if (vport_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn,
			     QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}

/* Prepare runtime init values to allocate PBF command queue lines for
 * the specified VOQ.
 */
static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
				       u8 voq, u16 cmdq_lines)
{
	u32 qm_line_crd;

	qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
			 (u32)cmdq_lines);
	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
		     qm_line_crd);
}

/* Prepare runtime init values to allocate PBF command queue lines. */
static void qed_cmdq_lines_rt_init(
	struct qed_hwfn *p_hwfn,
	u8 max_ports_per_engine,
	u8 max_phys_tcs_per_port,
	struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u8 tc, voq, port_id, num_tcs_in_port;

	/* Clear PBF lines for all VOQs */
	for (voq = 0; voq < MAX_NUM_VOQS_E4; voq++)
		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		if (port_params[port_id].active) {
			u16 phys_lines, phys_lines_per_tc;

			/* find #lines to divide between active phys TCs */
			phys_lines = port_params[port_id].num_pbf_cmd_lines -
				     PBF_CMDQ_PURE_LB_LINES;
			/* find #lines per active physical TC */
			num_tcs_in_port = 0;
			for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
				if (((port_params[port_id].active_phys_tcs >>
				      tc) & 0x1) == 1)
					num_tcs_in_port++;
			}

			phys_lines_per_tc = phys_lines / num_tcs_in_port;
			/* init registers per active TC */
			for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
				if (((port_params[port_id].active_phys_tcs >>
				      tc) & 0x1) != 1)
					continue;

				voq = PHYS_VOQ(port_id, tc,
					       max_phys_tcs_per_port);
				qed_cmdq_lines_voq_rt_init(p_hwfn, voq,
							   phys_lines_per_tc);
			}

			/* init registers for pure LB TC */
			qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
						   PBF_CMDQ_PURE_LB_LINES);
		}
	}
}

static void qed_btb_blocks_rt_init(
	struct qed_hwfn *p_hwfn,
	u8 max_ports_per_engine,
	u8 max_phys_tcs_per_port,
	struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u32 usable_blocks, pure_lb_blocks, phys_blocks;
	u8 tc, voq, port_id, num_tcs_in_port;

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		u32 temp;

		if (!port_params[port_id].active)
			continue;

		/* Subtract headroom blocks */
		usable_blocks = port_params[port_id].num_btb_blocks -
				BTB_HEADROOM_BLOCKS;

		/* find blocks per physical TC */
		num_tcs_in_port = 0;
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) == 1)
				num_tcs_in_port++;
		}

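		/* The pure LB share works out to usable_blocks /
		 * (num_tcs_in_port + 0.7), computed here in x10 fixed point
		 * (BTB_PURE_LB_FACTOR) to avoid floating point, then clamped
		 * to at least one jumbo packet's worth of blocks.
		 */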
		pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
				 (num_tcs_in_port * BTB_PURE_LB_FACTOR +
				  BTB_PURE_LB_RATIO);
		pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS,
				       pure_lb_blocks / BTB_PURE_LB_FACTOR);
		phys_blocks = (usable_blocks - pure_lb_blocks) /
			      num_tcs_in_port;

		/* Init physical TCs */
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) != 1)
				continue;

			voq = PHYS_VOQ(port_id, tc,
				       max_phys_tcs_per_port);
			STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
				     phys_blocks);
		}

		/* Init pure LB TC */
		temp = LB_VOQ(port_id);
		STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(temp),
			     pure_lb_blocks);
	}
}

/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void qed_tx_pq_map_rt_init(
	struct qed_hwfn *p_hwfn,
	struct qed_ptt *p_ptt,
	struct qed_qm_pf_rt_init_params *p_params,
	u32 base_mem_addr_4kb)
{
	struct init_qm_vport_params *vport_params = p_params->vport_params;
	u16 num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
	u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
	u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
			    QM_PF_QUEUE_GROUP_SIZE;
	u16 i, pq_id, pq_group;

	/* A bit per Tx PQ indicating if the PQ is associated with a VF */
	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
	u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
	u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
	u32 mem_addr_4kb = base_mem_addr_4kb;

	/* Set mapping from PQ group to PF */
	for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
		STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
			     (u32)(p_params->pf_id));
	/* Set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
		     QM_PQ_SIZE_256B(p_params->num_pf_cids));
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
		     QM_PQ_SIZE_256B(p_params->num_vf_cids));

	/* Go over all Tx PQs */
	for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
		u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
			     p_params->max_phys_tcs_per_port);
		bool is_vf_pq = (i >= p_params->num_pf_pqs);
		struct qm_rf_pq_map_e4 tx_pq_map;

		bool rl_valid = p_params->pq_params[i].rl_valid &&
				(p_params->pq_params[i].vport_id <
				 MAX_QM_GLOBAL_RLS);

		/* Update first Tx PQ of VPORT/TC */
		u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
				    p_params->start_vport;
		u16 *pq_ids = &vport_params[vport_id_in_pf].first_tx_pq_id[0];
		u16 first_tx_pq_id = pq_ids[p_params->pq_params[i].tc_id];

		if (first_tx_pq_id == QM_INVALID_PQ_ID) {
			/* Create new VP PQ */
			pq_ids[p_params->pq_params[i].tc_id] = pq_id;
			first_tx_pq_id = pq_id;

			/* Map VP PQ to VOQ and PF */
			STORE_RT_REG(p_hwfn,
				     QM_REG_WFQVPMAP_RT_OFFSET +
				     first_tx_pq_id,
				     (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
				     (p_params->pf_id <<
				      QM_WFQ_VP_PQ_PF_E4_SHIFT));
		}

		if (p_params->pq_params[i].rl_valid && !rl_valid)
			DP_NOTICE(p_hwfn,
				  "Invalid VPORT ID for rate limiter configuration\n");
		/* Fill PQ map entry */
		memset(&tx_pq_map, 0, sizeof(tx_pq_map));
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_E4_PQ_VALID, 1);
		SET_FIELD(tx_pq_map.reg,
			  QM_RF_PQ_MAP_E4_RL_VALID, rl_valid ? 1 : 0);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_E4_VP_PQ_ID,
			  first_tx_pq_id);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_E4_RL_ID,
			  rl_valid ?
			  p_params->pq_params[i].vport_id : 0);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_E4_VOQ, voq);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP,
			  p_params->pq_params[i].wrr_group);
		/* Write PQ map entry to CAM */
		STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
			     *((u32 *)&tx_pq_map));
		/* Set base address */
		STORE_RT_REG(p_hwfn,
			     QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);

		/* If VF PQ, add indication to PQ VF mask */
		if (is_vf_pq) {
			tx_pq_vf_mask[pq_id /
				      QM_PF_QUEUE_GROUP_SIZE] |=
			    BIT((pq_id % QM_PF_QUEUE_GROUP_SIZE));
			mem_addr_4kb += vport_pq_mem_4kb;
		} else {
			mem_addr_4kb += pq_mem_4kb;
		}
	}

	/* Store Tx PQ VF mask to size select register */
	for (i = 0; i < num_tx_pq_vf_masks; i++)
		if (tx_pq_vf_mask[i])
			STORE_RT_REG(p_hwfn,
				     QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i,
				     tx_pq_vf_mask[i]);
}

/* Prepare Other PQ mapping runtime init values for the specified PF */
static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
				     u8 port_id,
				     u8 pf_id,
				     u32 num_pf_cids,
				     u32 num_tids, u32 base_mem_addr_4kb)
{
	u32 pq_size, pq_mem_4kb, mem_addr_4kb;
	u16 i, pq_id, pq_group;

	/* A single other PQ group is used in each PF,
	 * where PQ group i is used in PF i.
	 */
	pq_group = pf_id;
	pq_size = num_pf_cids + num_tids;
	pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
	mem_addr_4kb = base_mem_addr_4kb;

	/* Map PQ group to PF */
	STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
		     (u32)(pf_id));
	/* Set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
		     QM_PQ_SIZE_256B(pq_size));

	/* Set base address */
	for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
	     i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
		STORE_RT_REG(p_hwfn,
			     QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);
		mem_addr_4kb += pq_mem_4kb;
	}
}

/* Prepare PF WFQ runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
			      struct qed_qm_pf_rt_init_params *p_params)
{
	u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
	u32 crd_reg_offset;
	u32 inc_val;
	u16 i;

	if (p_params->pf_id < MAX_NUM_PFS_BB)
		crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
	else
		crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET;
	crd_reg_offset += p_params->pf_id % MAX_NUM_PFS_BB;

	inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
		return -1;
	}

	for (i = 0; i < num_tx_pqs; i++) {
		u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
			     p_params->max_phys_tcs_per_port);

		OVERWRITE_RT_REG(p_hwfn,
				 crd_reg_offset + voq * MAX_NUM_PFS_BB,
				 QM_WFQ_CRD_REG_SIGN_BIT);
	}

	STORE_RT_REG(p_hwfn,
		     QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
		     QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
		     inc_val);
	return 0;
}

/* Prepare PF RL runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);

	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
		return -1;
	}
	STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
		     QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
		     QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
	return 0;
}

/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
			      u8 num_vports,
			      struct init_qm_vport_params *vport_params)
{
	u32 inc_val;
	u8 tc, i;

	/* Go over all PF VPORTs */
	for (i = 0; i < num_vports; i++) {

		if (!vport_params[i].vport_wfq)
			continue;

		inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
		if (inc_val > QM_WFQ_MAX_INC_VAL) {
			DP_NOTICE(p_hwfn,
				  "Invalid VPORT WFQ weight configuration\n");
			return -1;
		}

		/* each VPORT can have several VPORT PQ IDs for
		 * different TCs
		 */
		for (tc = 0; tc < NUM_OF_TCS; tc++) {
			u16 vport_pq_id = vport_params[i].first_tx_pq_id[tc];

			if (vport_pq_id != QM_INVALID_PQ_ID) {
				STORE_RT_REG(p_hwfn,
					     QM_REG_WFQVPCRD_RT_OFFSET +
					     vport_pq_id,
					     QM_WFQ_CRD_REG_SIGN_BIT);
				STORE_RT_REG(p_hwfn,
					     QM_REG_WFQVPWEIGHT_RT_OFFSET +
					     vport_pq_id, inc_val);
			}
		}
	}

	return 0;
}

static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
				u8 start_vport,
				u8 num_vports,
				struct init_qm_vport_params *vport_params)
{
	u8 i, vport_id;

	if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
		DP_NOTICE(p_hwfn,
			  "Invalid VPORT ID for rate limiter configuration\n");
		return -1;
	}

	/* Go over all PF VPORTs */
	for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
		u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);

		if (inc_val > QM_RL_MAX_INC_VAL) {
			DP_NOTICE(p_hwfn,
				  "Invalid VPORT rate-limit configuration\n");
			return -1;
		}

		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
			     QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
			     QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
			     inc_val);
	}

	return 0;
}

static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt)
{
	u32 reg_val, i;

	for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0;
	     i++) {
		udelay(QM_STOP_CMD_POLL_PERIOD_US);
		reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
	}

M
646 647 648 649 650 651 652 653 654 655 656
	if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "Timeout when waiting for QM SDM command ready signal\n");
		return false;
	}

	return true;
}

static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
{
	if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
		return false;

	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);

	return qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}

/******************** INTERFACE IMPLEMENTATION *********************/
u32 qed_qm_pf_mem_size(u8 pf_id,
		       u32 num_pf_cids,
		       u32 num_vf_cids,
		       u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
{
	return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
	       QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
	       QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}
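
/* Illustrative sizing, assuming QM_PQ_ELEMENT_SIZE is 4 bytes: with 64 PF
 * CIDs, 64 VF CIDs, no TIDs, 8 PF PQs and 8 VF PQs, each PQ fits in one
 * 4KB page, so the function above returns 8 + 8 + QM_OTHER_PQS_PER_PF = 20
 * pages.
 */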

int qed_qm_common_rt_init(
	struct qed_hwfn *p_hwfn,
	struct qed_qm_common_rt_init_params *p_params)
{
	/* init AFullOprtnstcCrdMask */
	u32 mask = (QM_OPPOR_LINE_VOQ_DEF <<
		    QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
		   (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
		   (p_params->pf_wfq_en <<
		    QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
		   (p_params->vport_wfq_en <<
		    QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
		   (p_params->pf_rl_en <<
		    QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
		   (p_params->vport_rl_en <<
		    QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
		   (QM_OPPOR_FW_STOP_DEF <<
		    QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
		   (QM_OPPOR_PQ_EMPTY_DEF <<
		    QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);

	STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
	qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en);
	qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);
	qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en);
	qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);
	qed_cmdq_lines_rt_init(p_hwfn,
			       p_params->max_ports_per_engine,
			       p_params->max_phys_tcs_per_port,
			       p_params->port_params);
	qed_btb_blocks_rt_init(p_hwfn,
			       p_params->max_ports_per_engine,
			       p_params->max_phys_tcs_per_port,
			       p_params->port_params);
	return 0;
}

int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      struct qed_qm_pf_rt_init_params *p_params)
{
	struct init_qm_vport_params *vport_params = p_params->vport_params;
	u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids +
					       p_params->num_tids) *
				 QM_OTHER_PQS_PER_PF;
	u8 tc, i;

	/* Clear first Tx PQ ID array for each VPORT */
	for (i = 0; i < p_params->num_vports; i++)
		for (tc = 0; tc < NUM_OF_TCS; tc++)
			vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;

	/* Map Other PQs (if any) */
	qed_other_pq_map_rt_init(p_hwfn, p_params->port_id, p_params->pf_id,
				 p_params->num_pf_cids, p_params->num_tids, 0);

	/* Map Tx PQs */
	qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);

	if (p_params->pf_wfq)
		if (qed_pf_wfq_rt_init(p_hwfn, p_params))
			return -1;

	if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
		return -1;

	if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
		return -1;

	if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
				 p_params->num_vports, vport_params))
		return -1;

	return 0;
}

int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
{
	u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);

	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
	return 0;
}

int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);

	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt,
	       QM_REG_RLPFCRD + pf_id * 4,
	       QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);

	return 0;
}

int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
{
	u16 vport_pq_id;
	u32 inc_val;
	u8 tc;

	inc_val = QM_WFQ_INC_VAL(vport_wfq);
	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration\n");
		return -1;
	}

	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		vport_pq_id = first_tx_pq_id[tc];
		if (vport_pq_id != QM_INVALID_PQ_ID)
			qed_wr(p_hwfn, p_ptt,
			       QM_REG_WFQVPWEIGHT + vport_pq_id * 4,
			       inc_val);
	}

	return 0;
}

int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl)
{
	u32 inc_val = QM_RL_INC_VAL(vport_rl);

	if (vport_id >= MAX_QM_GLOBAL_RLS) {
		DP_NOTICE(p_hwfn,
			  "Invalid VPORT ID for rate limiter configuration\n");
		return -1;
	}

	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration\n");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt,
	       QM_REG_RLGLBLCRD + vport_id * 4,
	       QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);

	return 0;
}

bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  bool is_release_cmd,
Y
843 844 845 846
{
	u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
	u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;

	/* Set command's PQ type */
	QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);

	for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
		/* Set PQ bit in mask (stop command only) */
		if (!is_release_cmd)
			pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));

		/* If last PQ or end of PQ mask, write command */
		if ((pq_id == last_pq) ||
		    (pq_id % QM_STOP_PQ_MASK_WIDTH ==
		     (QM_STOP_PQ_MASK_WIDTH - 1))) {
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
					 PAUSE_MASK, pq_mask);
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
					 GROUP_ID,
					 pq_id / QM_STOP_PQ_MASK_WIDTH);
			if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
					     cmd_arr[0], cmd_arr[1]))
				return false;
			pq_mask = 0;
		}
	}

	return true;
}

static void
qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable)
{
	if (enable)
		set_bit(bit, var);
	else
		clear_bit(bit, var);
}

#define PRS_ETH_TUNN_FIC_FORMAT	-188897008

void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt, u16 dest_port)
{
	qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
	qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
	qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}

void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, bool vxlan_enable)
{
	unsigned long reg_val = 0;
	u8 shift;

	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);

	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);

	if (reg_val)
		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
		       PRS_ETH_TUNN_FIC_FORMAT);

	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);

	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
	       vxlan_enable ? 1 : 0);
}

void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			bool eth_gre_enable, bool ip_gre_enable)
{
	unsigned long reg_val = 0;
	u8 shift;

	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);

	shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val)
		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
		       PRS_ETH_TUNN_FIC_FORMAT);

	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);

	shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
	       eth_gre_enable ? 1 : 0);
	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
	       ip_gre_enable ? 1 : 0);
}

void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt, u16 dest_port)
{
	qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
	qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}

void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt,
Y
961 962 963 964 965 966 967 968 969 970 971 972 973
{
	unsigned long reg_val = 0;
	u8 shift;

	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_geneve_enable);

	shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_geneve_enable);

	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val)
		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
		       PRS_ETH_TUNN_FIC_FORMAT);

	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
	       eth_geneve_enable ? 1 : 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);

	/* EDPM with geneve tunnel not supported in BB_B0 */
	if (QED_IS_BB_B0(p_hwfn->cdev))
		return;

	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5,
	       eth_geneve_enable ? 1 : 0);
	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5,
	       ip_geneve_enable ? 1 : 0);
}

#define T_ETH_PACKET_ACTION_GFT_EVENTID  23
#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR  272
#define T_ETH_PACKET_MATCH_RFS_EVENTID 25
#define PARSER_ETH_CONN_CM_HDR 0
#define CAM_LINE_SIZE sizeof(u32)
#define RAM_LINE_SIZE sizeof(u64)
#define REG_SIZE sizeof(u32)

void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt, u16 pf_id)
{
M
		      pf_id * RAM_LINE_SIZE;
1004 1005 1006 1007

	/* Stop using GFT logic */
	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
	qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0);
	qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
	qed_wr(p_hwfn, p_ptt, hw_addr, 0);
	qed_wr(p_hwfn, p_ptt, hw_addr + 4, 0);
}

void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			     u16 pf_id, bool tcp, bool udp,
			     bool ipv4, bool ipv6)
{
	union gft_cam_line_union camline;
	struct gft_ram_line ramline;
	u32 rfs_cm_hdr_event_id;

	rfs_cm_hdr_event_id = qed_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);

	if (!ipv6 && !ipv4)
		DP_NOTICE(p_hwfn,
			  "set_rfs_mode_enable: must accept at least one of - ipv4 or ipv6\n");
	if (!tcp && !udp)
		DP_NOTICE(p_hwfn,
			  "set_rfs_mode_enable: must accept at least one of - udp or tcp\n");

	rfs_cm_hdr_event_id |= T_ETH_PACKET_MATCH_RFS_EVENTID <<
					PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
	rfs_cm_hdr_event_id |= PARSER_ETH_CONN_CM_HDR <<
					PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
	qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);

	/* Configure Registers for RFS mode */
	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
	qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
	camline.cam_line_mapped.camline = 0;

	/* CAM line is now valid */
	SET_FIELD(camline.cam_line_mapped.camline,
		  GFT_CAM_LINE_MAPPED_VALID, 1);

	/* Filters are per PF */
	SET_FIELD(camline.cam_line_mapped.camline,
		  GFT_CAM_LINE_MAPPED_PF_ID_MASK,
		  GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
	SET_FIELD(camline.cam_line_mapped.camline,
		  GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
	if (!(tcp && udp)) {
		SET_FIELD(camline.cam_line_mapped.camline,
			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
		if (tcp)
			SET_FIELD(camline.cam_line_mapped.camline,
				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
				  GFT_PROFILE_TCP_PROTOCOL);
		else
			SET_FIELD(camline.cam_line_mapped.camline,
				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
				  GFT_PROFILE_UDP_PROTOCOL);
	}

	if (!(ipv4 && ipv6)) {
		SET_FIELD(camline.cam_line_mapped.camline,
			  GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
		if (ipv4)
			SET_FIELD(camline.cam_line_mapped.camline,
				  GFT_CAM_LINE_MAPPED_IP_VERSION,
				  GFT_PROFILE_IPV4);
		else
			SET_FIELD(camline.cam_line_mapped.camline,
				  GFT_CAM_LINE_MAPPED_IP_VERSION,
				  GFT_PROFILE_IPV6);
	}

	/* Write characteristics to cam */
	qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
	       camline.cam_line_mapped.camline);
	camline.cam_line_mapped.camline = qed_rd(p_hwfn, p_ptt,
						 PRS_REG_GFT_CAM +
						 CAM_LINE_SIZE * pf_id);

	/* Write line to RAM - compare to filter 4 tuple */
	ramline.lo = 0;
	ramline.hi = 0;
	SET_FIELD(ramline.hi, GFT_RAM_LINE_DST_IP, 1);
	SET_FIELD(ramline.hi, GFT_RAM_LINE_SRC_IP, 1);
	SET_FIELD(ramline.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
	SET_FIELD(ramline.lo, GFT_RAM_LINE_ETHERTYPE, 1);
	SET_FIELD(ramline.lo, GFT_RAM_LINE_SRC_PORT, 1);
	SET_FIELD(ramline.lo, GFT_RAM_LINE_DST_PORT, 1);

	/* Each iteration write to reg */
	qed_wr(p_hwfn, p_ptt,
	       PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
	       ramline.lo);
	qed_wr(p_hwfn, p_ptt,
	       PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + 4,
	       ramline.hi);

	/* Set default profile so that no filter match will happen */
	qed_wr(p_hwfn, p_ptt,
	       PRS_REG_GFT_PROFILE_MASK_RAM +
	       RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH,
	       ramline.lo);
	qed_wr(p_hwfn, p_ptt,
	       PRS_REG_GFT_PROFILE_MASK_RAM +
	       RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH + 4,
	       ramline.hi);
}