// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2021 Marvell International Ltd.
 */

#include <linux/types.h>
#include <linux/crc8.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_iro_hsi.h"
#include "qed_reg_addr.h"

#define CDU_VALIDATION_DEFAULT_CFG CDU_CONTEXT_VALIDATION_DEFAULT_CFG

static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES] = {
	{400, 336, 352, 368, 304, 384, 416, 352},	/* region 3 offsets */
	{528, 496, 416, 512, 448, 512, 544, 480},	/* region 4 offsets */
	{608, 544, 496, 576, 576, 592, 624, 560}	/* region 5 offsets */
};

static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = {
	{240, 240, 112, 0, 0, 0, 0, 96}	/* region 1 offsets */
};

/* General constants */
#define QM_PQ_MEM_4KB(pq_size)	(pq_size ? DIV_ROUND_UP((pq_size + 1) *	\
							QM_PQ_ELEMENT_SIZE, \
							0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size)	(pq_size ? DIV_ROUND_UP(pq_size, \
								0x100) - 1 : 0)
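
/* Illustrative numbers (not from the original source): assuming
 * QM_PQ_ELEMENT_SIZE is 4 bytes, a PQ with pq_size = 511 needs
 * QM_PQ_MEM_4KB(511) = DIV_ROUND_UP(512 * 4, 0x1000) = 1 page of 4KB,
 * while QM_PQ_SIZE_256B(511) = DIV_ROUND_UP(511, 0x100) - 1 = 1, i.e.
 * the size is encoded for the register in 256B units minus one.
 */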
#define QM_INVALID_PQ_ID		0xffff

/* Max link speed (in Mbps) */
#define QM_MAX_LINK_SPEED               100000

/* Feature enable */
#define QM_BYPASS_EN	1
#define QM_BYTE_CRD_EN	1

/* Initial VOQ byte credit */
#define QM_INITIAL_VOQ_BYTE_CRD         98304

/* Other PQ constants */
#define QM_OTHER_PQS_PER_PF	4

/* VOQ constants */
#define MAX_NUM_VOQS	(MAX_NUM_PORTS_K2 * NUM_TCS_4PORT_K2)
#define VOQS_BIT_MASK	(BIT(MAX_NUM_VOQS) - 1)

/* WFQ constants */

/* PF WFQ increment value, 0x9000 = 4*9*1024 */
#define QM_PF_WFQ_INC_VAL(weight)       ((weight) * 0x9000)

/* PF WFQ Upper bound, in MB, 10 * burst size of 1ms in 50Gbps */
#define QM_PF_WFQ_UPPER_BOUND           62500000

/* PF WFQ max increment value, 0.7 * upper bound */
#define QM_PF_WFQ_MAX_INC_VAL           ((QM_PF_WFQ_UPPER_BOUND * 7) / 10)

/* Number of VOQs in E5 PF WFQ credit register (QmWfqCrd) */
#define QM_PF_WFQ_CRD_E5_NUM_VOQS       16

/* VP WFQ increment value */
#define QM_VP_WFQ_INC_VAL(weight)       ((weight) * QM_VP_WFQ_MIN_INC_VAL)

/* VP WFQ min increment value */
#define QM_VP_WFQ_MIN_INC_VAL           10800

/* VP WFQ max increment value, 2^30 */
#define QM_VP_WFQ_MAX_INC_VAL           0x40000000
/* VP WFQ bypass threshold */
#define QM_VP_WFQ_BYPASS_THRESH         (QM_VP_WFQ_MIN_INC_VAL - 100)
/* VP RL credit task cost */
#define QM_VP_RL_CRD_TASK_COST          9700

/* Bit of VOQ in VP WFQ PQ map */
#define QM_VP_WFQ_PQ_VOQ_SHIFT          0

/* Bit of PF in VP WFQ PQ map */
#define QM_VP_WFQ_PQ_PF_SHIFT   5

/* RL constants */

/* Period in us */
#define QM_RL_PERIOD	5

/* Period in 25MHz cycles */
#define QM_RL_PERIOD_CLK_25M	(25 * QM_RL_PERIOD)

/* RL increment value - rate is specified in mbps */
#define QM_RL_INC_VAL(rate)                     ({	\
						typeof(rate) __rate = (rate); \
						max_t(u32,		\
						(u32)(((__rate ? __rate : \
						100000) *		\
						QM_RL_PERIOD *		\
						101) / (8 * 100)), 1); })
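
/* Worked example (illustrative rate): for rate = 10000 Mbps,
 * 10000 Mbit/s * 5 us / 8 = 6250 bytes per QM period; with the 1%
 * margin (the 101/100 factor) the increment is (10000 * 5 * 101) /
 * (8 * 100) = 6312. A rate of 0 falls back to 100000 Mbps.
 */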

/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
#define QM_PF_RL_UPPER_BOUND	62500000

/* Max PF RL increment value is 0.7 * upper bound */
#define QM_PF_RL_MAX_INC_VAL	((QM_PF_RL_UPPER_BOUND * 7) / 10)

/* QCN RL Upper bound, speed is in Mpbs */
#define QM_GLOBAL_RL_UPPER_BOUND(speed)         ((u32)max_t( \
		u32,					    \
		(u32)(((speed) *			    \
		       QM_RL_PERIOD * 101) / (8 * 100)),    \
		QM_VP_RL_CRD_TASK_COST			    \
		+ 1000))

/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF	1
#define QM_OPPOR_FW_STOP_DEF	0
#define QM_OPPOR_PQ_EMPTY_DEF	1

/* Command Queue constants */

/* Pure LB CmdQ lines (+spare) */
#define PBF_CMDQ_PURE_LB_LINES	150

#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
	(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
	 (ext_voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
		PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))

#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
	(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
	 (ext_voq) * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
		PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
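
/* Both offset macros above derive the per-VOQ runtime offset from the
 * stride between the VOQ1 and VOQ0 register offsets, so VOQ n lands at
 * the VOQ0 offset plus n times that stride.
 */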

/* Returns the VOQ line credit for the specified number of PBF command lines.
 * PBF lines are specified in 256b units.
 */
#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
	((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
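
/* For example, the pure LB VOQ with PBF_CMDQ_PURE_LB_LINES = 150 gets
 * a line credit of (150 - 4) * 2 = 292, OR'd with the credit register
 * sign bit.
 */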

/* BTB: blocks constants (block size = 256B) */

/* 256B blocks in 9700B packet */
#define BTB_JUMBO_PKT_BLOCKS	38

/* Headroom per-port */
#define BTB_HEADROOM_BLOCKS	BTB_JUMBO_PKT_BLOCKS
#define BTB_PURE_LB_FACTOR	10

/* Factored (hence really 0.7) */
#define BTB_PURE_LB_RATIO	7

/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH		32
#define QM_STOP_CMD_ADDR		2
#define QM_STOP_CMD_STRUCT_SIZE		2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET	0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT	0
#define QM_STOP_CMD_PAUSE_MASK_MASK	-1
#define QM_STOP_CMD_GROUP_ID_OFFSET	1
#define QM_STOP_CMD_GROUP_ID_SHIFT	16
#define QM_STOP_CMD_GROUP_ID_MASK	15
#define QM_STOP_CMD_PQ_TYPE_OFFSET	1
#define QM_STOP_CMD_PQ_TYPE_SHIFT	24
#define QM_STOP_CMD_PQ_TYPE_MASK	1
#define QM_STOP_CMD_MAX_POLL_COUNT	100
#define QM_STOP_CMD_POLL_PERIOD_US	500
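
/* Per the offsets/shifts above, the two-dword stop command carries the
 * 32-bit PQ pause mask in dword 0, the PQ group ID in bits 16-19 of
 * dword 1 and the PQ type (Tx/Other) in bit 24 of dword 1.
 */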

/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd)	cmd ## _STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
	SET_FIELD(var[cmd ## _ ## field ## _OFFSET], \
		  cmd ## _ ## field, \
		  value)

#define QM_INIT_TX_PQ_MAP(p_hwfn, map, pq_id, vp_pq_id, rl_valid,	      \
			  rl_id, ext_voq, wrr)				      \
	do {								      \
		u32 __reg = 0;						      \
									      \
		BUILD_BUG_ON(sizeof((map).reg) != sizeof(__reg));	      \
		memset(&(map), 0, sizeof(map));				      \
		SET_FIELD(__reg, QM_RF_PQ_MAP_PQ_VALID, 1);	      \
		SET_FIELD(__reg, QM_RF_PQ_MAP_RL_VALID,	      \
			  !!(rl_valid));				      \
		SET_FIELD(__reg, QM_RF_PQ_MAP_VP_PQ_ID, (vp_pq_id)); \
		SET_FIELD(__reg, QM_RF_PQ_MAP_RL_ID, (rl_id));	      \
		SET_FIELD(__reg, QM_RF_PQ_MAP_VOQ, (ext_voq));	      \
		SET_FIELD(__reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,      \
			  (wrr));					      \
									      \
		STORE_RT_REG((p_hwfn), QM_REG_TXPQMAP_RT_OFFSET + (pq_id),    \
			     __reg);					      \
		(map).reg = cpu_to_le32(__reg);				      \
	} while (0)
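
/* In short: QM_INIT_TX_PQ_MAP builds the PQ map value in a scratch
 * u32, mirrors it into the runtime array at QM_REG_TXPQMAP_RT_OFFSET +
 * pq_id, and stores the little-endian copy in (map).reg.
 */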

#define WRITE_PQ_INFO_TO_RAM	1
#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
	(((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \
	((rl_valid ? 1 : 0) << 22) | (((rl) & 255) << 24) | \
	(((rl) >> 8) << 9))

#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
	(XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \
	XSTORM_PQ_INFO_OFFSET(pq_id))

static const char * const s_protocol_types[] = {
	"PROTOCOLID_ISCSI", "PROTOCOLID_FCOE", "PROTOCOLID_ROCE",
	"PROTOCOLID_CORE", "PROTOCOLID_ETH", "PROTOCOLID_IWARP",
	"PROTOCOLID_TOE", "PROTOCOLID_PREROCE", "PROTOCOLID_COMMON",
	"PROTOCOLID_TCP", "PROTOCOLID_RDMA", "PROTOCOLID_SCSI",
};

static const char *s_ramrod_cmd_ids[][28] = {
	{
	"ISCSI_RAMROD_CMD_ID_UNUSED", "ISCSI_RAMROD_CMD_ID_INIT_FUNC",
	 "ISCSI_RAMROD_CMD_ID_DESTROY_FUNC",
	 "ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN",
	 "ISCSI_RAMROD_CMD_ID_UPDATE_CONN",
	 "ISCSI_RAMROD_CMD_ID_TERMINATION_CONN",
	 "ISCSI_RAMROD_CMD_ID_CLEAR_SQ", "ISCSI_RAMROD_CMD_ID_MAC_UPDATE",
	 "ISCSI_RAMROD_CMD_ID_CONN_STATS", },
	{ "FCOE_RAMROD_CMD_ID_INIT_FUNC", "FCOE_RAMROD_CMD_ID_DESTROY_FUNC",
	 "FCOE_RAMROD_CMD_ID_STAT_FUNC",
	 "FCOE_RAMROD_CMD_ID_OFFLOAD_CONN",
	 "FCOE_RAMROD_CMD_ID_TERMINATE_CONN", },
	{ "RDMA_RAMROD_UNUSED", "RDMA_RAMROD_FUNC_INIT",
	 "RDMA_RAMROD_FUNC_CLOSE", "RDMA_RAMROD_REGISTER_MR",
	 "RDMA_RAMROD_DEREGISTER_MR", "RDMA_RAMROD_CREATE_CQ",
	 "RDMA_RAMROD_RESIZE_CQ", "RDMA_RAMROD_DESTROY_CQ",
	 "RDMA_RAMROD_CREATE_SRQ", "RDMA_RAMROD_MODIFY_SRQ",
	 "RDMA_RAMROD_DESTROY_SRQ", "RDMA_RAMROD_START_NS_TRACKING",
	 "RDMA_RAMROD_STOP_NS_TRACKING", "ROCE_RAMROD_CREATE_QP",
	 "ROCE_RAMROD_MODIFY_QP", "ROCE_RAMROD_QUERY_QP",
	 "ROCE_RAMROD_DESTROY_QP", "ROCE_RAMROD_CREATE_UD_QP",
	 "ROCE_RAMROD_DESTROY_UD_QP", "ROCE_RAMROD_FUNC_UPDATE",
	 "ROCE_RAMROD_SUSPEND_QP", "ROCE_RAMROD_QUERY_SUSPENDED_QP",
	 "ROCE_RAMROD_CREATE_SUSPENDED_QP", "ROCE_RAMROD_RESUME_QP",
	 "ROCE_RAMROD_SUSPEND_UD_QP", "ROCE_RAMROD_RESUME_UD_QP",
	 "ROCE_RAMROD_CREATE_SUSPENDED_UD_QP", "ROCE_RAMROD_FLUSH_DPT_QP", },
	{ "CORE_RAMROD_UNUSED", "CORE_RAMROD_RX_QUEUE_START",
	 "CORE_RAMROD_TX_QUEUE_START", "CORE_RAMROD_RX_QUEUE_STOP",
	 "CORE_RAMROD_TX_QUEUE_STOP",
	 "CORE_RAMROD_RX_QUEUE_FLUSH",
	 "CORE_RAMROD_TX_QUEUE_UPDATE", "CORE_RAMROD_QUEUE_STATS_QUERY", },
	{ "ETH_RAMROD_UNUSED", "ETH_RAMROD_VPORT_START",
	 "ETH_RAMROD_VPORT_UPDATE", "ETH_RAMROD_VPORT_STOP",
	 "ETH_RAMROD_RX_QUEUE_START", "ETH_RAMROD_RX_QUEUE_STOP",
	 "ETH_RAMROD_TX_QUEUE_START", "ETH_RAMROD_TX_QUEUE_STOP",
	 "ETH_RAMROD_FILTERS_UPDATE", "ETH_RAMROD_RX_QUEUE_UPDATE",
	 "ETH_RAMROD_RX_CREATE_OPENFLOW_ACTION",
	 "ETH_RAMROD_RX_ADD_OPENFLOW_FILTER",
	 "ETH_RAMROD_RX_DELETE_OPENFLOW_FILTER",
	 "ETH_RAMROD_RX_ADD_UDP_FILTER",
	 "ETH_RAMROD_RX_DELETE_UDP_FILTER",
	 "ETH_RAMROD_RX_CREATE_GFT_ACTION",
	 "ETH_RAMROD_RX_UPDATE_GFT_FILTER", "ETH_RAMROD_TX_QUEUE_UPDATE",
	 "ETH_RAMROD_RGFS_FILTER_ADD", "ETH_RAMROD_RGFS_FILTER_DEL",
	 "ETH_RAMROD_TGFS_FILTER_ADD", "ETH_RAMROD_TGFS_FILTER_DEL",
	 "ETH_RAMROD_GFS_COUNTERS_REPORT_REQUEST", },
	{ "RDMA_RAMROD_UNUSED", "RDMA_RAMROD_FUNC_INIT",
	 "RDMA_RAMROD_FUNC_CLOSE", "RDMA_RAMROD_REGISTER_MR",
	 "RDMA_RAMROD_DEREGISTER_MR", "RDMA_RAMROD_CREATE_CQ",
	 "RDMA_RAMROD_RESIZE_CQ", "RDMA_RAMROD_DESTROY_CQ",
	 "RDMA_RAMROD_CREATE_SRQ", "RDMA_RAMROD_MODIFY_SRQ",
	 "RDMA_RAMROD_DESTROY_SRQ", "RDMA_RAMROD_START_NS_TRACKING",
	 "RDMA_RAMROD_STOP_NS_TRACKING",
	 "IWARP_RAMROD_CMD_ID_TCP_OFFLOAD",
	 "IWARP_RAMROD_CMD_ID_MPA_OFFLOAD",
	 "IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR",
	 "IWARP_RAMROD_CMD_ID_CREATE_QP", "IWARP_RAMROD_CMD_ID_QUERY_QP",
	 "IWARP_RAMROD_CMD_ID_MODIFY_QP",
	 "IWARP_RAMROD_CMD_ID_DESTROY_QP",
	 "IWARP_RAMROD_CMD_ID_ABORT_TCP_OFFLOAD", },
	{ NULL }, /*TOE*/
	{ NULL }, /*PREROCE*/
	{ "COMMON_RAMROD_UNUSED", "COMMON_RAMROD_PF_START",
	     "COMMON_RAMROD_PF_STOP", "COMMON_RAMROD_VF_START",
	     "COMMON_RAMROD_VF_STOP", "COMMON_RAMROD_PF_UPDATE",
	     "COMMON_RAMROD_RL_UPDATE", "COMMON_RAMROD_EMPTY", }
};

/******************** INTERNAL IMPLEMENTATION *********************/

/* Returns the external VOQ number */
static u8 qed_get_ext_voq(struct qed_hwfn *p_hwfn,
			  u8 port_id, u8 tc, u8 max_phys_tcs_per_port)
{
	if (tc == PURE_LB_TC)
		return NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + port_id;
	else
		return port_id * max_phys_tcs_per_port + tc;
}
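
/* Illustrative mapping: with max_phys_tcs_per_port = 4, port 1 / TC 2
 * resolves to VOQ 1 * 4 + 2 = 6, while PURE_LB_TC of port 1 resolves
 * to VOQ NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + 1.
 */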

/* Prepare PF RL enable/disable runtime init values */
static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
	if (pf_rl_en) {
		u8 num_ext_voqs = MAX_NUM_VOQS;
		u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;

		/* Enable RLs for all VOQs */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFVOQENABLE_RT_OFFSET,
			     (u32)voq_bit_mask);

		/* Write RL period */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFPERIODTIMER_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);

		/* Set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
				     QM_PF_RL_UPPER_BOUND);
	}
}

/* Prepare PF WFQ enable/disable runtime init values */
static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);

	/* Set credit threshold for QM bypass flow */
	if (pf_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn,
			     QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
			     QM_PF_WFQ_UPPER_BOUND);
}

/* Prepare global RL enable/disable runtime init values */
static void qed_enable_global_rl(struct qed_hwfn *p_hwfn, bool global_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
		     global_rl_en ? 1 : 0);
	if (global_rl_en) {
		/* Write RL period (use timer 0 only) */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);

		/* Set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
				     QM_GLOBAL_RL_UPPER_BOUND(10000) - 1);
	}
}

/* Prepare VPORT WFQ enable/disable runtime init values */
static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
		     vport_wfq_en ? 1 : 0);

	/* Set credit threshold for QM bypass flow */
	if (vport_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn,
			     QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
			     QM_VP_WFQ_BYPASS_THRESH);
}

/* Prepare runtime init values to allocate PBF command queue lines for
 * the specified VOQ.
 */
static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
				       u8 ext_voq, u16 cmdq_lines)
{
	u32 qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);

	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq),
			 (u32)cmdq_lines);
	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq,
		     qm_line_crd);
	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq,
		     qm_line_crd);
}

/* Prepare runtime init values to allocate PBF command queue lines. */
static void
qed_cmdq_lines_rt_init(struct qed_hwfn *p_hwfn,
		       u8 max_ports_per_engine,
		       u8 max_phys_tcs_per_port,
		       struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u8 tc, ext_voq, port_id, num_tcs_in_port;
	u8 num_ext_voqs = MAX_NUM_VOQS;

	/* Clear PBF lines of all VOQs */
	for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		u16 phys_lines, phys_lines_per_tc;

		if (!port_params[port_id].active)
			continue;

		/* Find number of command queue lines to divide between the
		 * active physical TCs.
		 */
		phys_lines = port_params[port_id].num_pbf_cmd_lines;
		phys_lines -= PBF_CMDQ_PURE_LB_LINES;
		/* Find #lines per active physical TC */
		num_tcs_in_port = 0;
		for (tc = 0; tc < max_phys_tcs_per_port; tc++)
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) == 1)
				num_tcs_in_port++;
		phys_lines_per_tc = phys_lines / num_tcs_in_port;
		/* Init registers per active TC */
		for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
			ext_voq = qed_get_ext_voq(p_hwfn,
						  port_id,
						  tc, max_phys_tcs_per_port);
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) == 1)
				qed_cmdq_lines_voq_rt_init(p_hwfn,
							   ext_voq,
							   phys_lines_per_tc);
		}

		/* Init registers for pure LB TC */
		ext_voq = qed_get_ext_voq(p_hwfn,
					  port_id,
					  PURE_LB_TC, max_phys_tcs_per_port);
		qed_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
					   PBF_CMDQ_PURE_LB_LINES);
	}
}

/* Prepare runtime init values to allocate guaranteed BTB blocks for the
 * specified port. The guaranteed BTB space is divided between the TCs as
 * follows (shared space is currently not used):
 * 1. Parameters:
 *    B - BTB blocks for this port
 *    C - Number of physical TCs for this port
 * 2. Calculation:
 *    a. 38 blocks (9700B jumbo frame) are allocated for global per port
 *	 headroom.
 *    b. B = B - 38 (remainder after global headroom allocation).
 *    c. MAX(38,B/(C+0.7)) blocks are allocated for the pure LB VOQ.
 *    d. B = B - MAX(38, B/(C+0.7)) (remainder after pure LB allocation).
 *    e. B/C blocks are allocated for each physical TC.
 * Assumptions:
 * - MTU is up to 9700 bytes (38 blocks)
 * - All TCs are considered symmetrical (same rate and packet size)
 * - No optimization for lossy TC (all are considered lossless). Shared space
 *   is not enabled and allocated for each TC.
 */
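/* Worked example (illustrative numbers, tracing the integer math
 * below): with B = 1000 blocks and C = 2 active physical TCs,
 * usable = 1000 - 38 = 962; pure LB = max(38, ((962 * 10) / 27) / 10)
 * = max(38, 35) = 38 blocks; each TC then gets (962 - 38) / 2 = 462
 * blocks.
 */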
static void
qed_btb_blocks_rt_init(struct qed_hwfn *p_hwfn,
		       u8 max_ports_per_engine,
		       u8 max_phys_tcs_per_port,
		       struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u32 usable_blocks, pure_lb_blocks, phys_blocks;
	u8 tc, ext_voq, port_id, num_tcs_in_port;

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		if (!port_params[port_id].active)
			continue;

		/* Subtract headroom blocks */
		usable_blocks = port_params[port_id].num_btb_blocks -
				BTB_HEADROOM_BLOCKS;

		/* Find blocks per physical TC. Use a factor to avoid
		 * floating-point arithmetic.
		 */
		num_tcs_in_port = 0;
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) == 1)
				num_tcs_in_port++;

		pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
				 (num_tcs_in_port * BTB_PURE_LB_FACTOR +
				  BTB_PURE_LB_RATIO);
		pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS,
				       pure_lb_blocks / BTB_PURE_LB_FACTOR);
		phys_blocks = (usable_blocks - pure_lb_blocks) /
			      num_tcs_in_port;

		/* Init physical TCs */
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) == 1) {
				ext_voq =
					qed_get_ext_voq(p_hwfn,
							port_id,
							tc,
							max_phys_tcs_per_port);
				STORE_RT_REG(p_hwfn,
					     PBF_BTB_GUARANTEED_RT_OFFSET
					     (ext_voq), phys_blocks);
			}
		}

		/* Init pure LB TC */
		ext_voq = qed_get_ext_voq(p_hwfn,
					  port_id,
					  PURE_LB_TC, max_phys_tcs_per_port);
		STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
			     pure_lb_blocks);
	}
}

/* Prepare runtime init values for the global rate limiters.
 * Each RL is set to the max link speed (100 Gbps).
 * Return -1 on error.
 */
static int qed_global_rl_rt_init(struct qed_hwfn *p_hwfn)
{
	u32 upper_bound = QM_GLOBAL_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) |
			  (u32)QM_RL_CRD_REG_SIGN_BIT;
	u32 inc_val;
	u16 rl_id;

	/* Go over all global RLs */
	for (rl_id = 0; rl_id < MAX_QM_GLOBAL_RLS; rl_id++) {
		inc_val = QM_RL_INC_VAL(QM_MAX_LINK_SPEED);

		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLCRD_RT_OFFSET + rl_id,
			     (u32)QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id,
			     upper_bound);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id, inc_val);
	}

	return 0;
}

/* Returns the upper bound for the specified Vport RL parameters.
 * link_speed is in Mbps.
 * Returns 0 in case of error.
 */
static u32 qed_get_vport_rl_upper_bound(enum init_qm_rl_type vport_rl_type,
					u32 link_speed)
{
	switch (vport_rl_type) {
	case QM_RL_TYPE_NORMAL:
		return QM_INITIAL_VOQ_BYTE_CRD;
	case QM_RL_TYPE_QCN:
		return QM_GLOBAL_RL_UPPER_BOUND(link_speed);
	default:
		return 0;
	}
}

/* Prepare VPORT RL runtime init values.
 * Return -1 on error.
 */
static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
				u16 start_rl,
				u16 num_rls,
				u32 link_speed,
				struct init_qm_rl_params *rl_params)
{
	u16 i, rl_id;

	if (num_rls && start_rl + num_rls >= MAX_QM_GLOBAL_RLS) {
		DP_NOTICE(p_hwfn, "Invalid rate limiter configuration\n");
		return -1;
	}

	/* Go over all PF VPORTs */
	for (i = 0, rl_id = start_rl; i < num_rls; i++, rl_id++) {
		u32 upper_bound, inc_val;

		upper_bound =
		    qed_get_vport_rl_upper_bound((enum init_qm_rl_type)
						 rl_params[i].vport_rl_type,
						 link_speed);

		inc_val =
		    QM_RL_INC_VAL(rl_params[i].vport_rl ?
				  rl_params[i].vport_rl : link_speed);
		if (inc_val > upper_bound) {
			DP_NOTICE(p_hwfn,
				  "Invalid RL rate - limit configuration\n");
			return -1;
		}

		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + rl_id,
			     (u32)QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id,
			     upper_bound | (u32)QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id,
			     inc_val);
	}

	return 0;
}

/* Prepare Tx PQ mapping runtime init values for the specified PF */
static int qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_qm_pf_rt_init_params *p_params,
				 u32 base_mem_addr_4kb)
{
	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
	struct init_qm_vport_params *vport_params = p_params->vport_params;
	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
	u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;
	struct init_qm_pq_params *pq_params = p_params->pq_params;
	u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;

	num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;

	first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
	last_pq_group = (p_params->start_pq + num_pqs - 1) /
			QM_PF_QUEUE_GROUP_SIZE;

	pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
	vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
	mem_addr_4kb = base_mem_addr_4kb;

	/* Set mapping from PQ group to PF */
	for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
		STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
			     (u32)(p_params->pf_id));

	/* Set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
		     QM_PQ_SIZE_256B(p_params->num_pf_cids));
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
		     QM_PQ_SIZE_256B(p_params->num_vf_cids));

	/* Go over all Tx PQs */
	for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
		u16 *p_first_tx_pq_id, vport_id_in_pf;
		struct qm_rf_pq_map tx_pq_map;
		u8 tc_id = pq_params[i].tc_id;
		bool is_vf_pq;
		u8 ext_voq;

		ext_voq = qed_get_ext_voq(p_hwfn,
					  pq_params[i].port_id,
					  tc_id,
					  p_params->max_phys_tcs_per_port);
		is_vf_pq = (i >= p_params->num_pf_pqs);

		/* Update first Tx PQ of VPORT/TC */
		vport_id_in_pf = pq_params[i].vport_id - p_params->start_vport;
		p_first_tx_pq_id =
		    &vport_params[vport_id_in_pf].first_tx_pq_id[tc_id];
		if (*p_first_tx_pq_id == QM_INVALID_PQ_ID) {
			u32 map_val =
				(ext_voq << QM_VP_WFQ_PQ_VOQ_SHIFT) |
				(p_params->pf_id << QM_VP_WFQ_PQ_PF_SHIFT);

			/* Create new VP PQ */
			*p_first_tx_pq_id = pq_id;

			/* Map VP PQ to VOQ and PF */
			STORE_RT_REG(p_hwfn,
				     QM_REG_WFQVPMAP_RT_OFFSET +
				     *p_first_tx_pq_id,
				     map_val);
		}

		/* Prepare PQ map entry */
		QM_INIT_TX_PQ_MAP(p_hwfn,
				  tx_pq_map,
				  pq_id,
				  *p_first_tx_pq_id,
				  pq_params[i].rl_valid,
				  pq_params[i].rl_id,
				  ext_voq, pq_params[i].wrr_group);

		/* Set PQ base address */
		STORE_RT_REG(p_hwfn,
			     QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);

		/* Clear PQ pointer table entry (64 bit) */
		if (p_params->is_pf_loading)
			for (j = 0; j < 2; j++)
				STORE_RT_REG(p_hwfn,
					     QM_REG_PTRTBLTX_RT_OFFSET +
					     (pq_id * 2) + j, 0);

		/* Write PQ info to RAM */
		if (WRITE_PQ_INFO_TO_RAM != 0) {
			u32 pq_info = 0;

			pq_info = PQ_INFO_ELEMENT(*p_first_tx_pq_id,
						  p_params->pf_id,
						  tc_id,
						  pq_params[i].port_id,
						  pq_params[i].rl_valid,
						  pq_params[i].rl_id);
			qed_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
			       pq_info);
		}

		/* If VF PQ, add indication to PQ VF mask */
		if (is_vf_pq) {
			tx_pq_vf_mask[pq_id /
				      QM_PF_QUEUE_GROUP_SIZE] |=
			    BIT((pq_id % QM_PF_QUEUE_GROUP_SIZE));
			mem_addr_4kb += vport_pq_mem_4kb;
		} else {
			mem_addr_4kb += pq_mem_4kb;
		}
	}

	/* Store Tx PQ VF mask to size select register */
	for (i = 0; i < num_tx_pq_vf_masks; i++)
		if (tx_pq_vf_mask[i])
			STORE_RT_REG(p_hwfn,
				     QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i,
				     tx_pq_vf_mask[i]);

	return 0;
}

/* Prepare Other PQ mapping runtime init values for the specified PF */
static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
				     u8 pf_id,
				     bool is_pf_loading,
				     u32 num_pf_cids,
				     u32 num_tids, u32 base_mem_addr_4kb)
{
	u32 pq_size, pq_mem_4kb, mem_addr_4kb;
	u16 i, j, pq_id, pq_group;

	/* A single other PQ group is used in each PF, where PQ group i is used
	 * in PF i.
	 */
	pq_group = pf_id;
	pq_size = num_pf_cids + num_tids;
	pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
	mem_addr_4kb = base_mem_addr_4kb;
	/* Map PQ group to PF */
	STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
		     (u32)(pf_id));
	/* Set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
		     QM_PQ_SIZE_256B(pq_size));
	for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
	     i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
		/* Set PQ base address */
		STORE_RT_REG(p_hwfn,
			     QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);

		/* Clear PQ pointer table entry */
		if (is_pf_loading)
			for (j = 0; j < 2; j++)
				STORE_RT_REG(p_hwfn,
					     QM_REG_PTRTBLOTHER_RT_OFFSET +
					     (pq_id * 2) + j, 0);

		mem_addr_4kb += pq_mem_4kb;
	}
}

/* Prepare PF WFQ runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
			      struct qed_qm_pf_rt_init_params *p_params)
{
	u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
	struct init_qm_pq_params *pq_params = p_params->pq_params;
	u32 inc_val, crd_reg_offset;
	u8 ext_voq;
	u16 i;

	inc_val = QM_PF_WFQ_INC_VAL(p_params->pf_wfq);
	if (!inc_val || inc_val > QM_PF_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
		return -1;
	}

	for (i = 0; i < num_tx_pqs; i++) {
		ext_voq = qed_get_ext_voq(p_hwfn,
					  pq_params[i].port_id,
					  pq_params[i].tc_id,
					  p_params->max_phys_tcs_per_port);
		crd_reg_offset =
			(p_params->pf_id < MAX_NUM_PFS_BB ?
			 QM_REG_WFQPFCRD_RT_OFFSET :
			 QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
			ext_voq * MAX_NUM_PFS_BB +
			(p_params->pf_id % MAX_NUM_PFS_BB);
		OVERWRITE_RT_REG(p_hwfn,
				 crd_reg_offset, (u32)QM_WFQ_CRD_REG_SIGN_BIT);
	}

	STORE_RT_REG(p_hwfn,
		     QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
		     QM_PF_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
		     inc_val);

	return 0;
}

/* Prepare PF RL runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);

	if (inc_val > QM_PF_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
		return -1;
	}

	STORE_RT_REG(p_hwfn,
		     QM_REG_RLPFCRD_RT_OFFSET + pf_id,
		     (u32)QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn,
		     QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
		     QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);

	return 0;
}

/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
			      u16 num_vports,
			      struct init_qm_vport_params *vport_params)
{
	u16 vport_pq_id, wfq, i;
	u32 inc_val;
	u8 tc;

	/* Go over all PF VPORTs */
	for (i = 0; i < num_vports; i++) {
		/* Each VPORT can have several VPORT PQ IDs for various TCs */
		for (tc = 0; tc < NUM_OF_TCS; tc++) {
			/* Check if VPORT/TC is valid */
			vport_pq_id = vport_params[i].first_tx_pq_id[tc];
			if (vport_pq_id == QM_INVALID_PQ_ID)
				continue;

			/* Find WFQ weight (per VPORT or per VPORT+TC) */
			wfq = vport_params[i].wfq;
			wfq = wfq ? wfq : vport_params[i].tc_wfq[tc];
			inc_val = QM_VP_WFQ_INC_VAL(wfq);
			if (inc_val > QM_VP_WFQ_MAX_INC_VAL) {
				DP_NOTICE(p_hwfn,
					  "Invalid VPORT WFQ weight configuration\n");
				return -1;
			}

			/* Config registers */
			STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET +
				     vport_pq_id,
				     (u32)QM_WFQ_CRD_REG_SIGN_BIT);
			STORE_RT_REG(p_hwfn, QM_REG_WFQVPUPPERBOUND_RT_OFFSET +
				     vport_pq_id,
				     inc_val | QM_WFQ_CRD_REG_SIGN_BIT);
			STORE_RT_REG(p_hwfn, QM_REG_WFQVPWEIGHT_RT_OFFSET +
				     vport_pq_id, inc_val);
		}
	}

	return 0;
}

static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt)
{
	u32 reg_val, i;

	for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val;
	     i++) {
		udelay(QM_STOP_CMD_POLL_PERIOD_US);
		reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
	}

	/* Check if timeout while waiting for SDM command ready */
	if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "Timeout when waiting for QM SDM command ready signal\n");
		return false;
	}

	return true;
}

static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
{
	if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
		return false;

	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);

	return qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}

/******************** INTERFACE IMPLEMENTATION *********************/

u32 qed_qm_pf_mem_size(u32 num_pf_cids,
		       u32 num_vf_cids,
		       u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
{
	return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
	       QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
	       QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}

int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
			  struct qed_qm_common_rt_init_params *p_params)
{
	u32 mask = 0;

	/* Init AFullOprtnstcCrdMask */
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_LINEVOQ,
		  QM_OPPOR_LINE_VOQ_DEF);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ, QM_BYTE_CRD_EN);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ,
		  p_params->pf_wfq_en ? 1 : 0);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ,
		  p_params->vport_wfq_en ? 1 : 0);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL,
		  p_params->pf_rl_en ? 1 : 0);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPQCNRL,
		  p_params->global_rl_en ? 1 : 0);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_FWPAUSE, QM_OPPOR_FW_STOP_DEF);
	SET_FIELD(mask,
		  QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY, QM_OPPOR_PQ_EMPTY_DEF);
	STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);

	/* Enable/disable PF RL */
	qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en);

	/* Enable/disable PF WFQ */
	qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);

	/* Enable/disable global RL */
	qed_enable_global_rl(p_hwfn, p_params->global_rl_en);

	/* Enable/disable VPORT WFQ */
	qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);

	/* Init PBF CMDQ line credit */
	qed_cmdq_lines_rt_init(p_hwfn,
			       p_params->max_ports_per_engine,
			       p_params->max_phys_tcs_per_port,
			       p_params->port_params);

	/* Init BTB blocks in PBF */
	qed_btb_blocks_rt_init(p_hwfn,
			       p_params->max_ports_per_engine,
			       p_params->max_phys_tcs_per_port,
			       p_params->port_params);
	qed_global_rl_rt_init(p_hwfn);

	return 0;
}

int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      struct qed_qm_pf_rt_init_params *p_params)
{
	struct init_qm_vport_params *vport_params = p_params->vport_params;
	u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids +
					       p_params->num_tids) *
				 QM_OTHER_PQS_PER_PF;
	u16 i;
	u8 tc;

	/* Clear first Tx PQ ID array for each VPORT */
	for (i = 0; i < p_params->num_vports; i++)
		for (tc = 0; tc < NUM_OF_TCS; tc++)
			vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;

	/* Map Other PQs (if any) */
	qed_other_pq_map_rt_init(p_hwfn,
				 p_params->pf_id,
				 p_params->is_pf_loading, p_params->num_pf_cids,
				 p_params->num_tids, 0);

	/* Map Tx PQs */
	if (qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb))
		return -1;

	/* Init PF WFQ */
	if (p_params->pf_wfq)
		if (qed_pf_wfq_rt_init(p_hwfn, p_params))
			return -1;

	/* Init PF RL */
	if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
		return -1;

	/* Init VPORT WFQ */
	if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
		return -1;

	/* Set VPORT RL */
	if (qed_vport_rl_rt_init(p_hwfn, p_params->start_rl,
				 p_params->num_rls, p_params->link_speed,
				 p_params->rl_params))
		return -1;

	return 0;
}

int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
{
	u32 inc_val = QM_PF_WFQ_INC_VAL(pf_wfq);

	if (!inc_val || inc_val > QM_PF_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
	return 0;
}

int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);

	if (inc_val > QM_PF_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
		return -1;
	}

	qed_wr(p_hwfn,
	       p_ptt, QM_REG_RLPFCRD + pf_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);

	return 0;
}

int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq)
{
	int result = 0;
	u16 vport_pq_id;
	u8 tc;

	for (tc = 0; tc < NUM_OF_TCS && !result; tc++) {
		vport_pq_id = first_tx_pq_id[tc];
		if (vport_pq_id != QM_INVALID_PQ_ID)
			result = qed_init_vport_tc_wfq(p_hwfn, p_ptt,
						       vport_pq_id, wfq);
	}

	return result;
}

int qed_init_vport_tc_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			  u16 first_tx_pq_id, u16 wfq)
{
	u32 inc_val;

	if (first_tx_pq_id == QM_INVALID_PQ_ID)
		return -1;

	inc_val = QM_VP_WFQ_INC_VAL(wfq);
	if (!inc_val || inc_val > QM_VP_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid VPORT WFQ configuration.\n");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPCRD + first_tx_pq_id * 4,
	       (u32)QM_WFQ_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPUPPERBOUND + first_tx_pq_id * 4,
	       inc_val | QM_WFQ_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPWEIGHT + first_tx_pq_id * 4,
	       inc_val);

	return 0;
}

int qed_init_global_rl(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit,
		       enum init_qm_rl_type vport_rl_type)
{
	u32 inc_val, upper_bound;
	upper_bound =
	    (vport_rl_type ==
	     QM_RL_TYPE_QCN) ? QM_GLOBAL_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) :
	    QM_INITIAL_VOQ_BYTE_CRD;
	inc_val = QM_RL_INC_VAL(rate_limit);
	if (inc_val > upper_bound) {
		DP_NOTICE(p_hwfn, "Invalid VPORT rate limit configuration.\n");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt,
	       QM_REG_RLGLBLCRD + rl_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn,
	       p_ptt,
	       QM_REG_RLGLBLUPPERBOUND + rl_id * 4,
	       upper_bound | (u32)QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + rl_id * 4, inc_val);

	return 0;
}

bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  bool is_release_cmd,
			  bool is_tx_pq, u16 start_pq, u16 num_pqs)
{
	u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
	u32 pq_mask = 0, last_pq, pq_id;

	last_pq = start_pq + num_pqs - 1;

	/* Set command's PQ type */
	QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);

	/* Go over requested PQs */
	for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
		/* Set PQ bit in mask (stop command only) */
		if (!is_release_cmd)
			pq_mask |= BIT((pq_id % QM_STOP_PQ_MASK_WIDTH));

		/* If last PQ or end of PQ mask, write command */
		if ((pq_id == last_pq) ||
		    (pq_id % QM_STOP_PQ_MASK_WIDTH ==
		     (QM_STOP_PQ_MASK_WIDTH - 1))) {
			QM_CMD_SET_FIELD(cmd_arr,
					 QM_STOP_CMD, PAUSE_MASK, pq_mask);
			QM_CMD_SET_FIELD(cmd_arr,
					 QM_STOP_CMD,
					 GROUP_ID,
					 pq_id / QM_STOP_PQ_MASK_WIDTH);
			if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
					     cmd_arr[0], cmd_arr[1]))
				return false;
			pq_mask = 0;
		}
	}

	return true;
}
#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
	do { \
		typeof(var) *__p_var = &(var); \
		typeof(offset) __offset = offset; \
		*__p_var = (*__p_var & ~BIT(__offset)) | \
			   ((enable) ? BIT(__offset) : 0); \
	} while (0)
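
/* Sets or clears bit 'offset' of 'var' according to 'enable', leaving
 * all other bits untouched; the local copies ensure each macro
 * argument is evaluated only once.
 */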

#define PRS_ETH_TUNN_OUTPUT_FORMAT     0xF4DAB910
#define PRS_ETH_OUTPUT_FORMAT          0xFFFF4910

#define ARR_REG_WR(dev, ptt, addr, arr,	arr_size) \
	do { \
		u32 i; \
		\
		for (i = 0; i < (arr_size); i++) \
			qed_wr(dev, ptt, \
			       ((addr) + (4 * i)), \
			       ((u32 *)&(arr))[i]); \
	} while (0)

/**
 * qed_dmae_to_grc() - Internal function for writing from host to
 * wide-bus registers (split registers are not supported yet).
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT window used for writing the registers.
 * @p_data: Pointer to source data.
 * @addr: Destination register address.
 * @len_in_dwords: Data length in dwords (u32).
 *
 * Return: Length of the written data in dwords (u32) or -1 on invalid
 *         input.
 */
static int qed_dmae_to_grc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			   __le32 *p_data, u32 addr, u32 len_in_dwords)
{
	struct qed_dmae_params params = { 0 };
	u32 *data_cpu;
	int rc;

	if (!p_data)
		return -1;

	/* Set DMAE params */
	SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 1);

	/* Execute DMAE command */
	rc = qed_dmae_host2grc(p_hwfn, p_ptt,
			       (u64)(uintptr_t)(p_data),
			       addr, len_in_dwords, &params);

	/* If not read using DMAE, read using GRC */
	if (rc) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_DEBUG,
			   "Failed writing to chip using DMAE, using GRC instead\n");

		/* Swap to CPU byteorder and write to registers using GRC */
		data_cpu = (__force u32 *)p_data;
		le32_to_cpu_array(data_cpu, len_in_dwords);

		ARR_REG_WR(p_hwfn, p_ptt, addr, data_cpu, len_in_dwords);
		cpu_to_le32_array(data_cpu, len_in_dwords);
	}

	return len_in_dwords;
}

void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt, u16 dest_port)
{
	/* Update PRS register */
	qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);

	/* Update NIG register */
	qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);

	/* Update PBF register */
	qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}

void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, bool vxlan_enable)
{
	u32 reg_val;
	u8 shift;

	/* Update PRS register */
	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_FIELD(reg_val,
		  PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE, vxlan_enable);
	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) {
		reg_val =
		    qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0);

		/* Update output only if tunnel blocks are not included. */
		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
			qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
			       (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
	}
	/* Update NIG register */
	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable);
	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	/* Update DORQ register */
	qed_wr(p_hwfn,
	       p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN, vxlan_enable ? 1 : 0);
}

void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
			bool eth_gre_enable, bool ip_gre_enable)
{
	u32 reg_val;
	u8 shift;

	/* Update PRS register */
	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_FIELD(reg_val,
		  PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE,
		  eth_gre_enable);
	SET_FIELD(reg_val,
		  PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE,
		  ip_gre_enable);
	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) {
		reg_val =
		    qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0);

		/* Update output only if tunnel blocks are not included. */
		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
			qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
			       (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
	}
	/* Update NIG register */
	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable);
	shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable);
	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	/* Update DORQ registers */
	qed_wr(p_hwfn,
	       p_ptt,
	       DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN, eth_gre_enable ? 1 : 0);
	qed_wr(p_hwfn,
	       p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN, ip_gre_enable ? 1 : 0);
}

void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt, u16 dest_port)
{
	/* Update PRS register */
	qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);

	/* Update NIG register */
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);

	/* Update PBF register */
	qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}

void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt,
			   bool eth_geneve_enable, bool ip_geneve_enable)
{
	u32 reg_val;

	/* Update PRS register */
	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_FIELD(reg_val,
		  PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE,
		  eth_geneve_enable);
	SET_FIELD(reg_val,
		  PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE,
		  ip_geneve_enable);
	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) {
		reg_val =
		    qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0);

		/* Update output only if tunnel blocks are not included. */
		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
			qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
			       (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
	}
	/* Update NIG register */
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
	       eth_geneve_enable ? 1 : 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);

	/* EDPM with geneve tunnel not supported in BB */
	if (QED_IS_BB_B0(p_hwfn->cdev))
		return;

	/* Update DORQ registers */
	qed_wr(p_hwfn,
	       p_ptt,
	       DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2,
	       eth_geneve_enable ? 1 : 0);
	qed_wr(p_hwfn,
	       p_ptt,
	       DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2,
	       ip_geneve_enable ? 1 : 0);
}
#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET      3
#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT   0xC8DAB910

void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt, bool enable)
{
	u32 reg_val, cfg_mask;

	/* read PRS config register */
	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO);

	/* set VXLAN_NO_L2_ENABLE mask */
	cfg_mask = BIT(PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET);

	if (enable) {
		/* set VXLAN_NO_L2_ENABLE flag */
		reg_val |= cfg_mask;

		/* update PRS FIC  register */
		qed_wr(p_hwfn,
		       p_ptt,
		       PRS_REG_OUTPUT_FORMAT_4_0,
		       (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
	} else {
		/* clear VXLAN_NO_L2_ENABLE flag */
		reg_val &= ~cfg_mask;
	}

	/* write PRS config register */
	qed_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val);
}

#define T_ETH_PACKET_ACTION_GFT_EVENTID  23
#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR  272
#define T_ETH_PACKET_MATCH_RFS_EVENTID 25
#define PARSER_ETH_CONN_CM_HDR 0
#define CAM_LINE_SIZE sizeof(u32)
#define RAM_LINE_SIZE sizeof(u64)
#define REG_SIZE sizeof(u32)

void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
{
	struct regpair ram_line = { 0 };

	/* Disable gft search for PF */
	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);

	/* Clean ram & cam for next gft session */

	/* Zero camline */
	qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);

	/* Zero ramline */
	qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo,
			PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
			sizeof(ram_line) / REG_SIZE);
}

void qed_gft_config(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    u16 pf_id,
		    bool tcp,
		    bool udp,
		    bool ipv4, bool ipv6, enum gft_profile_type profile_type)
{
	struct regpair ram_line;
	u32 search_non_ip_as_gft;
	u32 reg_val, cam_line;
	u32 lo = 0, hi = 0;

	if (!ipv6 && !ipv4)
		DP_NOTICE(p_hwfn,
			  "gft_config: must accept at least one of - ipv4 or ipv6\n");
	if (!tcp && !udp)
		DP_NOTICE(p_hwfn,
			  "gft_config: must accept at least one of - udp or tcp\n");
	if (profile_type >= MAX_GFT_PROFILE_TYPE)
		DP_NOTICE(p_hwfn, "gft_config: unsupported gft_profile_type\n");

	/* Set RFS event ID to be awakened in Tstorm by PRS */
	reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID <<
		  PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
	reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
	qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);

	/* Do not load context-only CID in PRS on match. */
	qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);

	/* Do not use tenant ID exist bit for gft search */
	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);

	/* Set Cam */
	cam_line = 0;
	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);

	/* Filters are per PF!! */
	SET_FIELD(cam_line,
		  GFT_CAM_LINE_MAPPED_PF_ID_MASK,
		  GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);

	if (!(tcp && udp)) {
		SET_FIELD(cam_line,
			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
		if (tcp)
			SET_FIELD(cam_line,
				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
				  GFT_PROFILE_TCP_PROTOCOL);
		else
			SET_FIELD(cam_line,
				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
				  GFT_PROFILE_UDP_PROTOCOL);
	}

	if (!(ipv4 && ipv6)) {
		SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
		if (ipv4)
			SET_FIELD(cam_line,
				  GFT_CAM_LINE_MAPPED_IP_VERSION,
				  GFT_PROFILE_IPV4);
		else
			SET_FIELD(cam_line,
				  GFT_CAM_LINE_MAPPED_IP_VERSION,
				  GFT_PROFILE_IPV6);
	}

	/* Write characteristics to cam */
	qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
	       cam_line);
	cam_line =
	    qed_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
	/* Write line to RAM - compare to filter 4 tuple */

	/* By default, do not search non-IP packets as GFT */
	search_non_ip_as_gft = 0;

	/* Tunnel type */
	SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
	SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);

	if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
		SET_FIELD(hi, GFT_RAM_LINE_DST_IP, 1);
		SET_FIELD(hi, GFT_RAM_LINE_SRC_IP, 1);
		SET_FIELD(hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
		SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1);
		SET_FIELD(lo, GFT_RAM_LINE_SRC_PORT, 1);
		SET_FIELD(lo, GFT_RAM_LINE_DST_PORT, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
		SET_FIELD(hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
		SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1);
		SET_FIELD(lo, GFT_RAM_LINE_DST_PORT, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) {
		SET_FIELD(hi, GFT_RAM_LINE_DST_IP, 1);
		SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) {
		SET_FIELD(hi, GFT_RAM_LINE_SRC_IP, 1);
		SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
		SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);

		/* Allow tunneled traffic without inner IP */
		search_non_ip_as_gft = 1;
	}

	ram_line.lo = cpu_to_le32(lo);
	ram_line.hi = cpu_to_le32(hi);

	qed_wr(p_hwfn,
	       p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT, search_non_ip_as_gft);
	qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo,
			PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
			sizeof(ram_line) / REG_SIZE);

	/* Set default profile so that no filter match will happen */
	ram_line.lo = cpu_to_le32(0xffffffff);
	ram_line.hi = cpu_to_le32(0x3ff);
	qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo,
			PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
			PRS_GFT_CAM_LINES_NO_MATCH,
			sizeof(ram_line) / REG_SIZE);

	/* Enable gft search */
	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
}

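/* CRC8 table for the CDU validation byte; lazily populated with
 * polynomial 0x07 on first use in qed_calc_cdu_validation_byte().
 */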
DECLARE_CRC8_TABLE(cdu_crc8_table);

/* Calculate and return CDU validation byte per connection type/region/cid */
static u8 qed_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
{
	const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
	u8 crc, validation_byte = 0;
	static u8 crc8_table_valid; /* automatically initialized to 0 */
	u32 validation_string = 0;
	__be32 data_to_crc;

	if (!crc8_table_valid) {
		crc8_populate_msb(cdu_crc8_table, 0x07);
		crc8_table_valid = 1;
	}

	/* The CRC is calculated on the String-to-compress:
	 * [31:8]  = {CID[31:20],CID[11:0]}
	 * [7:4]   = Region
	 * [3:0]   = Type
	 */
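	/* E.g., with all inputs enabled, cid 0x10 in region 3 with type 1
	 * compresses to 0x1031: ((0x10 & 0xfff) << 8) | (3 << 4) | 1.
	 */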
	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
		validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);

	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
		validation_string |= ((region & 0xF) << 4);

	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
		validation_string |= (conn_type & 0xF);

	/* Convert to big-endian and calculate CRC8 */
	data_to_crc = cpu_to_be32(validation_string);
	crc = crc8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc),
		   CRC8_INIT_VALUE);

	/* The validation byte [7:0] is composed as follows:
	 * for type A validation
	 * [7]          = active configuration bit
	 * [6:0]        = crc[6:0]
	 *
	 * for type B validation
	 * [7]          = active configuration bit
	 * [6:3]        = connection_type[3:0]
	 * [2:0]        = crc[2:0]
	 */
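	/* E.g., a type B byte with the active bit set, conn_type 1 and
	 * crc 0x5 is 0x80 | (1 << 3) | 0x5 = 0x8d.
	 */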
	validation_byte |=
	    ((validation_cfg >>
	      CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;

	if ((validation_cfg >>
	     CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
		validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
	else
		validation_byte |= crc & 0x7F;

	return validation_byte;
}

/* Calculate and set validation bytes for session context */
void qed_calc_session_ctx_validation(void *p_ctx_mem,
				     u16 ctx_size, u8 ctx_type, u32 cid)
{
	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;

	p_ctx = (u8 * const)p_ctx_mem;
	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];

	memset(p_ctx, 0, ctx_size);

	*x_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 3, cid);
	*t_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 4, cid);
	*u_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 5, cid);
}

/* Calculate and set validation bytes for task context */
void qed_calc_task_ctx_validation(void *p_ctx_mem,
				  u16 ctx_size, u8 ctx_type, u32 tid)
{
	u8 *p_ctx, *region1_val_ptr;

	p_ctx = (u8 * const)p_ctx_mem;
	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];

	memset(p_ctx, 0, ctx_size);

	*region1_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 1, tid);
}

/* Memset session context to 0 while preserving validation bytes */
void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
	u8 x_val, t_val, u_val;

	p_ctx = (u8 * const)p_ctx_mem;
	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];

	x_val = *x_val_ptr;
	t_val = *t_val_ptr;
	u_val = *u_val_ptr;

	memset(p_ctx, 0, ctx_size);

	*x_val_ptr = x_val;
	*t_val_ptr = t_val;
	*u_val_ptr = u_val;
}

/* Memset task context to 0 while preserving validation bytes */
void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
	u8 *p_ctx, *region1_val_ptr;
	u8 region1_val;

	p_ctx = (u8 * const)p_ctx_mem;
	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];

	region1_val = *region1_val_ptr;

	memset(p_ctx, 0, ctx_size);

	*region1_val_ptr = region1_val;
}

/* Enable and configure context validation */
void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 ctx_validation;

	/* Enable validation for connection region 3: CCFC_CTX_VALID0[31:24] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
	qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);

	/* Enable validation for connection region 5: CCFC_CTX_VALID1[15:8] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
	qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);

	/* Enable validation for connection region 1: TCFC_CTX_VALID0[15:8] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
	qed_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
}

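/* Return a printable name for the given protocol type, or an "Invalid"
 * string if it is out of range.
 */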
const char *qed_get_protocol_type_str(u32 protocol_type)
{
	if (protocol_type >= ARRAY_SIZE(s_protocol_types))
		return "Invalid protocol type";

	return s_protocol_types[protocol_type];
}

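/* Return a printable name for the given ramrod command ID of the given
 * protocol type, or an "Invalid" string if either index is unknown.
 */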
const char *qed_get_ramrod_cmd_id_str(u32 protocol_type, u32 ramrod_cmd_id)
{
	const char *ramrod_cmd_id_str;

	if (protocol_type >= ARRAY_SIZE(s_ramrod_cmd_ids))
		return "Invalid protocol type";

	if (ramrod_cmd_id >= ARRAY_SIZE(s_ramrod_cmd_ids[0]))
		return "Invalid Ramrod command ID";

	ramrod_cmd_id_str = s_ramrod_cmd_ids[protocol_type][ramrod_cmd_id];

	if (!ramrod_cmd_id_str)
		return "Invalid Ramrod command ID";

	return ramrod_cmd_id_str;
}

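/* Return the GRC address of this PF's RDMA assert level in the given
 * Storm's fast memory, or 0 for an unknown storm_id.
 */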
static u32 qed_get_rdma_assert_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id)
{
	switch (storm_id) {
	case 0:
		return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    TSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 1:
		return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    MSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 2:
		return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    USTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 3:
		return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    XSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 4:
		return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    YSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 5:
		return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    PSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);

	default:
		return 0;
	}
}

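/* Write the requested assert level of each Storm to its RDMA assert
 * level location in RAM.
 */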
void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      u8 assert_level[NUM_STORMS])
{
	u8 storm_id;

	for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
		u32 ram_addr = qed_get_rdma_assert_ram_addr(p_hwfn, storm_id);

		qed_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]);
	}
}

#define PHYS_ADDR_DWORDS        DIV_ROUND_UP(sizeof(dma_addr_t), 4)
#define OVERLAY_HDR_SIZE_DWORDS (sizeof(struct fw_overlay_buf_hdr) / 4)

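/* Return the GRC address of the FW overlay buffer address location in
 * the given Storm's fast memory, or 0 for an unknown storm_id.
 */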
static u32 qed_get_overlay_addr_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id)
{
	switch (storm_id) {
	case 0:
		return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    TSTORM_OVERLAY_BUF_ADDR_OFFSET;
	case 1:
		return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    MSTORM_OVERLAY_BUF_ADDR_OFFSET;
	case 2:
		return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    USTORM_OVERLAY_BUF_ADDR_OFFSET;
	case 3:
		return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    XSTORM_OVERLAY_BUF_ADDR_OFFSET;
	case 4:
		return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    YSTORM_OVERLAY_BUF_ADDR_OFFSET;
	case 5:
		return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    PSTORM_OVERLAY_BUF_ADDR_OFFSET;

	default:
		return 0;
	}
}

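/* Parse the FW overlay input buffer - a sequence of per-Storm sections,
 * each starting with a fw_overlay_buf_hdr that gives the Storm ID and
 * buffer size in dwords - and allocate a DMA-coherent copy of each
 * Storm's overlay data. Returns an array of NUM_STORMS descriptors, or
 * NULL on failure.
 */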
struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
					       const u32 * const
					       fw_overlay_in_buf,
					       u32 buf_size_in_bytes)
{
	u32 buf_size = buf_size_in_bytes / sizeof(u32), buf_offset = 0;
	struct phys_mem_desc *allocated_mem;

	if (!buf_size)
		return NULL;

	allocated_mem = kcalloc(NUM_STORMS, sizeof(struct phys_mem_desc),
				GFP_KERNEL);
	if (!allocated_mem)
		return NULL;

	/* Parse the buffer and allocate a physical buffer per Storm */
	while (buf_offset < buf_size) {
		struct phys_mem_desc *storm_mem_desc;
		struct fw_overlay_buf_hdr *hdr;
		u32 storm_buf_size;
		u8 storm_id;

		hdr =
		    (struct fw_overlay_buf_hdr *)&fw_overlay_in_buf[buf_offset];
		storm_buf_size = GET_FIELD(hdr->data,
					   FW_OVERLAY_BUF_HDR_BUF_SIZE);
		storm_id = GET_FIELD(hdr->data, FW_OVERLAY_BUF_HDR_STORM_ID);
		if (storm_id >= NUM_STORMS)
			break;
		storm_mem_desc = allocated_mem + storm_id;
		storm_mem_desc->size = storm_buf_size * sizeof(u32);

		/* Allocate physical memory for Storm's overlays buffer */
		storm_mem_desc->virt_addr =
		    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       storm_mem_desc->size,
				       &storm_mem_desc->phys_addr, GFP_KERNEL);
		if (!storm_mem_desc->virt_addr)
			break;

		/* Skip overlays buffer header */
		buf_offset += OVERLAY_HDR_SIZE_DWORDS;

		/* Copy Storm's overlays buffer to allocated memory */
		memcpy(storm_mem_desc->virt_addr,
		       &fw_overlay_in_buf[buf_offset], storm_mem_desc->size);

		/* Advance to next Storm */
		buf_offset += storm_buf_size;
	}

	/* If memory allocation has failed, free all allocated memory */
	if (buf_offset < buf_size) {
		qed_fw_overlay_mem_free(p_hwfn, &allocated_mem);
		return NULL;
	}

	return allocated_mem;
}

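/* Write each Storm's overlay buffer physical address to the current
 * PF's location in that Storm's RAM.
 */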
void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt,
			     struct phys_mem_desc *fw_overlay_mem)
{
	u8 storm_id;

	for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
		struct phys_mem_desc *storm_mem_desc =
		    (struct phys_mem_desc *)fw_overlay_mem + storm_id;
		u32 ram_addr, i;

		/* Skip Storms with no FW overlays */
		if (!storm_mem_desc->virt_addr)
			continue;

		/* Calculate overlay RAM GRC address of current PF */
		ram_addr = qed_get_overlay_addr_ram_addr(p_hwfn, storm_id) +
			   sizeof(dma_addr_t) * p_hwfn->rel_pf_id;

		/* Write Storm's overlay physical address to RAM */
		for (i = 0; i < PHYS_ADDR_DWORDS; i++, ram_addr += sizeof(u32))
			qed_wr(p_hwfn, p_ptt, ram_addr,
			       ((u32 *)&storm_mem_desc->phys_addr)[i]);
	}
}

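/* Free each Storm's DMA-coherent overlay buffer, then the descriptor
 * array itself, and clear the caller's pointer.
 */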
void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
			     struct phys_mem_desc **fw_overlay_mem)
{
	u8 storm_id;

	if (!fw_overlay_mem || !(*fw_overlay_mem))
		return;

	for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
		struct phys_mem_desc *storm_mem_desc =
		    (struct phys_mem_desc *)*fw_overlay_mem + storm_id;

		/* Free Storm's physical memory */
		if (storm_mem_desc->virt_addr)
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  storm_mem_desc->size,
					  storm_mem_desc->virt_addr,
					  storm_mem_desc->phys_addr);
	}

	/* Free allocated virtual memory */
	kfree(*fw_overlay_mem);
	*fw_overlay_mem = NULL;
}