qed_sp_commands.c

// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"

void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
			    struct qed_spq_entry *p_ent)
{
	/* qed_spq_get_entry() can either get an entry from the free_pool,
	 * or, if no entries are left, allocate a new entry and add it to
	 * the unlimited_pending list.
	 */
	if (p_ent->queue == &p_hwfn->p_spq->unlimited_pending)
		kfree(p_ent);
	else
		qed_spq_return_entry(p_hwfn, p_ent);
}

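/* Acquire an SPQ entry and initialize its ramrod header (CID, command and
 * protocol IDs), priority and completion mode for a slowpath request.
 */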
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
			struct qed_spq_entry **pp_ent,
			u8 cmd, u8 protocol, struct qed_sp_init_data *p_data)
{
	u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
	struct qed_spq_entry *p_ent = NULL;
	int rc;

	if (!pp_ent)
		return -ENOMEM;

	rc = qed_spq_get_entry(p_hwfn, pp_ent);

	if (rc)
		return rc;

	p_ent = *pp_ent;

	p_ent->elem.hdr.cid		= cpu_to_le32(opaque_cid);
	p_ent->elem.hdr.cmd_id		= cmd;
	p_ent->elem.hdr.protocol_id	= protocol;

	p_ent->priority		= QED_SPQ_PRIORITY_NORMAL;
	p_ent->comp_mode	= p_data->comp_mode;
	p_ent->comp_done.done	= 0;

	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
		p_ent->comp_cb.cookie = &p_ent->comp_done;
		break;

	case QED_SPQ_MODE_BLOCK:
		if (!p_data->p_comp_data)
			goto err;

		p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
		break;

	case QED_SPQ_MODE_CB:
		if (!p_data->p_comp_data)
			p_ent->comp_cb.function = NULL;
		else
			p_ent->comp_cb = *p_data->p_comp_data;
		break;

	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		goto err;
	}

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SPQ,
		   "Initialized: CID %08x %s:[%02x] %s:%02x data_addr %llx comp_mode [%s]\n",
		   opaque_cid, qed_get_ramrod_cmd_id_str(protocol, cmd),
		   cmd, qed_get_protocol_type_str(protocol), protocol,
		   (unsigned long long)(uintptr_t)&p_ent->ramrod,
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));

	return 0;

err:
	qed_sp_destroy_request(p_hwfn, p_ent);

	return -EINVAL;
}

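/* Translate a driver tunnel classification type to the firmware
 * TUNNEL_CLSS_* value; unknown types fall back to TUNNEL_CLSS_MAC_VLAN.
 */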
static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type)
{
	switch (type) {
	case QED_TUNN_CLSS_MAC_VLAN:
		return TUNNEL_CLSS_MAC_VLAN;
	case QED_TUNN_CLSS_MAC_VNI:
		return TUNNEL_CLSS_MAC_VNI;
	case QED_TUNN_CLSS_INNER_MAC_VLAN:
		return TUNNEL_CLSS_INNER_MAC_VLAN;
	case QED_TUNN_CLSS_INNER_MAC_VNI:
		return TUNNEL_CLSS_INNER_MAC_VNI;
	case QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE:
		return TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE;
	default:
		return TUNNEL_CLSS_MAC_VLAN;
	}
}

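/* Copy the requested tunnel enable/disable modes into the device tunnel
 * state; on PF start all modes are taken, otherwise only those marked for
 * update.
 */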
static void
qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun,
			    struct qed_tunnel_info *p_src, bool b_pf_start)
{
	if (p_src->vxlan.b_update_mode || b_pf_start)
		p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled;

	if (p_src->l2_gre.b_update_mode || b_pf_start)
		p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled;

	if (p_src->ip_gre.b_update_mode || b_pf_start)
		p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled;

	if (p_src->l2_geneve.b_update_mode || b_pf_start)
		p_tun->l2_geneve.b_mode_enabled =
		    p_src->l2_geneve.b_mode_enabled;

	if (p_src->ip_geneve.b_update_mode || b_pf_start)
		p_tun->ip_geneve.b_mode_enabled =
		    p_src->ip_geneve.b_mode_enabled;
}

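/* Convert and store the tunnel classification settings for each tunnel
 * type, along with the Rx/Tx classification update flags.
 */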
static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun,
				  struct qed_tunnel_info *p_src)
{
	int type;

	p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
	p_tun->b_update_tx_cls = p_src->b_update_tx_cls;

	type = qed_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls);
	p_tun->vxlan.tun_cls = type;
	type = qed_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls);
	p_tun->l2_gre.tun_cls = type;
	type = qed_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls);
	p_tun->ip_gre.tun_cls = type;
	type = qed_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls);
	p_tun->l2_geneve.tun_cls = type;
	type = qed_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls);
	p_tun->ip_geneve.tun_cls = type;
}

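/* Latch updated VXLAN/GENEVE UDP destination ports into the device tunnel
 * state.
 */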
static void qed_set_tunn_ports(struct qed_tunnel_info *p_tun,
			       struct qed_tunnel_info *p_src)
{
	p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port;
	p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port;

	if (p_src->geneve_port.b_update_port)
		p_tun->geneve_port.port = p_src->geneve_port.port;

	if (p_src->vxlan_port.b_update_port)
		p_tun->vxlan_port.port = p_src->vxlan_port.port;
}

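/* Helpers filling the tunnel classification (and, where applicable, UDP
 * port) fields of a tunnel configuration ramrod from the driver state.
 */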
static void
__qed_set_ramrod_tunnel_param(u8 *p_tunn_cls,
			      struct qed_tunn_update_type *tun_type)
{
	*p_tunn_cls = tun_type->tun_cls;
}

static void
qed_set_ramrod_tunnel_param(u8 *p_tunn_cls,
			    struct qed_tunn_update_type *tun_type,
			    u8 *p_update_port,
			    __le16 *p_port,
			    struct qed_tunn_update_udp_port *p_udp_port)
{
	__qed_set_ramrod_tunnel_param(p_tunn_cls, tun_type);
	if (p_udp_port->b_update_port) {
		*p_update_port = 1;
		*p_port = cpu_to_le16(p_udp_port->port);
	}
}

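/* Build the tunnel section of a PF_UPDATE ramrod from the requested
 * changes, refreshing the cached device tunnel state on the way.
 */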
static void
qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn,
			      struct qed_tunnel_info *p_src,
			      struct pf_update_tunnel_config *p_tunn_cfg)
{
	struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;

	qed_set_pf_update_tunn_mode(p_tun, p_src, false);
	qed_set_tunn_cls_info(p_tun, p_src);
	qed_set_tunn_ports(p_tun, p_src);

	qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
				    &p_tun->vxlan,
				    &p_tunn_cfg->set_vxlan_udp_port_flg,
				    &p_tunn_cfg->vxlan_udp_port,
				    &p_tun->vxlan_port);

	qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
				    &p_tun->l2_geneve,
				    &p_tunn_cfg->set_geneve_udp_port_flg,
				    &p_tunn_cfg->geneve_udp_port,
				    &p_tun->geneve_port);

	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
				      &p_tun->ip_geneve);

	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
				      &p_tun->l2_gre);

	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
				      &p_tun->ip_gre);

	p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
}

static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_tunnel_info *p_tun)
{
	qed_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled,
			   p_tun->ip_gre.b_mode_enabled);
	qed_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled);

	qed_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled,
			      p_tun->ip_geneve.b_mode_enabled);
}

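/* Program any updated tunnel UDP destination ports and the tunnel enable
 * modes into HW.
 */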
static void qed_set_hw_tunn_mode_port(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_tunnel_info *p_tunn)
{
	if (p_tunn->vxlan_port.b_update_port)
		qed_set_vxlan_dest_port(p_hwfn, p_ptt,
					p_tunn->vxlan_port.port);

	if (p_tunn->geneve_port.b_update_port)
		qed_set_geneve_dest_port(p_hwfn, p_ptt,
					 p_tunn->geneve_port.port);

	qed_set_hw_tunn_mode(p_hwfn, p_ptt, p_tunn);
}

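/* Build the tunnel section of the PF_START ramrod; a NULL source leaves
 * the configuration untouched.
 */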
static void
qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
			     struct qed_tunnel_info *p_src,
			     struct pf_start_tunnel_config *p_tunn_cfg)
{
	struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;

	if (!p_src)
		return;

	qed_set_pf_update_tunn_mode(p_tun, p_src, true);
	qed_set_tunn_cls_info(p_tun, p_src);
	qed_set_tunn_ports(p_tun, p_src);

	qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
				    &p_tun->vxlan,
				    &p_tunn_cfg->set_vxlan_udp_port_flg,
				    &p_tunn_cfg->vxlan_udp_port,
				    &p_tun->vxlan_port);

	qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
				    &p_tun->l2_geneve,
				    &p_tunn_cfg->set_geneve_udp_port_flg,
				    &p_tunn_cfg->geneve_udp_port,
				    &p_tun->geneve_port);

	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
				      &p_tun->ip_geneve);

	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
				      &p_tun->l2_gre);

	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
				      &p_tun->ip_gre);
}

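/* Post the COMMON_RAMROD_PF_START ramrod: event ring and consolidation
 * queue PBL addresses, MF mode, outer-tag and tunnel configuration,
 * personality and VF range; afterwards apply the tunnel HW settings.
 */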
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_tunnel_info *p_tunn,
		    bool allow_npar_tx_switch)
{
	struct outer_tag_config_struct *outer_tag_config;
	struct pf_start_ramrod_data *p_ramrod = NULL;
	u16 sb = qed_int_get_sp_sb_id(p_hwfn);
	u8 sb_index = p_hwfn->p_eq->eq_sb_index;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u8 page_cnt, i;
	int rc;

	/* update initial eq producer */
	qed_eq_prod_update(p_hwfn,
			   qed_chain_get_prod_idx(&p_hwfn->p_eq->chain));

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_START,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.pf_start;

	p_ramrod->event_ring_sb_id	= cpu_to_le16(sb);
	p_ramrod->event_ring_sb_index	= sb_index;
	p_ramrod->path_id		= QED_PATH_ID(p_hwfn);
	p_ramrod->dont_log_ramrods	= 0;
	p_ramrod->log_type_mask		= cpu_to_le16(0xf);

	if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits))
		p_ramrod->mf_mode = MF_OVLAN;
	else
		p_ramrod->mf_mode = MF_NPAR;

	outer_tag_config = &p_ramrod->outer_tag_config;
	outer_tag_config->outer_tag.tci = cpu_to_le16(p_hwfn->hw_info.ovlan);

	if (test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits)) {
		outer_tag_config->outer_tag.tpid = cpu_to_le16(ETH_P_8021Q);
	} else if (test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)) {
		outer_tag_config->outer_tag.tpid = cpu_to_le16(ETH_P_8021AD);
		outer_tag_config->enable_stag_pri_change = 1;
	}

	outer_tag_config->pri_map_valid = 1;
	for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
		outer_tag_config->inner_to_outer_pri_map[i] = i;

	/* enable_stag_pri_change should be set if the port is in BD mode or
	 * in UFP with Host Control mode.
	 */
	if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) {
		if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS)
			outer_tag_config->enable_stag_pri_change = 1;
		else
			outer_tag_config->enable_stag_pri_change = 0;

		outer_tag_config->outer_tag.tci |=
		    cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13));
	}

	/* Place EQ address in RAMROD */
	DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
		       qed_chain_get_pbl_phys(&p_hwfn->p_eq->chain));
	page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
	p_ramrod->event_ring_num_pages = page_cnt;

	/* Place consolidation queue address in ramrod */
	DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_base_addr,
		       qed_chain_get_pbl_phys(&p_hwfn->p_consq->chain));
	page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_consq->chain);
	p_ramrod->consolid_q_num_pages = page_cnt;

	qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);

	if (test_bit(QED_MF_INTER_PF_SWITCH, &p_hwfn->cdev->mf_bits))
		p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	case QED_PCI_FCOE:
		p_ramrod->personality = PERSONALITY_FCOE;
		break;
	case QED_PCI_ISCSI:
	case QED_PCI_NVMETCP:
		p_ramrod->personality = PERSONALITY_TCP_ULP;
		break;
	case QED_PCI_ETH_ROCE:
	case QED_PCI_ETH_IWARP:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown personality %d\n",
			  p_hwfn->hw_info.personality);
		p_ramrod->personality = PERSONALITY_ETH;
	}

	if (p_hwfn->cdev->p_iov_info) {
		struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;

		p_ramrod->base_vf_id = (u8)p_iov->first_vf_in_pf;
		p_ramrod->num_vfs = (u8)p_iov->total_vfs;
	}
	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Setting event_ring_sb [id %04x index %02x], outer_tag.tci [%d]\n",
		   sb, sb_index, outer_tag_config->outer_tag.tci);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	if (p_tunn)
		qed_set_hw_tunn_mode_port(p_hwfn, p_ptt,
					  &p_hwfn->cdev->tunnel);

	return rc;
}

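/* Post a PF_UPDATE ramrod carrying the current DCBx results. */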
int qed_sp_pf_update(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_CB;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	qed_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
				      &p_ent->ramrod.pf_update);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

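/* Post a PF_UPDATE ramrod updating the S-tag priority-change setting
 * according to the UFP priority type.
 */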
int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_UNKNOWN) {
		DP_INFO(p_hwfn, "Invalid priority type %d\n",
			p_hwfn->ufp_info.pri_type);
		return -EINVAL;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_CB;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	p_ent->ramrod.pf_update.update_enable_stag_pri_change = true;
	if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS)
		p_ent->ramrod.pf_update.enable_stag_pri_change = 1;
	else
		p_ent->ramrod.pf_update.enable_stag_pri_change = 0;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

/* Send a PF_UPDATE ramrod carrying a new tunnel configuration, then program
 * the resulting tunnel modes and UDP ports into HW.
 */
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      struct qed_tunnel_info *p_tunn,
			      enum spq_mode comp_mode,
			      struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_pf_tunnel_param_update(p_hwfn, p_tunn);

	if (!p_tunn)
		return -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	qed_tunn_set_pf_update_params(p_hwfn, p_tunn,
				      &p_ent->ramrod.pf_update.tunnel_config);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		return rc;

	qed_set_hw_tunn_mode_port(p_hwfn, p_ptt, &p_hwfn->cdev->tunnel);

	return rc;
}

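/* Post the COMMON_RAMROD_PF_STOP ramrod. */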
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

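/* Post an empty ramrod (COMMON_RAMROD_EMPTY), serving as a slowpath
 * heartbeat.
 */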
int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

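/* Post a PF_UPDATE ramrod refreshing the outer VLAN (S-tag) value from
 * hw_info, including the UFP traffic class bits when applicable.
 */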
int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_CB;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	p_ent->ramrod.pf_update.update_mf_vlan_flag = true;
	p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan);
	if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
		p_ent->ramrod.pf_update.mf_vlan |=
			cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13));

	return qed_spq_post(p_hwfn, p_ent, NULL);
}