/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"

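/* Acquire a slowpath-queue (SPQ) entry and initialize its ramrod header
 * (CID, command and protocol IDs) and completion mode.  EBLOCK uses the
 * entry's own comp_done as the completion cookie, BLOCK requires
 * caller-supplied completion data, and CB takes the caller's callback
 * if one is provided.
 */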
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
			struct qed_spq_entry **pp_ent,
			u8 cmd, u8 protocol, struct qed_sp_init_data *p_data)
{
	u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
	struct qed_spq_entry *p_ent = NULL;
	int rc;

	if (!pp_ent)
		return -ENOMEM;

	rc = qed_spq_get_entry(p_hwfn, pp_ent);
	if (rc)
		return rc;

	p_ent = *pp_ent;

	p_ent->elem.hdr.cid		= cpu_to_le32(opaque_cid);
	p_ent->elem.hdr.cmd_id		= cmd;
	p_ent->elem.hdr.protocol_id	= protocol;

	p_ent->priority		= QED_SPQ_PRIORITY_NORMAL;
	p_ent->comp_mode	= p_data->comp_mode;
	p_ent->comp_done.done	= 0;

	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
		p_ent->comp_cb.cookie = &p_ent->comp_done;
		break;

	case QED_SPQ_MODE_BLOCK:
		if (!p_data->p_comp_data)
			return -EINVAL;

		p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
		break;

	case QED_SPQ_MODE_CB:
		if (!p_data->p_comp_data)
			p_ent->comp_cb.function = NULL;
		else
			p_ent->comp_cb = *p_data->p_comp_data;
		break;

	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
		   opaque_cid, cmd, protocol,
		   (unsigned long)&p_ent->ramrod,
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));

	return 0;
}

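/* Translate the driver's tunnel-classification enum into the FW's
 * tunnel_clss value; unknown values fall back to MAC/VLAN.
 */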
static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type)
{
	switch (type) {
	case QED_TUNN_CLSS_MAC_VLAN:
		return TUNNEL_CLSS_MAC_VLAN;
	case QED_TUNN_CLSS_MAC_VNI:
		return TUNNEL_CLSS_MAC_VNI;
	case QED_TUNN_CLSS_INNER_MAC_VLAN:
		return TUNNEL_CLSS_INNER_MAC_VLAN;
	case QED_TUNN_CLSS_INNER_MAC_VNI:
		return TUNNEL_CLSS_INNER_MAC_VNI;
	case QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE:
		return TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE;
	default:
		return TUNNEL_CLSS_MAC_VLAN;
	}
}

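/* Merge tunnel enable-modes from p_src into the cached tunnel info;
 * on PF start all modes are taken, otherwise only those marked
 * b_update_mode.
 */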
static void
qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun,
			    struct qed_tunnel_info *p_src, bool b_pf_start)
{
	if (p_src->vxlan.b_update_mode || b_pf_start)
		p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled;

	if (p_src->l2_gre.b_update_mode || b_pf_start)
		p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled;

	if (p_src->ip_gre.b_update_mode || b_pf_start)
		p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled;

	if (p_src->l2_geneve.b_update_mode || b_pf_start)
		p_tun->l2_geneve.b_mode_enabled =
		    p_src->l2_geneve.b_mode_enabled;

	if (p_src->ip_geneve.b_update_mode || b_pf_start)
		p_tun->ip_geneve.b_mode_enabled =
		    p_src->ip_geneve.b_mode_enabled;
}

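/* Latch the requested Rx/Tx classification updates and convert each
 * tunnel's classification type to the FW enum.
 */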
static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun,
				  struct qed_tunnel_info *p_src)
{
	enum tunnel_clss type;

	p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
	p_tun->b_update_tx_cls = p_src->b_update_tx_cls;

	type = qed_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls);
	p_tun->vxlan.tun_cls = type;
	type = qed_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls);
	p_tun->l2_gre.tun_cls = type;
	type = qed_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls);
	p_tun->ip_gre.tun_cls = type;
	type = qed_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls);
	p_tun->l2_geneve.tun_cls = type;
	type = qed_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls);
	p_tun->ip_geneve.tun_cls = type;
}

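/* Latch updated VXLAN/GENEVE UDP destination ports into the cached
 * tunnel info.
 */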
static void qed_set_tunn_ports(struct qed_tunnel_info *p_tun,
			       struct qed_tunnel_info *p_src)
{
	p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port;
	p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port;

	if (p_src->geneve_port.b_update_port)
		p_tun->geneve_port.port = p_src->geneve_port.port;

	if (p_src->vxlan_port.b_update_port)
		p_tun->vxlan_port.port = p_src->vxlan_port.port;
}

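/* Fill only the classification field of a tunnel ramrod parameter. */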
static void
__qed_set_ramrod_tunnel_param(u8 *p_tunn_cls,
			      struct qed_tunn_update_type *tun_type)
{
	*p_tunn_cls = tun_type->tun_cls;
}

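/* Fill the classification field plus an optional UDP destination-port
 * update of a tunnel ramrod parameter.
 */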
static void
qed_set_ramrod_tunnel_param(u8 *p_tunn_cls,
			    struct qed_tunn_update_type *tun_type,
			    u8 *p_update_port,
			    __le16 *p_port,
			    struct qed_tunn_update_udp_port *p_udp_port)
{
	__qed_set_ramrod_tunnel_param(p_tunn_cls, tun_type);
	if (p_udp_port->b_update_port) {
		*p_update_port = 1;
		*p_port = cpu_to_le16(p_udp_port->port);
	}
}

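/* Build the tunnel section of a PF_UPDATE ramrod from the cached
 * tunnel info after merging in the requested changes.
 */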
static void
qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn,
			      struct qed_tunnel_info *p_src,
			      struct pf_update_tunnel_config *p_tunn_cfg)
{
	struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;

	qed_set_pf_update_tunn_mode(p_tun, p_src, false);
	qed_set_tunn_cls_info(p_tun, p_src);
	qed_set_tunn_ports(p_tun, p_src);

	qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
				    &p_tun->vxlan,
				    &p_tunn_cfg->set_vxlan_udp_port_flg,
				    &p_tunn_cfg->vxlan_udp_port,
				    &p_tun->vxlan_port);

	qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
				    &p_tun->l2_geneve,
				    &p_tunn_cfg->set_geneve_udp_port_flg,
				    &p_tunn_cfg->geneve_udp_port,
				    &p_tun->geneve_port);

	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
				      &p_tun->ip_geneve);

	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
				      &p_tun->l2_gre);

	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
				      &p_tun->ip_gre);

	p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
}

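/* Program the GRE/VXLAN/GENEVE tunnel enable bits into HW. */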
static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_tunnel_info *p_tun)
{
	qed_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled,
			   p_tun->ip_gre.b_mode_enabled);
	qed_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled);

	qed_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled,
			      p_tun->ip_geneve.b_mode_enabled);
}

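/* Program any updated UDP destination ports and the tunnel enable
 * modes into HW.
 */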
static void qed_set_hw_tunn_mode_port(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_tunnel_info *p_tunn)
{
	if (p_tunn->vxlan_port.b_update_port)
		qed_set_vxlan_dest_port(p_hwfn, p_ptt,
					p_tunn->vxlan_port.port);

	if (p_tunn->geneve_port.b_update_port)
		qed_set_geneve_dest_port(p_hwfn, p_ptt,
					 p_tunn->geneve_port.port);

	qed_set_hw_tunn_mode(p_hwfn, p_ptt, p_tunn);
}

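/* Build the tunnel section of the PF_START ramrod; a NULL p_src
 * leaves the tunnel configuration untouched.
 */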
static void
qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
			     struct qed_tunnel_info *p_src,
			     struct pf_start_tunnel_config *p_tunn_cfg)
{
	struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;

	if (!p_src)
		return;

	qed_set_pf_update_tunn_mode(p_tun, p_src, true);
	qed_set_tunn_cls_info(p_tun, p_src);
	qed_set_tunn_ports(p_tun, p_src);

	qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
				    &p_tun->vxlan,
				    &p_tunn_cfg->set_vxlan_udp_port_flg,
				    &p_tunn_cfg->vxlan_udp_port,
				    &p_tun->vxlan_port);

	qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
				    &p_tun->l2_geneve,
				    &p_tunn_cfg->set_geneve_udp_port_flg,
				    &p_tunn_cfg->geneve_udp_port,
				    &p_tun->geneve_port);

	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
				      &p_tun->ip_geneve);

	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
				      &p_tun->l2_gre);

	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
				      &p_tun->ip_gre);
}

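/* Post a PF_START ramrod: set up the event/consolidation queue PBL
 * addresses, MF mode, outer tag, personality and SR-IOV VF range,
 * then apply any tunnel configuration to HW.
 */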
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_tunnel_info *p_tunn,
		    enum qed_mf_mode mode, bool allow_npar_tx_switch)
{
	struct pf_start_ramrod_data *p_ramrod = NULL;
	u16 sb = qed_int_get_sp_sb_id(p_hwfn);
	u8 sb_index = p_hwfn->p_eq->eq_sb_index;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;
	u8 page_cnt;

	/* update initial eq producer */
	qed_eq_prod_update(p_hwfn,
			   qed_chain_get_prod_idx(&p_hwfn->p_eq->chain));

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_START,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.pf_start;

	p_ramrod->event_ring_sb_id	= cpu_to_le16(sb);
	p_ramrod->event_ring_sb_index	= sb_index;
	p_ramrod->path_id		= QED_PATH_ID(p_hwfn);
	p_ramrod->dont_log_ramrods	= 0;
	p_ramrod->log_type_mask		= cpu_to_le16(0xf);

	switch (mode) {
	case QED_MF_DEFAULT:
	case QED_MF_NPAR:
		p_ramrod->mf_mode = MF_NPAR;
		break;
	case QED_MF_OVLAN:
		p_ramrod->mf_mode = MF_OVLAN;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
		p_ramrod->mf_mode = MF_NPAR;
	}

	p_ramrod->outer_tag_config.outer_tag.tci =
		cpu_to_le16(p_hwfn->hw_info.ovlan);

	/* Place EQ address in RAMROD */
	DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
		       p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
	page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
	p_ramrod->event_ring_num_pages = page_cnt;
	DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
		       p_hwfn->p_consq->chain.pbl_sp.p_phys_table);

	qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);

	if (IS_MF_SI(p_hwfn))
		p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	case QED_PCI_FCOE:
		p_ramrod->personality = PERSONALITY_FCOE;
		break;
	case QED_PCI_ISCSI:
		p_ramrod->personality = PERSONALITY_ISCSI;
		break;
	case QED_PCI_ETH_ROCE:
	case QED_PCI_ETH_IWARP:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown personality %d\n",
			  p_hwfn->hw_info.personality);
		p_ramrod->personality = PERSONALITY_ETH;
	}

	if (p_hwfn->cdev->p_iov_info) {
		struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;

		p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf;
		p_ramrod->num_vfs = (u8) p_iov->total_vfs;
	}
	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Setting event_ring_sb [id %04x index %02x], outer_tag.tci [%d]\n",
		   sb, sb_index, p_ramrod->outer_tag_config.outer_tag.tci);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	if (p_tunn)
		qed_set_hw_tunn_mode_port(p_hwfn, p_ptt,
					  &p_hwfn->cdev->tunnel);

	return rc;
}

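/* Post a PF_UPDATE ramrod carrying the current DCBx results. */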
int qed_sp_pf_update(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_CB;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	qed_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
				      &p_ent->ramrod.pf_update);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

/* Post a PF_UPDATE ramrod carrying a tunnel configuration change and,
 * on success, apply the updated UDP ports and tunnel modes to HW.
 * For a VF the request is forwarded to the PF instead.
 */
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      struct qed_tunnel_info *p_tunn,
			      enum spq_mode comp_mode,
			      struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_pf_tunnel_param_update(p_hwfn, p_tunn);

	if (!p_tunn)
		return -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	qed_tunn_set_pf_update_params(p_hwfn, p_tunn,
				      &p_ent->ramrod.pf_update.tunnel_config);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		return rc;

	qed_set_hw_tunn_mode_port(p_hwfn, p_ptt, &p_hwfn->cdev->tunnel);

	return rc;
}

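/* Post a PF_STOP ramrod; EBLOCK mode waits for the FW completion. */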
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

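/* Post an empty ramrod, used as a slowpath-queue heartbeat towards
 * the FW.
 */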
int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

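/* Post a PF_UPDATE ramrod that refreshes the FW's copy of the S-tag
 * (outer VLAN) from hw_info.ovlan.
 */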
int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_CB;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	p_ent->ramrod.pf_update.update_mf_vlan_flag = true;
	p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}