// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

/* hclge_gen_resp_to_vf: used to generate a synchronous response to VF when PF
 * receives a mailbox message from VF.
 * @vport: pointer to struct hclge_vport
 * @vf_to_pf_req: pointer to hclge_mbx_vf_to_pf_cmd of the original mailbox
 *		  message
 * @resp_status: indicate to VF whether its request succeeded (0) or failed.
 */
static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
				struct hclge_mbx_vf_to_pf_cmd *vf_to_pf_req,
				int resp_status,
				u8 *resp_data, u16 resp_data_len)
{
	struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
	struct hclge_dev *hdev = vport->back;
	enum hclge_cmd_status status;
	struct hclge_desc desc;

	resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;

	if (resp_data_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
		dev_err(&hdev->pdev->dev,
			"PF fail to gen resp to VF len %d exceeds max len %d\n",
			resp_data_len,
			HCLGE_MBX_MAX_RESP_DATA_SIZE);
		return -EINVAL;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);

	resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid;
	resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len;

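	/* response layout: msg[0] marks a PF-to-VF response, msg[1] and
	 * msg[2] echo back the VF's original code and subcode so the VF can
	 * match the reply to its request, and msg[3] carries pass/fail
	 */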
	resp_pf_to_vf->msg[0] = HCLGE_MBX_PF_VF_RESP;
	resp_pf_to_vf->msg[1] = vf_to_pf_req->msg[0];
	resp_pf_to_vf->msg[2] = vf_to_pf_req->msg[1];
	resp_pf_to_vf->msg[3] = (resp_status == 0) ? 0 : 1;

	if (resp_data && resp_data_len > 0)
		memcpy(&resp_pf_to_vf->msg[4], resp_data, resp_data_len);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"PF failed(=%d) to send response to VF\n", status);

	return status;
}

static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
			      u16 mbx_opcode, u8 dest_vfid)
{
	struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
	struct hclge_dev *hdev = vport->back;
	enum hclge_cmd_status status;
	struct hclge_desc desc;

	resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);

	resp_pf_to_vf->dest_vfid = dest_vfid;
	resp_pf_to_vf->msg_len = msg_len;
	resp_pf_to_vf->msg[0] = mbx_opcode;

	memcpy(&resp_pf_to_vf->msg[1], msg, msg_len);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"PF failed(=%d) to send mailbox message to VF\n",
			status);

	return status;
}

int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	enum hnae3_reset_type reset_type;
	u8 msg_data[2];
	u8 dest_vfid;

	dest_vfid = (u8)vport->vport_id;

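	/* map the PF-level reset to the reset type the VF must perform */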
	if (hdev->reset_type == HNAE3_FUNC_RESET)
		reset_type = HNAE3_VF_PF_FUNC_RESET;
	else if (hdev->reset_type == HNAE3_FLR_RESET)
		reset_type = HNAE3_VF_FULL_RESET;
	else
		return -EINVAL;

	memcpy(&msg_data[0], &reset_type, sizeof(u16));

	/* tell the VF which reset type it should assert */
	return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
				  HCLGE_MBX_ASSERTING_RESET, dest_vfid);
}

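/* hclge_free_vector_ring_chain: free the dynamically allocated nodes of a
 * vector ring chain. The head node lives on the caller's stack, so only
 * the nodes from head->next onwards are freed.
 */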
static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
{
	struct hnae3_ring_chain_node *chain_tmp, *chain;

	chain = head->next;

	while (chain) {
		chain_tmp = chain->next;
		kzfree(chain);
		chain = chain_tmp;
	}
}

/* hclge_get_ring_chain_from_mbx: get ring type & tqp id & int_gl idx
 * from mailbox message
 * msg[0]: opcode
 * msg[1]: <not relevant to this function>
 * msg[2]: ring_num
 * msg[3]: first ring type (TX|RX)
 * msg[4]: first tqp id
 * msg[5]: first int_gl idx
 * msg[6] ~ msg[14]: other ring type, tqp id and int_gl idx
 */
static int hclge_get_ring_chain_from_mbx(
			struct hclge_mbx_vf_to_pf_cmd *req,
			struct hnae3_ring_chain_node *ring_chain,
			struct hclge_vport *vport)
{
	struct hnae3_ring_chain_node *cur_chain, *new_chain;
	int ring_num;
	int i;

	ring_num = req->msg[2];

	if (ring_num > ((HCLGE_MBX_VF_MSG_DATA_NUM -
		HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
		HCLGE_MBX_RING_NODE_VARIABLE_NUM))
		return -ENOMEM;

	hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
	ring_chain->tqp_index =
			hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]);
	hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
			HNAE3_RING_GL_IDX_S,
			req->msg[5]);

	cur_chain = ring_chain;

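	/* msg[3..5] describe the first ring node in place; each further node
	 * takes HCLGE_MBX_RING_NODE_VARIABLE_NUM message bytes (ring type,
	 * tqp id, int_gl index) and is linked onto the chain below
	 */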
	for (i = 1; i < ring_num; i++) {
		new_chain = kzalloc(sizeof(*new_chain), GFP_KERNEL);
		if (!new_chain)
			goto err;

		hnae3_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
			      req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
			      HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]);

		new_chain->tqp_index =
		hclge_get_queue_id(vport->nic.kinfo.tqp
			[req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
			HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 1]]);

		hnae3_set_field(new_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
				HNAE3_RING_GL_IDX_S,
				req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
				HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]);

		cur_chain->next = new_chain;
		cur_chain = new_chain;
	}

	return 0;
err:
	hclge_free_vector_ring_chain(ring_chain);
	return -ENOMEM;
}

static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
					     struct hclge_mbx_vf_to_pf_cmd *req)
{
	struct hnae3_ring_chain_node ring_chain;
	int vector_id = req->msg[1];
	int ret;

	memset(&ring_chain, 0, sizeof(ring_chain));
	ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
	if (ret)
		return ret;

	ret = hclge_bind_ring_with_vector(vport, vector_id, en, &ring_chain);

	/* free the chain even when binding fails, otherwise the nodes
	 * allocated by hclge_get_ring_chain_from_mbx() would leak
	 */
	hclge_free_vector_ring_chain(&ring_chain);

	return ret;
}

static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
				     struct hclge_mbx_vf_to_pf_cmd *req)
{
	bool en_bc = req->msg[1] ? true : false;
	struct hclge_promisc_param param;

	/* a VF is not allowed to enable unicast/multicast promiscuous mode;
	 * only the broadcast promiscuous setting can be toggled here
	 */
	hclge_promisc_param_init(&param, false, false, en_bc, vport->vport_id);
	return hclge_cmd_set_promisc_mode(vport->back, &param);
}

static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				    bool gen_resp)
{
	const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_MODIFY) {
		const u8 *old_addr = (const u8 *)(&mbx_req->msg[8]);

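		/* the MODIFY subcode carries both addresses: remove the old
		 * address at msg[8] first, then add the new one; the old
		 * address is restored if the add fails
		 */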
		hclge_rm_uc_addr_common(vport, old_addr);
		status = hclge_add_uc_addr_common(vport, mac_addr);
		if (status) {
			hclge_add_uc_addr_common(vport, old_addr);
		} else {
			hclge_rm_vport_mac_table(vport, mac_addr,
						 false, HCLGE_MAC_ADDR_UC);
			hclge_add_vport_mac_table(vport, mac_addr,
						  HCLGE_MAC_ADDR_UC);
		}
	} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_ADD) {
		status = hclge_add_uc_addr_common(vport, mac_addr);
		if (!status)
			hclge_add_vport_mac_table(vport, mac_addr,
						  HCLGE_MAC_ADDR_UC);
	} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
		status = hclge_rm_uc_addr_common(vport, mac_addr);
		if (!status)
			hclge_rm_vport_mac_table(vport, mac_addr,
						 false, HCLGE_MAC_ADDR_UC);
	} else {
		dev_err(&hdev->pdev->dev,
			"failed to set unicast mac addr, unknown subcode %d\n",
			mbx_req->msg[1]);
		return -EIO;
	}

	if (gen_resp)
		hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);

	return 0;
}

static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				    bool gen_resp)
{
	const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
	struct hclge_dev *hdev = vport->back;
	u8 resp_len = 0;
	u8 resp_data;
	int status;

	if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_ADD) {
		status = hclge_add_mc_addr_common(vport, mac_addr);
		if (!status)
			hclge_add_vport_mac_table(vport, mac_addr,
						  HCLGE_MAC_ADDR_MC);
	} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
		status = hclge_rm_mc_addr_common(vport, mac_addr);
		if (!status)
			hclge_rm_vport_mac_table(vport, mac_addr,
						 false, HCLGE_MAC_ADDR_MC);
	} else {
		dev_err(&hdev->pdev->dev,
			"failed to set mcast mac addr, unknown subcode %d\n",
			mbx_req->msg[1]);
		return -EIO;
	}

	if (gen_resp)
		hclge_gen_resp_to_vf(vport, mbx_req, status,
				     &resp_data, resp_len);

	return 0;
}

static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
				 struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				 bool gen_resp)
{
	int status = 0;

	if (mbx_req->msg[1] == HCLGE_MBX_VLAN_FILTER) {
		struct hnae3_handle *handle = &vport->nic;
		u16 vlan, proto;
		bool is_kill;

		is_kill = !!mbx_req->msg[2];
		memcpy(&vlan, &mbx_req->msg[3], sizeof(vlan));
		memcpy(&proto, &mbx_req->msg[5], sizeof(proto));
		status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
					       vlan, is_kill);
	} else if (mbx_req->msg[1] == HCLGE_MBX_VLAN_RX_OFF_CFG) {
		struct hnae3_handle *handle = &vport->nic;
		bool en = mbx_req->msg[2] ? true : false;

		status = hclge_en_hw_strip_rxvtag(handle, en);
	}

	if (gen_resp)
		status = hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);

	return status;
}

static int hclge_set_vf_alive(struct hclge_vport *vport,
			      struct hclge_mbx_vf_to_pf_cmd *mbx_req,
			      bool gen_resp)
{
	bool alive = !!mbx_req->msg[2];
	int ret = 0;

	if (alive)
		ret = hclge_vport_start(vport);
	else
		hclge_vport_stop(vport);

	return ret;
}

static int hclge_get_vf_tcinfo(struct hclge_vport *vport,
			       struct hclge_mbx_vf_to_pf_cmd *mbx_req,
			       bool gen_resp)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	u8 vf_tc_map = 0;
	int i, ret;

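	/* report the VF's enabled TCs as a bitmap, one bit per TC */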
	for (i = 0; i < kinfo->num_tc; i++)
		vf_tc_map |= BIT(i);

	ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &vf_tc_map,
				   sizeof(u8));

	return ret;
}

static int hclge_get_vf_queue_info(struct hclge_vport *vport,
				   struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				   bool gen_resp)
{
#define HCLGE_TQPS_RSS_INFO_LEN		6
	u8 resp_data[HCLGE_TQPS_RSS_INFO_LEN];
	struct hclge_dev *hdev = vport->back;

	/* get the queue related info */
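	/* resp_data layout: bytes 0-1 = allocated tqp count, bytes 2-3 =
	 * rss_size, bytes 4-5 = rx buffer length
	 */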
	memcpy(&resp_data[0], &vport->alloc_tqps, sizeof(u16));
	memcpy(&resp_data[2], &vport->nic.kinfo.rss_size, sizeof(u16));
	memcpy(&resp_data[4], &hdev->rx_buf_len, sizeof(u16));

	return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
				    HCLGE_TQPS_RSS_INFO_LEN);
}

static int hclge_get_vf_queue_depth(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				    bool gen_resp)
{
#define HCLGE_TQPS_DEPTH_INFO_LEN	4
	u8 resp_data[HCLGE_TQPS_DEPTH_INFO_LEN];
	struct hclge_dev *hdev = vport->back;

	/* get the queue depth info */
	memcpy(&resp_data[0], &hdev->num_tx_desc, sizeof(u16));
	memcpy(&resp_data[2], &hdev->num_rx_desc, sizeof(u16));
	return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
				    HCLGE_TQPS_DEPTH_INFO_LEN);
}

static int hclge_get_vf_media_type(struct hclge_vport *vport,
				   struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	struct hclge_dev *hdev = vport->back;
	u8 resp_data;

	resp_data = hdev->hw.mac.media_type;
	return hclge_gen_resp_to_vf(vport, mbx_req, 0, &resp_data,
				    sizeof(resp_data));
}

static int hclge_get_link_info(struct hclge_vport *vport,
			       struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	struct hclge_dev *hdev = vport->back;
	u16 link_status;
	u8 msg_data[8];
	u8 dest_vfid;
	u16 duplex;

	/* mac.link can only be 0 or 1 */
	link_status = (u16)hdev->hw.mac.link;
	duplex = hdev->hw.mac.duplex;
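	/* msg_data layout: bytes 0-1 = link status, bytes 2-5 = speed (u32),
	 * bytes 6-7 = duplex
	 */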
	memcpy(&msg_data[0], &link_status, sizeof(u16));
	memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32));
	memcpy(&msg_data[6], &duplex, sizeof(u16));
	dest_vfid = mbx_req->mbx_src_vfid;

	/* send this requested info to VF */
	return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
				  HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid);
}

static void hclge_get_link_mode(struct hclge_vport *vport,
				struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
#define HCLGE_SUPPORTED   1
	struct hclge_dev *hdev = vport->back;
	unsigned long advertising;
	unsigned long supported;
	unsigned long send_data;
	u8 msg_data[10];
	u8 dest_vfid;

	advertising = hdev->hw.mac.advertising[0];
	supported = hdev->hw.mac.supported[0];
	dest_vfid = mbx_req->mbx_src_vfid;
	msg_data[0] = mbx_req->msg[2];

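	/* msg[2] selects which mask the VF asked for: the supported link
	 * modes (HCLGE_SUPPORTED) or the currently advertised ones
	 */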
	send_data = msg_data[0] == HCLGE_SUPPORTED ? supported : advertising;

	memcpy(&msg_data[2], &send_data, sizeof(unsigned long));
	hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
			   HCLGE_MBX_LINK_STAT_MODE, dest_vfid);
}

static void hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
				     struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	u16 queue_id;

	memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id));

	hclge_reset_vf_queue(vport, queue_id);

	/* send response msg to VF after queue reset completes */
	hclge_gen_resp_to_vf(vport, mbx_req, 0, NULL, 0);
}

static void hclge_reset_vf(struct hclge_vport *vport,
			   struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	struct hclge_dev *hdev = vport->back;
	int ret;

	dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %d!\n",
		 vport->vport_id);

	ret = hclge_func_reset_cmd(hdev, vport->vport_id);
	hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0);
}

static void hclge_vf_keep_alive(struct hclge_vport *vport,
				struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	vport->last_active_jiffies = jiffies;
}

static int hclge_set_vf_mtu(struct hclge_vport *vport,
			    struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	int ret;
	u32 mtu;

	memcpy(&mtu, &mbx_req->msg[2], sizeof(mtu));
	ret = hclge_set_vport_mtu(vport, mtu);

	return hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0);
}

static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	u16 queue_id, qid_in_pf;
	u8 resp_data[2];

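	/* translate the VF-local queue id into the PF's global queue id */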
	memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id));
	qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
	memcpy(resp_data, &qid_in_pf, sizeof(qid_in_pf));

	return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data, 2);
}

static int hclge_get_rss_key(struct hclge_vport *vport,
			     struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
#define HCLGE_RSS_MBX_RESP_LEN	8
	u8 resp_data[HCLGE_RSS_MBX_RESP_LEN];
	struct hclge_dev *hdev = vport->back;
	u8 index;

	index = mbx_req->msg[2];

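	/* the VF reads the RSS hash key in HCLGE_RSS_MBX_RESP_LEN-byte
	 * chunks; msg[2] is the chunk index into vport 0's hash key
	 */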
	memcpy(&resp_data[0],
	       &hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
	       HCLGE_RSS_MBX_RESP_LEN);

	return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
				    HCLGE_RSS_MBX_RESP_LEN);
}

static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
{
	u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG);

	return tail == hw->cmq.crq.next_to_use;
}

void hclge_mbx_handler(struct hclge_dev *hdev)
{
	struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclge_vport *vport;
	struct hclge_desc *desc;
	int ret, flag;

	/* handle all the mailbox requests in the queue */
	while (!hclge_cmd_crq_empty(&hdev->hw)) {
		if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
			dev_warn(&hdev->pdev->dev,
				 "command queue needs re-initializing\n");
			return;
		}

		desc = &crq->desc[crq->next_to_use];
		req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;

		flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
		if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
			dev_warn(&hdev->pdev->dev,
				 "dropped invalid mailbox message, code = %d\n",
				 req->msg[0]);

			/* dropping/not processing this invalid message */
			crq->desc[crq->next_to_use].flag = 0;
			hclge_mbx_ring_ptr_move_crq(crq);
			continue;
		}

		vport = &hdev->vport[req->mbx_src_vfid];

		switch (req->msg[0]) {
		case HCLGE_MBX_MAP_RING_TO_VECTOR:
			ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
								req);
			break;
		case HCLGE_MBX_UNMAP_RING_TO_VECTOR:
			ret = hclge_map_unmap_ring_to_vf_vector(vport, false,
								req);
			break;
		case HCLGE_MBX_SET_PROMISC_MODE:
			ret = hclge_set_vf_promisc_mode(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to set VF promisc mode\n",
					ret);
			break;
		case HCLGE_MBX_SET_UNICAST:
			ret = hclge_set_vf_uc_mac_addr(vport, req, true);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to set VF UC MAC Addr\n",
					ret);
			break;
		case HCLGE_MBX_SET_MULTICAST:
			ret = hclge_set_vf_mc_mac_addr(vport, req, false);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to set VF MC MAC Addr\n",
					ret);
			break;
		case HCLGE_MBX_SET_VLAN:
			ret = hclge_set_vf_vlan_cfg(vport, req, false);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to config VF's VLAN\n",
					ret);
			break;
		case HCLGE_MBX_SET_ALIVE:
			ret = hclge_set_vf_alive(vport, req, false);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to set VF's ALIVE\n",
					ret);
			break;
		case HCLGE_MBX_GET_QINFO:
			ret = hclge_get_vf_queue_info(vport, req, true);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to get Q info for VF\n",
					ret);
			break;
		case HCLGE_MBX_GET_QDEPTH:
			ret = hclge_get_vf_queue_depth(vport, req, true);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to get Q depth for VF\n",
					ret);
			break;

		case HCLGE_MBX_GET_TCINFO:
			ret = hclge_get_vf_tcinfo(vport, req, true);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to get TC info for VF\n",
					ret);
			break;
		case HCLGE_MBX_GET_LINK_STATUS:
			ret = hclge_get_link_info(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to get link stat for VF\n",
					ret);
			break;
		case HCLGE_MBX_QUEUE_RESET:
			hclge_mbx_reset_vf_queue(vport, req);
			break;
		case HCLGE_MBX_RESET:
			hclge_reset_vf(vport, req);
			break;
		case HCLGE_MBX_KEEP_ALIVE:
			hclge_vf_keep_alive(vport, req);
			break;
		case HCLGE_MBX_SET_MTU:
			ret = hclge_set_vf_mtu(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"VF fail(%d) to set mtu\n", ret);
			break;
		case HCLGE_MBX_GET_QID_IN_PF:
			ret = hclge_get_queue_id_in_pf(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to get qid for VF\n",
					ret);
			break;
		case HCLGE_MBX_GET_RSS_KEY:
			ret = hclge_get_rss_key(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to get rss key for VF\n",
					ret);
			break;
		case HCLGE_MBX_GET_LINK_MODE:
			hclge_get_link_mode(vport, req);
			break;
		case HCLGE_MBX_GET_VF_FLR_STATUS:
			mutex_lock(&hdev->vport_cfg_mutex);
			hclge_rm_vport_all_mac_table(vport, true,
						     HCLGE_MAC_ADDR_UC);
			hclge_rm_vport_all_mac_table(vport, true,
						     HCLGE_MAC_ADDR_MC);
			hclge_rm_vport_all_vlan_table(vport, true);
			mutex_unlock(&hdev->vport_cfg_mutex);
			break;
		case HCLGE_MBX_GET_MEDIA_TYPE:
			ret = hclge_get_vf_media_type(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to media type for VF\n",
					ret);
			break;
		default:
			dev_err(&hdev->pdev->dev,
				"un-supported mailbox message, code = %d\n",
				req->msg[0]);
			break;
		}
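		/* clear the valid flag so this descriptor is not processed
		 * again, then advance the CRQ ring pointer
		 */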
		crq->desc[crq->next_to_use].flag = 0;
		hclge_mbx_ring_ptr_move_crq(crq);
	}

	/* Write back CMDQ_RQ header pointer, M7 need this pointer */
	hclge_write_dev(&hdev->hw, HCLGE_NIC_CRQ_HEAD_REG, crq->next_to_use);
}