// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2018-2019 Hisilicon Limited. */

#include <linux/device.h>

#include "hclge_debugfs.h"
#include "hclge_err.h"
#include "hclge_main.h"
#include "hclge_tm.h"
#include "hnae3.h"

static const char * const state_str[] = { "off", "on" };
static const char * const hclge_mac_state_str[] = {
	"TO_ADD", "TO_DEL", "ACTIVE"
};

static const char * const tc_map_mode_str[] = { "PRIO", "DSCP" };

static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
	{ .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
	  .dfx_msg = &hclge_dbg_bios_common_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg),
		       .offset = HCLGE_DBG_DFX_BIOS_OFFSET,
		       .cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } },
	{ .cmd = HNAE3_DBG_CMD_REG_SSU,
	  .dfx_msg = &hclge_dbg_ssu_reg_0[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0),
		       .offset = HCLGE_DBG_DFX_SSU_0_OFFSET,
		       .cmd = HCLGE_OPC_DFX_SSU_REG_0 } },
	{ .cmd = HNAE3_DBG_CMD_REG_SSU,
	  .dfx_msg = &hclge_dbg_ssu_reg_1[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1),
		       .offset = HCLGE_DBG_DFX_SSU_1_OFFSET,
		       .cmd = HCLGE_OPC_DFX_SSU_REG_1 } },
	{ .cmd = HNAE3_DBG_CMD_REG_SSU,
	  .dfx_msg = &hclge_dbg_ssu_reg_2[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2),
		       .offset = HCLGE_DBG_DFX_SSU_2_OFFSET,
		       .cmd = HCLGE_OPC_DFX_SSU_REG_2 } },
	{ .cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
	  .dfx_msg = &hclge_dbg_igu_egu_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg),
		       .offset = HCLGE_DBG_DFX_IGU_OFFSET,
		       .cmd = HCLGE_OPC_DFX_IGU_EGU_REG } },
	{ .cmd = HNAE3_DBG_CMD_REG_RPU,
	  .dfx_msg = &hclge_dbg_rpu_reg_0[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0),
		       .offset = HCLGE_DBG_DFX_RPU_0_OFFSET,
		       .cmd = HCLGE_OPC_DFX_RPU_REG_0 } },
	{ .cmd = HNAE3_DBG_CMD_REG_RPU,
	  .dfx_msg = &hclge_dbg_rpu_reg_1[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1),
		       .offset = HCLGE_DBG_DFX_RPU_1_OFFSET,
		       .cmd = HCLGE_OPC_DFX_RPU_REG_1 } },
	{ .cmd = HNAE3_DBG_CMD_REG_NCSI,
	  .dfx_msg = &hclge_dbg_ncsi_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ncsi_reg),
		       .offset = HCLGE_DBG_DFX_NCSI_OFFSET,
		       .cmd = HCLGE_OPC_DFX_NCSI_REG } },
	{ .cmd = HNAE3_DBG_CMD_REG_RTC,
	  .dfx_msg = &hclge_dbg_rtc_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg),
		       .offset = HCLGE_DBG_DFX_RTC_OFFSET,
		       .cmd = HCLGE_OPC_DFX_RTC_REG } },
	{ .cmd = HNAE3_DBG_CMD_REG_PPP,
	  .dfx_msg = &hclge_dbg_ppp_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg),
		       .offset = HCLGE_DBG_DFX_PPP_OFFSET,
		       .cmd = HCLGE_OPC_DFX_PPP_REG } },
	{ .cmd = HNAE3_DBG_CMD_REG_RCB,
	  .dfx_msg = &hclge_dbg_rcb_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg),
		       .offset = HCLGE_DBG_DFX_RCB_OFFSET,
		       .cmd = HCLGE_OPC_DFX_RCB_REG } },
	{ .cmd = HNAE3_DBG_CMD_REG_TQP,
	  .dfx_msg = &hclge_dbg_tqp_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg),
		       .offset = HCLGE_DBG_DFX_TQP_OFFSET,
		       .cmd = HCLGE_OPC_DFX_TQP_REG } },
};

/* make sure: len(name) + interval >= maxlen(item data) + 2,
 * for example, name = "pkt_num"(len: 7), the prototype of item data is u32,
 * and print as "%u"(maxlen: 10), so the interval should be at least 5.
 */
static void hclge_dbg_fill_content(char *content, u16 len,
				   const struct hclge_dbg_item *items,
				   const char **result, u16 size)
{
	char *pos = content;
	u16 i;

	memset(content, ' ', len);
	for (i = 0; i < size; i++) {
		if (result)
			strncpy(pos, result[i], strlen(result[i]));
		else
			strncpy(pos, items[i].name, strlen(items[i].name));
		pos += strlen(items[i].name) + items[i].interval;
	}
	*pos++ = '\n';
	*pos++ = '\0';
}

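/* Format the function id: id 0 is the PF, id N (N > 0) is VF N - 1. */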
static char *hclge_dbg_get_func_id_str(char *buf, u8 id)
{
	if (id)
		sprintf(buf, "vf%u", id - 1U);
	else
		sprintf(buf, "pf");

	return buf;
}

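/* Get the number of command BDs for the DFX register block at @offset. */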
static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset,
				    int *bd_num)
{
	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
	int entries_per_desc;
	int index;
	int ret;

	ret = hclge_query_bd_num_cmd_send(hdev, desc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get dfx bd_num, offset = %d, ret = %d\n",
			offset, ret);
		return ret;
	}

	entries_per_desc = ARRAY_SIZE(desc[0].data);
	index = offset % entries_per_desc;

	*bd_num = le32_to_cpu(desc[offset / entries_per_desc].data[index]);
	if (!(*bd_num)) {
		dev_err(&hdev->pdev->dev, "The value of dfx bd_num is 0!\n");
		return -EINVAL;
	}

	return 0;
}

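/* Send @cmd with a chain of @bd_num BDs, the first BD carrying @index. */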
static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
			      struct hclge_desc *desc_src,
			      int index, int bd_num,
			      enum hclge_opcode_type cmd)
{
	struct hclge_desc *desc = desc_src;
	int ret, i;

	hclge_cmd_setup_basic_desc(desc, cmd, true);
	desc->data[0] = cpu_to_le32(index);

	for (i = 1; i < bd_num; i++) {
		desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
		desc++;
		hclge_cmd_setup_basic_desc(desc, cmd, true);
	}

	ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"cmd(0x%x) send fail, ret = %d\n", cmd, ret);
	return ret;
}

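/* Dump per-TQP DFX registers: an item legend followed by one line per TQP. */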
static int
hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
		       const struct hclge_dbg_reg_type_info *reg_info,
		       char *buf, int len, int *pos)
{
	const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
	const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
	struct hclge_desc *desc_src;
	u32 index, entry, i, cnt;
	int bd_num, min_num, ret;
	struct hclge_desc *desc;

	ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
	if (ret)
		return ret;

	desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc_src)
		return -ENOMEM;

	min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);

	for (i = 0, cnt = 0; i < min_num; i++, dfx_message++)
		*pos += scnprintf(buf + *pos, len - *pos, "item%u = %s\n",
				  cnt++, dfx_message->message);

	for (i = 0; i < cnt; i++)
		*pos += scnprintf(buf + *pos, len - *pos, "item%u\t", i);

	*pos += scnprintf(buf + *pos, len - *pos, "\n");

	for (index = 0; index < hdev->vport[0].alloc_tqps; index++) {
		dfx_message = reg_info->dfx_msg;
		desc = desc_src;
		ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num,
					 reg_msg->cmd);
		if (ret)
			break;

		for (i = 0; i < min_num; i++, dfx_message++) {
			entry = i % HCLGE_DESC_DATA_LEN;
			if (i > 0 && !entry)
				desc++;

			*pos += scnprintf(buf + *pos, len - *pos, "%#x\t",
					  le32_to_cpu(desc->data[entry]));
		}
		*pos += scnprintf(buf + *pos, len - *pos, "\n");
	}

	kfree(desc_src);
	return ret;
}

static int
hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
			  const struct hclge_dbg_reg_type_info *reg_info,
			  char *buf, int len, int *pos)
{
	const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
	const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
	struct hclge_desc *desc_src;
	int bd_num, min_num, ret;
	struct hclge_desc *desc;
	u32 entry, i;

	ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
	if (ret)
		return ret;

	desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc_src)
		return -ENOMEM;

	desc = desc_src;

	ret = hclge_dbg_cmd_send(hdev, desc, 0, bd_num, reg_msg->cmd);
	if (ret) {
		kfree(desc_src);
		return ret;
	}

	min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);

	for (i = 0; i < min_num; i++, dfx_message++) {
		entry = i % HCLGE_DESC_DATA_LEN;
		if (i > 0 && !entry)
			desc++;
		if (!dfx_message->flag)
			continue;

		*pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
				  dfx_message->message,
				  le32_to_cpu(desc->data[entry]));
	}

	kfree(desc_src);
	return 0;
}

static const struct hclge_dbg_status_dfx_info hclge_dbg_mac_en_status[] = {
	{HCLGE_MAC_TX_EN_B,  "mac_trans_en"},
	{HCLGE_MAC_RX_EN_B,  "mac_rcv_en"},
	{HCLGE_MAC_PAD_TX_B, "pad_trans_en"},
	{HCLGE_MAC_PAD_RX_B, "pad_rcv_en"},
	{HCLGE_MAC_1588_TX_B, "1588_trans_en"},
	{HCLGE_MAC_1588_RX_B, "1588_rcv_en"},
	{HCLGE_MAC_APP_LP_B,  "mac_app_loop_en"},
	{HCLGE_MAC_LINE_LP_B, "mac_line_loop_en"},
	{HCLGE_MAC_FCS_TX_B,  "mac_fcs_tx_en"},
	{HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, "mac_rx_oversize_truncate_en"},
	{HCLGE_MAC_RX_FCS_STRIP_B, "mac_rx_fcs_strip_en"},
	{HCLGE_MAC_RX_FCS_B, "mac_rx_fcs_en"},
	{HCLGE_MAC_TX_UNDER_MIN_ERR_B, "mac_tx_under_min_err_en"},
	{HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, "mac_tx_oversize_truncate_en"}
};

static int  hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf,
					     int len, int *pos)
{
	struct hclge_config_mac_mode_cmd *req;
	struct hclge_desc desc;
	u32 loop_en, i, offset;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to dump mac enable status, ret = %d\n", ret);
		return ret;
	}

	req = (struct hclge_config_mac_mode_cmd *)desc.data;
	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);

	for (i = 0; i < ARRAY_SIZE(hclge_dbg_mac_en_status); i++) {
		offset = hclge_dbg_mac_en_status[i].offset;
		*pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
				  hclge_dbg_mac_en_status[i].message,
				  hnae3_get_bit(loop_en, offset));
	}

	return 0;
}

static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev, char *buf,
					 int len, int *pos)
{
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to dump mac frame size, ret = %d\n", ret);
		return ret;
	}

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;

	*pos += scnprintf(buf + *pos, len - *pos, "max_frame_size: %u\n",
			  le16_to_cpu(req->max_frm_size));
	*pos += scnprintf(buf + *pos, len - *pos, "min_frame_size: %u\n",
			  req->min_frm_size);

	return 0;
}

static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev, char *buf,
					   int len, int *pos)
{
#define HCLGE_MAC_SPEED_SHIFT	0
#define HCLGE_MAC_SPEED_MASK	GENMASK(5, 0)
#define HCLGE_MAC_DUPLEX_SHIFT	7

	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to dump mac speed duplex, ret = %d\n", ret);
		return ret;
	}

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	*pos += scnprintf(buf + *pos, len - *pos, "speed: %#lx\n",
			  hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
					  HCLGE_MAC_SPEED_SHIFT));
	*pos += scnprintf(buf + *pos, len - *pos, "duplex: %#x\n",
			  hnae3_get_bit(req->speed_dup,
					HCLGE_MAC_DUPLEX_SHIFT));
	return 0;
}

static void hclge_dbg_dump_mac_type(struct hclge_dev *hdev, char *buf, int len,
				    int *pos)
{
	struct hclge_vport *vport = &hdev->vport[0];
	struct hnae3_handle *handle = &vport->nic;

	*pos += scnprintf(buf + *pos, len - *pos, "type: %s\n",
			  handle->mac_type ? "ROH" : "Ethernet");
}

static int hclge_dbg_dump_mac(struct hclge_dev *hdev, char *buf, int len)
{
	int pos = 0;
	int ret;

	ret = hclge_dbg_dump_mac_enable_status(hdev, buf, len, &pos);
	if (ret)
		return ret;

	ret = hclge_dbg_dump_mac_frame_size(hdev, buf, len, &pos);
	if (ret)
		return ret;

	ret = hclge_dbg_dump_mac_speed_duplex(hdev, buf, len, &pos);
	if (ret)
		return ret;

	hclge_dbg_dump_mac_type(hdev, buf, len, &pos);

	return 0;
}

static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
				   int *pos)
{
	struct hclge_dbg_bitmap_cmd req;
	struct hclge_desc desc;
	u16 qset_id, qset_num;
	int ret;

	ret = hclge_tm_get_qset_num(hdev, &qset_num);
	if (ret)
		return ret;

	*pos += scnprintf(buf + *pos, len - *pos,
			  "qset_id  roce_qset_mask  nic_qset_mask  qset_shaping_pass  qset_bp_status\n");
	for (qset_id = 0; qset_id < qset_num; qset_id++) {
		ret = hclge_dbg_cmd_send(hdev, &desc, qset_id, 1,
					 HCLGE_OPC_QSET_DFX_STS);
		if (ret)
			return ret;

		req.bitmap = (u8)le32_to_cpu(desc.data[1]);

		*pos += scnprintf(buf + *pos, len - *pos,
				  "%04u           %#x            %#x             %#x               %#x\n",
				  qset_id, req.bit0, req.bit1, req.bit2,
				  req.bit3);
	}

	return 0;
}

static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
				  int *pos)
{
	struct hclge_dbg_bitmap_cmd req;
	struct hclge_desc desc;
	u8 pri_id, pri_num;
	int ret;

	ret = hclge_tm_get_pri_num(hdev, &pri_num);
	if (ret)
		return ret;

	*pos += scnprintf(buf + *pos, len - *pos,
			  "pri_id  pri_mask  pri_cshaping_pass  pri_pshaping_pass\n");
	for (pri_id = 0; pri_id < pri_num; pri_id++) {
		ret = hclge_dbg_cmd_send(hdev, &desc, pri_id, 1,
					 HCLGE_OPC_PRI_DFX_STS);
		if (ret)
			return ret;

		req.bitmap = (u8)le32_to_cpu(desc.data[1]);

		*pos += scnprintf(buf + *pos, len - *pos,
				  "%03u       %#x           %#x                %#x\n",
				  pri_id, req.bit0, req.bit1, req.bit2);
	}

	return 0;
}

static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
				 int *pos)
{
	struct hclge_dbg_bitmap_cmd req;
	struct hclge_desc desc;
	u8 pg_id;
	int ret;

	*pos += scnprintf(buf + *pos, len - *pos,
			  "pg_id  pg_mask  pg_cshaping_pass  pg_pshaping_pass\n");
	for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
		ret = hclge_dbg_cmd_send(hdev, &desc, pg_id, 1,
					 HCLGE_OPC_PG_DFX_STS);
		if (ret)
			return ret;

		req.bitmap = (u8)le32_to_cpu(desc.data[1]);

		*pos += scnprintf(buf + *pos, len - *pos,
				  "%03u      %#x           %#x               %#x\n",
				  pg_id, req.bit0, req.bit1, req.bit2);
	}

	return 0;
}

static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, char *buf, int len,
				    int *pos)
{
	struct hclge_desc desc;
	u16 nq_id;
	int ret;

	*pos += scnprintf(buf + *pos, len - *pos,
			  "nq_id  sch_nic_queue_cnt  sch_roce_queue_cnt\n");
	for (nq_id = 0; nq_id < hdev->num_tqps; nq_id++) {
		ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
					 HCLGE_OPC_SCH_NQ_CNT);
		if (ret)
			return ret;

		*pos += scnprintf(buf + *pos, len - *pos, "%04u           %#x",
				  nq_id, le32_to_cpu(desc.data[1]));

		ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
					 HCLGE_OPC_SCH_RQ_CNT);
		if (ret)
			return ret;

		*pos += scnprintf(buf + *pos, len - *pos,
				  "               %#x\n",
				  le32_to_cpu(desc.data[1]));
	}

	return 0;
}

static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
				   int *pos)
{
	struct hclge_dbg_bitmap_cmd req;
	struct hclge_desc desc;
	u8 port_id = 0;
	int ret;

	ret = hclge_dbg_cmd_send(hdev, &desc, port_id, 1,
				 HCLGE_OPC_PORT_DFX_STS);
	if (ret)
		return ret;

	req.bitmap = (u8)le32_to_cpu(desc.data[1]);

	*pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n",
			 req.bit0);
	*pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n",
			 req.bit1);

	return 0;
}

static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, char *buf, int len,
				 int *pos)
{
	struct hclge_desc desc[2];
	u8 port_id = 0;
	int ret;

	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
				 HCLGE_OPC_TM_INTERNAL_CNT);
	if (ret)
		return ret;

	*pos += scnprintf(buf + *pos, len - *pos, "SCH_NIC_NUM: %#x\n",
			  le32_to_cpu(desc[0].data[1]));
	*pos += scnprintf(buf + *pos, len - *pos, "SCH_ROCE_NUM: %#x\n",
			  le32_to_cpu(desc[0].data[2]));

	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 2,
				 HCLGE_OPC_TM_INTERNAL_STS);
	if (ret)
		return ret;

	*pos += scnprintf(buf + *pos, len - *pos, "pri_bp: %#x\n",
			  le32_to_cpu(desc[0].data[1]));
	*pos += scnprintf(buf + *pos, len - *pos, "fifo_dfx_info: %#x\n",
			  le32_to_cpu(desc[0].data[2]));
	*pos += scnprintf(buf + *pos, len - *pos,
			  "sch_roce_fifo_afull_gap: %#x\n",
			  le32_to_cpu(desc[0].data[3]));
	*pos += scnprintf(buf + *pos, len - *pos,
			  "tx_private_waterline: %#x\n",
			  le32_to_cpu(desc[0].data[4]));
	*pos += scnprintf(buf + *pos, len - *pos, "tm_bypass_en: %#x\n",
			  le32_to_cpu(desc[0].data[5]));
	*pos += scnprintf(buf + *pos, len - *pos, "SSU_TM_BYPASS_EN: %#x\n",
			  le32_to_cpu(desc[1].data[0]));
	*pos += scnprintf(buf + *pos, len - *pos, "SSU_RESERVE_CFG: %#x\n",
			  le32_to_cpu(desc[1].data[1]));

	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER)
		return 0;

	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
				 HCLGE_OPC_TM_INTERNAL_STS_1);
	if (ret)
		return ret;

	*pos += scnprintf(buf + *pos, len - *pos, "TC_MAP_SEL: %#x\n",
			  le32_to_cpu(desc[0].data[1]));
	*pos += scnprintf(buf + *pos, len - *pos, "IGU_PFC_PRI_EN: %#x\n",
			  le32_to_cpu(desc[0].data[2]));
	*pos += scnprintf(buf + *pos, len - *pos, "MAC_PFC_PRI_EN: %#x\n",
			  le32_to_cpu(desc[0].data[3]));
	*pos += scnprintf(buf + *pos, len - *pos, "IGU_PRI_MAP_TC_CFG: %#x\n",
			  le32_to_cpu(desc[0].data[4]));
	*pos += scnprintf(buf + *pos, len - *pos,
			  "IGU_TX_PRI_MAP_TC_CFG: %#x\n",
			  le32_to_cpu(desc[0].data[5]));

	return 0;
}

static int hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *buf, int len)
{
	int pos = 0;
	int ret;

	ret = hclge_dbg_dump_dcb_qset(hdev, buf, len, &pos);
	if (ret)
		return ret;

	ret = hclge_dbg_dump_dcb_pri(hdev, buf, len, &pos);
	if (ret)
		return ret;

	ret = hclge_dbg_dump_dcb_pg(hdev, buf, len, &pos);
	if (ret)
		return ret;

	ret = hclge_dbg_dump_dcb_queue(hdev, buf, len, &pos);
	if (ret)
		return ret;

	ret = hclge_dbg_dump_dcb_port(hdev, buf, len, &pos);
	if (ret)
		return ret;

	return hclge_dbg_dump_dcb_tm(hdev, buf, len, &pos);
}

static int hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev,
				  enum hnae3_dbg_cmd cmd, char *buf, int len)
{
	const struct hclge_dbg_reg_type_info *reg_info;
	int pos = 0, ret = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) {
		reg_info = &hclge_dbg_reg_info[i];
		if (cmd == reg_info->cmd) {
			if (cmd == HNAE3_DBG_CMD_REG_TQP)
				return hclge_dbg_dump_reg_tqp(hdev, reg_info,
							      buf, len, &pos);

			ret = hclge_dbg_dump_reg_common(hdev, reg_info, buf,
							len, &pos);
			if (ret)
				break;
		}
	}

	return ret;
}

static int hclge_dbg_dump_tc(struct hclge_dev *hdev, char *buf, int len)
{
	struct hclge_ets_tc_weight_cmd *ets_weight;
	struct hclge_desc desc;
	char *sch_mode_str;
	int pos = 0;
	int ret;
	u8 i;

	if (!hnae3_dev_dcb_supported(hdev)) {
		dev_err(&hdev->pdev->dev,
			"Only DCB-supported dev supports tc\n");
		return -EOPNOTSUPP;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to get tc weight, ret = %d\n",
			ret);
		return ret;
	}

	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;

	pos += scnprintf(buf + pos, len - pos, "enabled tc number: %u\n",
			 hdev->tm_info.num_tc);
	pos += scnprintf(buf + pos, len - pos, "weight_offset: %u\n",
			 ets_weight->weight_offset);

	pos += scnprintf(buf + pos, len - pos, "TC    MODE  WEIGHT\n");
	for (i = 0; i < HNAE3_MAX_TC; i++) {
		sch_mode_str = ets_weight->tc_weight[i] ? "dwrr" : "sp";
		pos += scnprintf(buf + pos, len - pos, "%u     %4s    %3u\n",
				 i, sch_mode_str,
				 hdev->tm_info.pg_info[0].tc_dwrr[i]);
	}

	return 0;
}

static const struct hclge_dbg_item tm_pg_items[] = {
	{ "ID", 2 },
	{ "PRI_MAP", 2 },
	{ "MODE", 2 },
	{ "DWRR", 2 },
	{ "C_IR_B", 2 },
	{ "C_IR_U", 2 },
	{ "C_IR_S", 2 },
	{ "C_BS_B", 2 },
	{ "C_BS_S", 2 },
	{ "C_FLAG", 2 },
	{ "C_RATE(Mbps)", 2 },
	{ "P_IR_B", 2 },
	{ "P_IR_U", 2 },
	{ "P_IR_S", 2 },
	{ "P_BS_B", 2 },
	{ "P_BS_S", 2 },
	{ "P_FLAG", 2 },
	{ "P_RATE(Mbps)", 0 }
};

static void hclge_dbg_fill_shaper_content(struct hclge_tm_shaper_para *para,
					  char **result, u8 *index)
{
	sprintf(result[(*index)++], "%3u", para->ir_b);
	sprintf(result[(*index)++], "%3u", para->ir_u);
	sprintf(result[(*index)++], "%3u", para->ir_s);
	sprintf(result[(*index)++], "%3u", para->bs_b);
	sprintf(result[(*index)++], "%3u", para->bs_s);
	sprintf(result[(*index)++], "%3u", para->flag);
	sprintf(result[(*index)++], "%6u", para->rate);
}

static int __hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *data_str,
				  char *buf, int len)
{
	struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
	char *result[ARRAY_SIZE(tm_pg_items)], *sch_mode_str;
	u8 pg_id, sch_mode, weight, pri_bit_map, i, j;
	char content[HCLGE_DBG_TM_INFO_LEN];
	int pos = 0;
	int ret;

	for (i = 0; i < ARRAY_SIZE(tm_pg_items); i++) {
		result[i] = data_str;
		data_str += HCLGE_DBG_DATA_STR_LEN;
	}

	hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
			       NULL, ARRAY_SIZE(tm_pg_items));
	pos += scnprintf(buf + pos, len - pos, "%s", content);

	for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
		ret = hclge_tm_get_pg_to_pri_map(hdev, pg_id, &pri_bit_map);
		if (ret)
			return ret;

		ret = hclge_tm_get_pg_sch_mode(hdev, pg_id, &sch_mode);
		if (ret)
			return ret;

		ret = hclge_tm_get_pg_weight(hdev, pg_id, &weight);
		if (ret)
			return ret;

		ret = hclge_tm_get_pg_shaper(hdev, pg_id,
					     HCLGE_OPC_TM_PG_C_SHAPPING,
					     &c_shaper_para);
		if (ret)
			return ret;

		ret = hclge_tm_get_pg_shaper(hdev, pg_id,
					     HCLGE_OPC_TM_PG_P_SHAPPING,
					     &p_shaper_para);
		if (ret)
			return ret;

		sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
				       "sp";

		j = 0;
		sprintf(result[j++], "%02u", pg_id);
		sprintf(result[j++], "0x%02x", pri_bit_map);
		sprintf(result[j++], "%4s", sch_mode_str);
		sprintf(result[j++], "%3u", weight);
		hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
		hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);

		hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
				       (const char **)result,
				       ARRAY_SIZE(tm_pg_items));
		pos += scnprintf(buf + pos, len - pos, "%s", content);
	}

	return 0;
}

static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
{
	char *data_str;
	int ret;

	data_str = kcalloc(ARRAY_SIZE(tm_pg_items),
			   HCLGE_DBG_DATA_STR_LEN, GFP_KERNEL);
	if (!data_str)
		return -ENOMEM;

	ret = __hclge_dbg_dump_tm_pg(hdev, data_str, buf, len);

	kfree(data_str);

	return ret;
}

static int hclge_dbg_dump_tm_port(struct hclge_dev *hdev, char *buf, int len)
{
	struct hclge_tm_shaper_para shaper_para;
	int pos = 0;
	int ret;

	ret = hclge_tm_get_port_shaper(hdev, &shaper_para);
	if (ret)
		return ret;

	pos += scnprintf(buf + pos, len - pos,
			 "IR_B  IR_U  IR_S  BS_B  BS_S  FLAG  RATE(Mbps)\n");
	pos += scnprintf(buf + pos, len - pos,
			 "%3u   %3u   %3u   %3u   %3u     %1u   %6u\n",
			 shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s,
			 shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag,
			 shaper_para.rate);

	return 0;
}

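/* Dump the BP-to-qset bitmaps of one TC; returns bytes written or -errno. */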
static int hclge_dbg_dump_tm_bp_qset_map(struct hclge_dev *hdev, u8 tc_id,
					 char *buf, int len)
{
	u32 qset_mapping[HCLGE_BP_EXT_GRP_NUM];
	struct hclge_bp_to_qs_map_cmd *map;
	struct hclge_desc desc;
	int pos = 0;
	u8 group_id;
	u8 grp_num;
	u16 i = 0;
	int ret;

	grp_num = hdev->num_tqps <= HCLGE_TQP_MAX_SIZE_DEV_V2 ?
		  HCLGE_BP_GRP_NUM : HCLGE_BP_EXT_GRP_NUM;
	map = (struct hclge_bp_to_qs_map_cmd *)desc.data;
	for (group_id = 0; group_id < grp_num; group_id++) {
		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
					   true);
		map->tc_id = tc_id;
		map->qs_group_id = group_id;
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to get bp to qset map, ret = %d\n",
				ret);
			return ret;
		}

		qset_mapping[group_id] = le32_to_cpu(map->qs_bit_map);
	}

	pos += scnprintf(buf + pos, len - pos, "INDEX | TM BP QSET MAPPING:\n");
	for (group_id = 0; group_id < grp_num / 8; group_id++) {
		pos += scnprintf(buf + pos, len - pos,
			 "%04d  | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
			 group_id * 256, qset_mapping[i + 7],
			 qset_mapping[i + 6], qset_mapping[i + 5],
			 qset_mapping[i + 4], qset_mapping[i + 3],
			 qset_mapping[i + 2], qset_mapping[i + 1],
			 qset_mapping[i]);
		i += 8;
	}

	return pos;
}

static int hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *buf, int len)
{
	u16 queue_id;
	u16 qset_id;
	u8 link_vld;
	int pos = 0;
	u8 pri_id;
	u8 tc_id;
	int ret;

	for (queue_id = 0; queue_id < hdev->num_tqps; queue_id++) {
		ret = hclge_tm_get_q_to_qs_map(hdev, queue_id, &qset_id);
		if (ret)
			return ret;

		ret = hclge_tm_get_qset_map_pri(hdev, qset_id, &pri_id,
						&link_vld);
		if (ret)
			return ret;

		ret = hclge_tm_get_q_to_tc(hdev, queue_id, &tc_id);
		if (ret)
			return ret;

		pos += scnprintf(buf + pos, len - pos,
				 "QUEUE_ID   QSET_ID   PRI_ID   TC_ID\n");
		pos += scnprintf(buf + pos, len - pos,
				 "%04u        %4u       %3u      %2u\n",
				 queue_id, qset_id, pri_id, tc_id);

		if (!hnae3_dev_dcb_supported(hdev))
			continue;

		ret = hclge_dbg_dump_tm_bp_qset_map(hdev, tc_id, buf + pos,
						    len - pos);
		if (ret < 0)
			return ret;
		pos += ret;

		pos += scnprintf(buf + pos, len - pos, "\n");
	}

	return 0;
}

static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len)
{
	struct hclge_tm_nodes_cmd *nodes;
	struct hclge_desc desc;
	int pos = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to dump tm nodes, ret = %d\n", ret);
		return ret;
	}

	nodes = (struct hclge_tm_nodes_cmd *)desc.data;

	pos += scnprintf(buf + pos, len - pos, "       BASE_ID  MAX_NUM\n");
	pos += scnprintf(buf + pos, len - pos, "PG      %4u      %4u\n",
			 nodes->pg_base_id, nodes->pg_num);
	pos += scnprintf(buf + pos, len - pos, "PRI     %4u      %4u\n",
			 nodes->pri_base_id, nodes->pri_num);
	pos += scnprintf(buf + pos, len - pos, "QSET    %4u      %4u\n",
			 le16_to_cpu(nodes->qset_base_id),
			 le16_to_cpu(nodes->qset_num));
	pos += scnprintf(buf + pos, len - pos, "QUEUE   %4u      %4u\n",
			 le16_to_cpu(nodes->queue_base_id),
			 le16_to_cpu(nodes->queue_num));

	return 0;
}

static const struct hclge_dbg_item tm_pri_items[] = {
	{ "ID", 4 },
	{ "MODE", 2 },
	{ "DWRR", 2 },
	{ "C_IR_B", 2 },
	{ "C_IR_U", 2 },
	{ "C_IR_S", 2 },
	{ "C_BS_B", 2 },
	{ "C_BS_S", 2 },
	{ "C_FLAG", 2 },
	{ "C_RATE(Mbps)", 2 },
	{ "P_IR_B", 2 },
	{ "P_IR_U", 2 },
	{ "P_IR_S", 2 },
	{ "P_BS_B", 2 },
	{ "P_BS_S", 2 },
	{ "P_FLAG", 2 },
	{ "P_RATE(Mbps)", 0 }
};

static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
{
	char data_str[ARRAY_SIZE(tm_pri_items)][HCLGE_DBG_DATA_STR_LEN];
	struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
	char *result[ARRAY_SIZE(tm_pri_items)], *sch_mode_str;
	char content[HCLGE_DBG_TM_INFO_LEN];
	u8 pri_num, sch_mode, weight, i, j;
	int pos, ret;

	ret = hclge_tm_get_pri_num(hdev, &pri_num);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(tm_pri_items); i++)
		result[i] = &data_str[i][0];

	hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
			       NULL, ARRAY_SIZE(tm_pri_items));
	pos = scnprintf(buf, len, "%s", content);

	for (i = 0; i < pri_num; i++) {
		ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode);
		if (ret)
			return ret;

		ret = hclge_tm_get_pri_weight(hdev, i, &weight);
		if (ret)
			return ret;

		ret = hclge_tm_get_pri_shaper(hdev, i,
					      HCLGE_OPC_TM_PRI_C_SHAPPING,
					      &c_shaper_para);
		if (ret)
			return ret;

		ret = hclge_tm_get_pri_shaper(hdev, i,
					      HCLGE_OPC_TM_PRI_P_SHAPPING,
					      &p_shaper_para);
		if (ret)
			return ret;

		sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
			       "sp";

		j = 0;
		sprintf(result[j++], "%04u", i);
		sprintf(result[j++], "%4s", sch_mode_str);
		sprintf(result[j++], "%3u", weight);
		hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
		hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
		hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
				       (const char **)result,
				       ARRAY_SIZE(tm_pri_items));
		pos += scnprintf(buf + pos, len - pos, "%s", content);
	}

	return 0;
}

static const struct hclge_dbg_item tm_qset_items[] = {
	{ "ID", 4 },
	{ "MAP_PRI", 2 },
	{ "LINK_VLD", 2 },
	{ "MODE", 2 },
	{ "DWRR", 2 },
	{ "IR_B", 2 },
	{ "IR_U", 2 },
	{ "IR_S", 2 },
	{ "BS_B", 2 },
	{ "BS_S", 2 },
	{ "FLAG", 2 },
	{ "RATE(Mbps)", 0 }
};

static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len)
{
	char data_str[ARRAY_SIZE(tm_qset_items)][HCLGE_DBG_DATA_STR_LEN];
	char *result[ARRAY_SIZE(tm_qset_items)], *sch_mode_str;
	u8 priority, link_vld, sch_mode, weight;
	struct hclge_tm_shaper_para shaper_para;
	char content[HCLGE_DBG_TM_INFO_LEN];
	u16 qset_num, i;
	int ret, pos;
	u8 j;

	ret = hclge_tm_get_qset_num(hdev, &qset_num);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(tm_qset_items); i++)
		result[i] = &data_str[i][0];

	hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
			       NULL, ARRAY_SIZE(tm_qset_items));
	pos = scnprintf(buf, len, "%s", content);

	for (i = 0; i < qset_num; i++) {
		ret = hclge_tm_get_qset_map_pri(hdev, i, &priority, &link_vld);
		if (ret)
			return ret;

		ret = hclge_tm_get_qset_sch_mode(hdev, i, &sch_mode);
		if (ret)
			return ret;

		ret = hclge_tm_get_qset_weight(hdev, i, &weight);
		if (ret)
			return ret;

		ret = hclge_tm_get_qset_shaper(hdev, i, &shaper_para);
		if (ret)
			return ret;

		sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
			       "sp";

		j = 0;
		sprintf(result[j++], "%04u", i);
		sprintf(result[j++], "%4u", priority);
		sprintf(result[j++], "%4u", link_vld);
		sprintf(result[j++], "%4s", sch_mode_str);
		sprintf(result[j++], "%3u", weight);
		hclge_dbg_fill_shaper_content(&shaper_para, result, &j);

		hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
				       (const char **)result,
				       ARRAY_SIZE(tm_qset_items));
		pos += scnprintf(buf + pos, len - pos, "%s", content);
	}

	return 0;
}

static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf,
					int len)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;
	int pos = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to dump qos pause, ret = %d\n", ret);
		return ret;
	}

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	pos += scnprintf(buf + pos, len - pos, "pause_trans_gap: 0x%x\n",
			 pause_param->pause_trans_gap);
	pos += scnprintf(buf + pos, len - pos, "pause_trans_time: 0x%x\n",
			 le16_to_cpu(pause_param->pause_trans_time));
	return 0;
}

#define HCLGE_DBG_TC_MASK		0x0F

static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
				      int len)
{
#define HCLGE_DBG_TC_BIT_WIDTH		4

	struct hclge_qos_pri_map_cmd *pri_map;
	struct hclge_desc desc;
	int pos = 0;
	u8 *pri_tc;
	u8 tc, i;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to dump qos pri map, ret = %d\n", ret);
		return ret;
	}

	pri_map = (struct hclge_qos_pri_map_cmd *)desc.data;

	pos += scnprintf(buf + pos, len - pos, "vlan_to_pri: 0x%x\n",
			 pri_map->vlan_pri);
	pos += scnprintf(buf + pos, len - pos, "PRI  TC\n");

	pri_tc = (u8 *)pri_map;
	for (i = 0; i < HNAE3_MAX_TC; i++) {
		tc = pri_tc[i >> 1] >> ((i & 1) * HCLGE_DBG_TC_BIT_WIDTH);
		tc &= HCLGE_DBG_TC_MASK;
		pos += scnprintf(buf + pos, len - pos, "%u     %u\n", i, tc);
	}

	return 0;
}

static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf,
				       int len)
{
	struct hnae3_knic_private_info *kinfo = &hdev->vport[0].nic.kinfo;
	struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM];
	u8 *req0 = (u8 *)desc[0].data;
	u8 *req1 = (u8 *)desc[1].data;
	u8 dscp_tc[HNAE3_MAX_DSCP];
	int pos, ret;
	u8 i, j;

	pos = scnprintf(buf, len, "tc map mode: %s\n",
			tc_map_mode_str[kinfo->tc_map_mode]);

	if (kinfo->tc_map_mode != HNAE3_TC_MAP_MODE_DSCP)
		return 0;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QOS_MAP, true);
	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QOS_MAP, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_DSCP_MAP_TC_BD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to dump qos dscp map, ret = %d\n", ret);
		return ret;
	}

	pos += scnprintf(buf + pos, len - pos, "\nDSCP  PRIO  TC\n");

	/* The low 32 dscp setting use bd0, high 32 dscp setting use bd1 */
	for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) {
		j = i + HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM;
		/* Each dscp setting has 4 bits, so each byte saves two dscp
		 * setting
		 */
		dscp_tc[i] = req0[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i);
		dscp_tc[j] = req1[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i);
		dscp_tc[i] &= HCLGE_DBG_TC_MASK;
		dscp_tc[j] &= HCLGE_DBG_TC_MASK;
	}

	for (i = 0; i < HNAE3_MAX_DSCP; i++) {
		if (kinfo->dscp_prio[i] == HNAE3_PRIO_ID_INVALID)
			continue;

		pos += scnprintf(buf + pos, len - pos, " %2u    %u    %u\n",
				 i, kinfo->dscp_prio[i], dscp_tc[i]);
	}

	return 0;
}

static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len)
{
	struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
	struct hclge_desc desc;
	int pos = 0;
	int i, ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to dump tx buf, ret = %d\n", ret);
		return ret;
	}

	tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc.data;
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		pos += scnprintf(buf + pos, len - pos,
				 "tx_packet_buf_tc_%d: 0x%x\n", i,
				 le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));

	return pos;
}

static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev, char *buf,
					  int len)
{
	struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
	struct hclge_desc desc;
	int pos = 0;
	int i, ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to dump rx priv buf, ret = %d\n", ret);
		return ret;
	}

	pos += scnprintf(buf + pos, len - pos, "\n");

	rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc.data;
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		pos += scnprintf(buf + pos, len - pos,
				 "rx_packet_buf_tc_%d: 0x%x\n", i,
				 le16_to_cpu(rx_buf_cmd->buf_num[i]));

	pos += scnprintf(buf + pos, len - pos, "rx_share_buf: 0x%x\n",
			 le16_to_cpu(rx_buf_cmd->shared_buf));

	return pos;
}

static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev, char *buf,
					   int len)
{
	struct hclge_rx_com_wl *rx_com_wl;
	struct hclge_desc desc;
	int pos = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to dump rx common wl, ret = %d\n", ret);
		return ret;
	}

	rx_com_wl = (struct hclge_rx_com_wl *)desc.data;
	pos += scnprintf(buf + pos, len - pos, "\n");
	pos += scnprintf(buf + pos, len - pos,
			 "rx_com_wl: high: 0x%x, low: 0x%x\n",
			 le16_to_cpu(rx_com_wl->com_wl.high),
			 le16_to_cpu(rx_com_wl->com_wl.low));

	return pos;
}

static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev, char *buf,
					    int len)
{
	struct hclge_rx_com_wl *rx_packet_cnt;
	struct hclge_desc desc;
	int pos = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_GBL_PKT_CNT, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to dump rx global pkt cnt, ret = %d\n", ret);
		return ret;
	}

	rx_packet_cnt = (struct hclge_rx_com_wl *)desc.data;
	pos += scnprintf(buf + pos, len - pos,
			 "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
			 le16_to_cpu(rx_packet_cnt->com_wl.high),
			 le16_to_cpu(rx_packet_cnt->com_wl.low));

	return pos;
}

static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev, char *buf,
					     int len)
{
	struct hclge_rx_priv_wl_buf *rx_priv_wl;
	struct hclge_desc desc[2];
	int pos = 0;
	int i, ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to dump rx priv wl buf, ret = %d\n", ret);
		return ret;
	}

	rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
		pos += scnprintf(buf + pos, len - pos,
			 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
			 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
			 le16_to_cpu(rx_priv_wl->tc_wl[i].low));

	rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
		pos += scnprintf(buf + pos, len - pos,
			 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
			 i + HCLGE_TC_NUM_ONE_DESC,
			 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
			 le16_to_cpu(rx_priv_wl->tc_wl[i].low));

	return pos;
}

static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev,
						  char *buf, int len)
{
	struct hclge_rx_com_thrd *rx_com_thrd;
	struct hclge_desc desc[2];
	int pos = 0;
	int i, ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to dump rx common threshold, ret = %d\n", ret);
		return ret;
	}

	pos += scnprintf(buf + pos, len - pos, "\n");
	rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
		pos += scnprintf(buf + pos, len - pos,
			 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
			 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
			 le16_to_cpu(rx_com_thrd->com_thrd[i].low));

	rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
		pos += scnprintf(buf + pos, len - pos,
			 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
			 i + HCLGE_TC_NUM_ONE_DESC,
			 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
			 le16_to_cpu(rx_com_thrd->com_thrd[i].low));

	return pos;
}

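/* Aggregate the QoS buffer dumps; each helper returns bytes written or -errno. */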
static int hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev, char *buf,
				      int len)
{
	int pos = 0;
	int ret;

	ret = hclge_dbg_dump_tx_buf_cfg(hdev, buf + pos, len - pos);
	if (ret < 0)
		return ret;
	pos += ret;

	ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev, buf + pos, len - pos);
	if (ret < 0)
		return ret;
	pos += ret;

	ret = hclge_dbg_dump_rx_common_wl_cfg(hdev, buf + pos, len - pos);
	if (ret < 0)
		return ret;
	pos += ret;

	ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev, buf + pos, len - pos);
	if (ret < 0)
		return ret;
	pos += ret;

	pos += scnprintf(buf + pos, len - pos, "\n");
	if (!hnae3_dev_dcb_supported(hdev))
		return 0;

	ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev, buf + pos, len - pos);
	if (ret < 0)
		return ret;
	pos += ret;

	ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev, buf + pos,
						     len - pos);
	if (ret < 0)
		return ret;

	return 0;
}

static int hclge_dbg_dump_mng_table(struct hclge_dev *hdev, char *buf, int len)
{
	struct hclge_mac_ethertype_idx_rd_cmd *req0;
	struct hclge_desc desc;
	u32 msg_egress_port;
	int pos = 0;
	int ret, i;

	pos += scnprintf(buf + pos, len - pos,
			 "entry  mac_addr          mask  ether  ");
	pos += scnprintf(buf + pos, len - pos,
			 "mask  vlan  mask  i_map  i_dir  e_type  ");
	pos += scnprintf(buf + pos, len - pos, "pf_id  vf_id  q_id  drop\n");

	for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD,
					   true);
		req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data;
		req0->index = cpu_to_le16(i);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to dump manage table, ret = %d\n", ret);
			return ret;
		}

		if (!req0->resp_code)
			continue;

		pos += scnprintf(buf + pos, len - pos, "%02u     %pM ",
				 le16_to_cpu(req0->index), req0->mac_addr);

		pos += scnprintf(buf + pos, len - pos,
				 "%x     %04x   %x     %04x  ",
				 !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
				 le16_to_cpu(req0->ethter_type),
				 !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
				 le16_to_cpu(req0->vlan_tag) &
				 HCLGE_DBG_MNG_VLAN_TAG);

		pos += scnprintf(buf + pos, len - pos,
				 "%x     %02x     %02x     ",
				 !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
				 req0->i_port_bitmap, req0->i_port_direction);

		msg_egress_port = le16_to_cpu(req0->egress_port);
		pos += scnprintf(buf + pos, len - pos,
				 "%x       %x      %02x     %04x  %x\n",
				 !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B),
				 msg_egress_port & HCLGE_DBG_MNG_PF_ID,
				 (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
				 le16_to_cpu(req0->egress_queue),
				 !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B));
	}

	return 0;
}

#define HCLGE_DBG_TCAM_BUF_SIZE 256

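/* Read the x or y key of TCAM entry @loc and format it into @tcam_buf. */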
static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
				  char *tcam_buf, u8 stage, u32 loc)
{
	struct hclge_fd_tcam_config_1_cmd *req1;
	struct hclge_fd_tcam_config_2_cmd *req2;
	struct hclge_fd_tcam_config_3_cmd *req3;
	struct hclge_desc desc[3];
	int pos = 0;
	int ret, i;
	u32 *req;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true);
	desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true);

	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;

	req1->stage  = stage;
	req1->xy_sel = sel_x ? 1 : 0;
	req1->index  = cpu_to_le32(loc);

	ret = hclge_cmd_send(&hdev->hw, desc, 3);
	if (ret)
		return ret;

	pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
			 "read result tcam key %s(%u):\n", sel_x ? "x" : "y",
			 loc);

	/* tcam_data0 ~ tcam_data1 */
	req = (u32 *)req1->tcam_data;
	for (i = 0; i < 2; i++)
		pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
				 "%08x\n", *req++);

	/* tcam_data2 ~ tcam_data7 */
	req = (u32 *)req2->tcam_data;
	for (i = 0; i < 6; i++)
		pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
				 "%08x\n", *req++);

	/* tcam_data8 ~ tcam_data12 */
	req = (u32 *)req3->tcam_data;
	for (i = 0; i < 5; i++)
		pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
				 "%08x\n", *req++);

	return ret;
}

static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs)
{
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	int cnt = 0;

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		rule_locs[cnt] = rule->location;
		cnt++;
	}
	spin_unlock_bh(&hdev->fd_rule_lock);

	if (cnt != hdev->hclge_fd_rule_num || cnt == 0)
		return -EINVAL;

	return cnt;
}

static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
{
	u32 rule_num = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
	int i, ret, rule_cnt;
	u16 *rule_locs;
	char *tcam_buf;
	int pos = 0;

	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
		dev_err(&hdev->pdev->dev,
			"Only FD-supported dev supports dump fd tcam\n");
		return -EOPNOTSUPP;
	}

	if (!hdev->hclge_fd_rule_num || !rule_num)
		return 0;

	rule_locs = kcalloc(rule_num, sizeof(u16), GFP_KERNEL);
	if (!rule_locs)
		return -ENOMEM;

	tcam_buf = kzalloc(HCLGE_DBG_TCAM_BUF_SIZE, GFP_KERNEL);
	if (!tcam_buf) {
		kfree(rule_locs);
		return -ENOMEM;
	}

	rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs);
	if (rule_cnt < 0) {
		ret = rule_cnt;
		dev_err(&hdev->pdev->dev,
			"failed to get rule number, ret = %d\n", ret);
		goto out;
	}

	ret = 0;
	for (i = 0; i < rule_cnt; i++) {
		ret = hclge_dbg_fd_tcam_read(hdev, true, tcam_buf,
					     HCLGE_FD_STAGE_1, rule_locs[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to get fd tcam key x, ret = %d\n", ret);
			goto out;
		}

		pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);

		ret = hclge_dbg_fd_tcam_read(hdev, false, tcam_buf,
					     HCLGE_FD_STAGE_1, rule_locs[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to get fd tcam key y, ret = %d\n", ret);
			goto out;
		}

		pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
	}

out:
	kfree(tcam_buf);
	kfree(rule_locs);
	return ret;
}

static int hclge_query_rules_valid(struct hclge_dev *hdev, u8 stage, u32 loc)
{
#define HCLGE_TCAM_SELECTION_X	1
	struct hclge_fd_tcam_config_1_cmd *req1;
	struct hclge_fd_tcam_config_2_cmd *req2;
	struct hclge_fd_tcam_config_3_cmd *req3;
	struct hclge_desc desc[3];
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true);
	desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true);

	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;

	req1->stage = stage;
	req1->xy_sel = HCLGE_TCAM_SELECTION_X;
	req1->index = cpu_to_le32(loc);

	ret = hclge_cmd_send(&hdev->hw, desc, 3);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to read tcam status, ret = %d\n", ret);
		return ret;
	}

	return req1->entry_vld;
}

static int hclge_dbg_dump_qb_tcam(struct hclge_dev *hdev, char *buf, int len)
{
	char *tcam_buf;
	int pos = 0;
	int ret = 0;
	int i;

	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
		dev_err(&hdev->pdev->dev,
			"Only FD-supported dev supports dump fd tcam\n");
		return -EOPNOTSUPP;
	}

	tcam_buf = kzalloc(HCLGE_DBG_TCAM_BUF_SIZE, GFP_KERNEL);
	if (!tcam_buf)
		return -ENOMEM;

	for (i = 0; i < hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; i++) {
		if (hclge_query_rules_valid(hdev, HCLGE_FD_STAGE_1, i) <= 0)
			continue;

		ret = hclge_dbg_fd_tcam_read(hdev, true, tcam_buf,
					     HCLGE_FD_STAGE_1, i);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to get qb tcam key x, ret = %d\n", ret);
			goto out;
		}

		pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);

		ret = hclge_dbg_fd_tcam_read(hdev, false, tcam_buf,
					     HCLGE_FD_STAGE_1, i);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to get qb tcam key y, ret = %d\n", ret);
			goto out;
		}

		pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
	}

out:
	kfree(tcam_buf);
	return ret;
}

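/* Dump the flow director hit counters of the PF and each enabled VF. */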
static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
{
	u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
	struct hclge_fd_ad_cnt_read_cmd *req;
	char str_id[HCLGE_DBG_ID_LEN];
	struct hclge_desc desc;
	int pos = 0;
	int ret;
	u64 cnt;
	u8 i;

	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
		return -EOPNOTSUPP;

	pos += scnprintf(buf + pos, len - pos,
			 "func_id\thit_times\n");

	for (i = 0; i < func_num; i++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_CNT_OP, true);
		req = (struct hclge_fd_ad_cnt_read_cmd *)desc.data;
		req->index = cpu_to_le16(i);
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev, "failed to get fd counter, ret = %d\n",
				ret);
			return ret;
		}
		cnt = le64_to_cpu(req->cnt);
		hclge_dbg_get_func_id_str(str_id, i);
		pos += scnprintf(buf + pos, len - pos,
				 "%s\t%llu\n", str_id, cnt);
	}

	return 0;
}

static const struct hclge_dbg_status_dfx_info hclge_dbg_rst_info[] = {
	{HCLGE_MISC_VECTOR_REG_BASE, "vector0 interrupt enable status"},
	{HCLGE_MISC_RESET_STS_REG,   "reset interrupt source"},
	{HCLGE_MISC_VECTOR_INT_STS,  "reset interrupt status"},
	{HCLGE_RAS_PF_OTHER_INT_STS_REG, "RAS interrupt status"},
	{HCLGE_GLOBAL_RESET_REG,  "hardware reset status"},
	{HCLGE_NIC_CSQ_DEPTH_REG, "handshake status"},
	{HCLGE_FUN_RST_ING, "function reset status"}
};

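/* Dump reset statistics and the reset related status registers. */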
int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len)
{
	u32 i, offset;
	int pos = 0;

	pos += scnprintf(buf + pos, len - pos, "PF reset count: %u\n",
			 hdev->rst_stats.pf_rst_cnt);
	pos += scnprintf(buf + pos, len - pos, "FLR reset count: %u\n",
			 hdev->rst_stats.flr_rst_cnt);
	pos += scnprintf(buf + pos, len - pos, "GLOBAL reset count: %u\n",
			 hdev->rst_stats.global_rst_cnt);
	pos += scnprintf(buf + pos, len - pos, "IMP reset count: %u\n",
			 hdev->rst_stats.imp_rst_cnt);
	pos += scnprintf(buf + pos, len - pos, "reset done count: %u\n",
			 hdev->rst_stats.reset_done_cnt);
	pos += scnprintf(buf + pos, len - pos, "HW reset done count: %u\n",
			 hdev->rst_stats.hw_reset_done_cnt);
	pos += scnprintf(buf + pos, len - pos, "reset count: %u\n",
			 hdev->rst_stats.reset_cnt);
	pos += scnprintf(buf + pos, len - pos, "reset fail count: %u\n",
			 hdev->rst_stats.reset_fail_cnt);

	for (i = 0; i < ARRAY_SIZE(hclge_dbg_rst_info); i++) {
		offset = hclge_dbg_rst_info[i].offset;
		pos += scnprintf(buf + pos, len - pos, "%s: 0x%x\n",
				 hclge_dbg_rst_info[i].message,
				 hclge_read_dev(&hdev->hw, offset));
	}

	pos += scnprintf(buf + pos, len - pos, "hdev state: 0x%lx\n",
			 hdev->state);

	return 0;
}

static int hclge_dbg_dump_serv_info(struct hclge_dev *hdev, char *buf, int len)
{
	unsigned long rem_nsec;
	int pos = 0;
	u64 lc;

	lc = local_clock();
	rem_nsec = do_div(lc, HCLGE_BILLION_NANO_SECONDS);

	pos += scnprintf(buf + pos, len - pos, "local_clock: [%5lu.%06lu]\n",
			 (unsigned long)lc, rem_nsec / 1000);
	pos += scnprintf(buf + pos, len - pos, "delta: %u(ms)\n",
			 jiffies_to_msecs(jiffies - hdev->last_serv_processed));
	pos += scnprintf(buf + pos, len - pos,
			 "last_service_task_processed: %lu(jiffies)\n",
			 hdev->last_serv_processed);
	pos += scnprintf(buf + pos, len - pos, "last_service_task_cnt: %lu\n",
			 hdev->serv_processed_cnt);

	return 0;
}

static int hclge_dbg_dump_interrupt(struct hclge_dev *hdev, char *buf, int len)
{
	int pos = 0;

	pos += scnprintf(buf + pos, len - pos, "num_nic_msi: %u\n",
			 hdev->num_nic_msi);
	pos += scnprintf(buf + pos, len - pos, "num_roce_msi: %u\n",
			 hdev->num_roce_msi);
	pos += scnprintf(buf + pos, len - pos, "num_roh_msi: %u\n",
			 hdev->num_roh_msi);
	pos += scnprintf(buf + pos, len - pos, "num_msi_used: %u\n",
			 hdev->num_msi_used);
	pos += scnprintf(buf + pos, len - pos, "num_msi_left: %u\n",
			 hdev->num_msi_left);

	return 0;
}

static void hclge_dbg_imp_info_data_print(struct hclge_desc *desc_src,
					  char *buf, int len, u32 bd_num)
{
#define HCLGE_DBG_IMP_INFO_PRINT_OFFSET 0x2

	struct hclge_desc *desc_index = desc_src;
	u32 offset = 0;
	int pos = 0;
	u32 i, j;

	pos += scnprintf(buf + pos, len - pos, "offset | data\n");

	for (i = 0; i < bd_num; i++) {
		j = 0;
		while (j < HCLGE_DESC_DATA_LEN - 1) {
			pos += scnprintf(buf + pos, len - pos, "0x%04x | ",
					 offset);
			pos += scnprintf(buf + pos, len - pos, "0x%08x  ",
					 le32_to_cpu(desc_index->data[j++]));
			pos += scnprintf(buf + pos, len - pos, "0x%08x\n",
					 le32_to_cpu(desc_index->data[j++]));
			offset += sizeof(u32) * HCLGE_DBG_IMP_INFO_PRINT_OFFSET;
		}
		desc_index++;
	}
}

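/* Query the IMP statistics in two steps: first read how many descriptors (BDs)
 * hold the data, then fetch that many descriptors and print their contents as
 * "offset | data" pairs of 32-bit words.
 */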
static int
hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
{
	struct hclge_get_imp_bd_cmd *req;
	struct hclge_desc *desc_src;
	struct hclge_desc desc;
	u32 bd_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_STATS_BD, true);

	req = (struct hclge_get_imp_bd_cmd *)desc.data;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get imp statistics bd number, ret = %d\n",
			ret);
		return ret;
	}

	bd_num = le32_to_cpu(req->bd_num);
	if (!bd_num) {
		dev_err(&hdev->pdev->dev, "imp statistics bd number is 0!\n");
		return -EINVAL;
	}

	desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc_src)
		return -ENOMEM;

	ret  = hclge_dbg_cmd_send(hdev, desc_src, 0, bd_num,
				  HCLGE_OPC_IMP_STATS_INFO);
	if (ret) {
		kfree(desc_src);
		dev_err(&hdev->pdev->dev,
			"failed to get imp statistics, ret = %d\n", ret);
		return ret;
	}

	hclge_dbg_imp_info_data_print(desc_src, buf, len, bd_num);

	kfree(desc_src);

	return 0;
}

#define HCLGE_CMD_NCL_CONFIG_BD_NUM	5
#define HCLGE_MAX_NCL_CONFIG_LENGTH	16384

static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
					char *buf, int len, int *pos)
{
#define HCLGE_CMD_DATA_NUM		6

	int offset = HCLGE_MAX_NCL_CONFIG_LENGTH - *index;
	int i, j;

	for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) {
		for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
			if (i == 0 && j == 0)
				continue;

			*pos += scnprintf(buf + *pos, len - *pos,
					  "0x%04x | 0x%08x\n", offset,
					  le32_to_cpu(desc[i].data[j]));

			offset += sizeof(u32);
			*index -= sizeof(u32);

			if (*index <= 0)
				return;
		}
	}
}

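/* Dump HCLGE_MAX_NCL_CONFIG_LENGTH bytes of NCL config data. Each query
 * returns at most HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD bytes spread over
 * HCLGE_CMD_NCL_CONFIG_BD_NUM descriptors; the low 16 bits of data0 carry
 * the read offset and the high 16 bits the read length.
 */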
static int
hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len)
{
#define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD	(20 + 24 * 4)

	struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
	int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
	int index = HCLGE_MAX_NCL_CONFIG_LENGTH;
	int pos = 0;
	u32 data0;
	int ret;

	pos += scnprintf(buf + pos, len - pos, "offset | data\n");

	while (index > 0) {
		data0 = HCLGE_MAX_NCL_CONFIG_LENGTH - index;
		if (index >= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD)
			data0 |= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD << 16;
		else
			data0 |= (u32)index << 16;
		ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num,
					 HCLGE_OPC_QUERY_NCL_CONFIG);
		if (ret)
			return ret;

		hclge_ncl_config_data_print(desc, &index, buf, len, &pos);
	}

	return 0;
}

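/* Dump the current loopback configuration: app (MAC) loopback, serdes
 * serial/parallel loopback and, when a PHY exists or is driven by the IMP,
 * the phy loopback state.
 */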
static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	struct hclge_config_mac_mode_cmd *req_app;
	struct hclge_common_lb_cmd *req_common;
	struct hclge_desc desc;
	u8 loopback_en;
	int pos = 0;
	int ret;

	req_app = (struct hclge_config_mac_mode_cmd *)desc.data;
	req_common = (struct hclge_common_lb_cmd *)desc.data;

	pos += scnprintf(buf + pos, len - pos, "mac id: %u\n",
			 hdev->hw.mac.mac_id);

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to dump app loopback status, ret = %d\n", ret);
		return ret;
	}

	loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en),
				    HCLGE_MAC_APP_LP_B);
	pos += scnprintf(buf + pos, len - pos, "app loopback: %s\n",
			 state_str[loopback_en]);

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to dump common loopback status, ret = %d\n",
			ret);
		return ret;
	}

	loopback_en = req_common->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
	pos += scnprintf(buf + pos, len - pos, "serdes serial loopback: %s\n",
			 state_str[loopback_en]);

	loopback_en = req_common->enable &
			HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0;
	pos += scnprintf(buf + pos, len - pos, "serdes parallel loopback: %s\n",
			 state_str[loopback_en]);

	if (phydev) {
		loopback_en = phydev->loopback_enabled;
		pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
				 state_str[loopback_en]);
	} else if (hnae3_dev_phy_imp_supported(hdev)) {
		loopback_en = req_common->enable &
			      HCLGE_CMD_GE_PHY_INNER_LOOP_B;
		pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
				 state_str[loopback_en]);
	}

	return 0;
}

/* hclge_dbg_dump_mac_tnl_status: print messages about mac tnl interrupts
 * @hdev: pointer to struct hclge_dev
 * @buf: buffer to write the dumped messages into
 * @len: length of @buf
 */
static int
hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev, char *buf, int len)
{
	struct hclge_mac_tnl_stats stats;
	unsigned long rem_nsec;
	int pos = 0;

	pos += scnprintf(buf + pos, len - pos,
			 "Recently generated mac tnl interruption:\n");

	while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
		rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);

		pos += scnprintf(buf + pos, len - pos,
				 "[%07lu.%03lu] status = 0x%x\n",
				 (unsigned long)stats.time, rem_nsec / 1000,
				 stats.status);
	}

	return 0;
}

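/* Table layout and dump helper for the per-function unicast/multicast MAC
 * address lists maintained in each vport.
 */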
static const struct hclge_dbg_item mac_list_items[] = {
	{ "FUNC_ID", 2 },
	{ "MAC_ADDR", 12 },
	{ "STATE", 2 },
};

static void hclge_dbg_dump_mac_list(struct hclge_dev *hdev, char *buf, int len,
				    bool is_unicast)
{
	char data_str[ARRAY_SIZE(mac_list_items)][HCLGE_DBG_DATA_STR_LEN];
	char content[HCLGE_DBG_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
	char *result[ARRAY_SIZE(mac_list_items)];
	struct hclge_mac_node *mac_node, *tmp;
	struct hclge_vport *vport;
	struct list_head *list;
	u32 func_id;
	int pos = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(mac_list_items); i++)
		result[i] = &data_str[i][0];

	pos += scnprintf(buf + pos, len - pos, "%s MAC_LIST:\n",
			 is_unicast ? "UC" : "MC");
	hclge_dbg_fill_content(content, sizeof(content), mac_list_items,
			       NULL, ARRAY_SIZE(mac_list_items));
	pos += scnprintf(buf + pos, len - pos, "%s", content);

	for (func_id = 0; func_id < hdev->num_alloc_vport; func_id++) {
		vport = &hdev->vport[func_id];
		list = is_unicast ? &vport->uc_mac_list : &vport->mc_mac_list;
		spin_lock_bh(&vport->mac_list_lock);
		list_for_each_entry_safe(mac_node, tmp, list, node) {
			i = 0;
			result[i++] = hclge_dbg_get_func_id_str(str_id,
								func_id);
			sprintf(result[i++], "%pM", mac_node->mac_addr);
			sprintf(result[i++], "%5s",
				hclge_mac_state_str[mac_node->state]);
			hclge_dbg_fill_content(content, sizeof(content),
					       mac_list_items,
					       (const char **)result,
					       ARRAY_SIZE(mac_list_items));
			pos += scnprintf(buf + pos, len - pos, "%s", content);
		}
		spin_unlock_bh(&vport->mac_list_lock);
	}
}

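/* Dump the unicast MAC VLAN table (UMV) space usage: the global sizes, the
 * number of entries used by each function and the number of multicast MAC
 * addresses in use.
 */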
static int hclge_dbg_dump_umv_info(struct hclge_dev *hdev, char *buf, int len)
{
	u8 func_num = pci_num_vf(hdev->pdev) + 1;
	struct hclge_vport *vport;
	int pos = 0;
	u8 i;

	pos += scnprintf(buf, len, "num_alloc_vport   : %u\n",
			  hdev->num_alloc_vport);
	pos += scnprintf(buf + pos, len - pos, "max_umv_size     : %u\n",
			 hdev->max_umv_size);
	pos += scnprintf(buf + pos, len - pos, "wanted_umv_size  : %u\n",
			 hdev->wanted_umv_size);
	pos += scnprintf(buf + pos, len - pos, "priv_umv_size    : %u\n",
			 hdev->priv_umv_size);

	mutex_lock(&hdev->vport_lock);
	pos += scnprintf(buf + pos, len - pos, "share_umv_size   : %u\n",
			 hdev->share_umv_size);
	for (i = 0; i < func_num; i++) {
		vport = &hdev->vport[i];
		pos += scnprintf(buf + pos, len - pos,
				 "vport(%u) used_umv_num : %u\n",
				 i, vport->used_umv_num);
	}
	mutex_unlock(&hdev->vport_lock);

	pos += scnprintf(buf + pos, len - pos, "used_mc_mac_num  : %u\n",
			 hdev->used_mc_mac_num);

	return 0;
}

static int hclge_get_vlan_rx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
					 struct hclge_dbg_vlan_cfg *vlan_cfg)
{
	struct hclge_vport_vtag_rx_cfg_cmd *req;
	struct hclge_desc desc;
	u16 bmap_index;
	u8 rx_cfg;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, true);

	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
	req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD;
	bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE;
	req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get vport%u rxvlan cfg, ret = %d\n",
			vf_id, ret);
		return ret;
	}

	rx_cfg = req->vport_vlan_cfg;
	vlan_cfg->strip_tag1 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG1_EN_B);
	vlan_cfg->strip_tag2 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG2_EN_B);
	vlan_cfg->drop_tag1 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG1_EN_B);
	vlan_cfg->drop_tag2 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG2_EN_B);
	vlan_cfg->pri_only1 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG1_EN_B);
	vlan_cfg->pri_only2 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG2_EN_B);

	return 0;
}

static int hclge_get_vlan_tx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
					 struct hclge_dbg_vlan_cfg *vlan_cfg)
{
	struct hclge_vport_vtag_tx_cfg_cmd *req;
	struct hclge_desc desc;
	u16 bmap_index;
	u8 tx_cfg;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, true);
	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
	req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD;
	bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE;
	req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get vport%u txvlan cfg, ret = %d\n",
			vf_id, ret);
		return ret;
	}

	tx_cfg = req->vport_vlan_cfg;
	vlan_cfg->pvid = le16_to_cpu(req->def_vlan_tag1);

	vlan_cfg->accept_tag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG1_B);
	vlan_cfg->accept_tag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG2_B);
	vlan_cfg->accept_untag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG1_B);
	vlan_cfg->accept_untag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG2_B);
	vlan_cfg->insert_tag1 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG1_EN_B);
	vlan_cfg->insert_tag2 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG2_EN_B);
	vlan_cfg->shift_tag = hnae3_get_bit(tx_cfg, HCLGE_TAG_SHIFT_MODE_EN_B);

	return 0;
}

static int hclge_get_vlan_filter_config_cmd(struct hclge_dev *hdev,
					    u8 vlan_type, u8 vf_id,
					    struct hclge_desc *desc)
{
	struct hclge_vlan_filter_ctrl_cmd *req;
	int ret;

	hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
	req = (struct hclge_vlan_filter_ctrl_cmd *)desc->data;
	req->vlan_type = vlan_type;
	req->vf_id = vf_id;

	ret = hclge_cmd_send(&hdev->hw, desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to get vport%u vlan filter config, ret = %d.\n",
			vf_id, ret);

	return ret;
}

static int hclge_get_vlan_filter_state(struct hclge_dev *hdev, u8 vlan_type,
				       u8 vf_id, u8 *vlan_fe)
{
	struct hclge_vlan_filter_ctrl_cmd *req;
	struct hclge_desc desc;
	int ret;

	ret = hclge_get_vlan_filter_config_cmd(hdev, vlan_type, vf_id, &desc);
	if (ret)
		return ret;

	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
	*vlan_fe = req->vlan_fe;

	return 0;
}

static int hclge_get_port_vlan_filter_bypass_state(struct hclge_dev *hdev,
						   u8 vf_id, u8 *bypass_en)
{
	struct hclge_port_vlan_filter_bypass_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, true);
	req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
	req->vf_id = vf_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get vport%u port vlan filter bypass state, ret = %d.\n",
			vf_id, ret);
		return ret;
	}

	*bypass_en = hnae3_get_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B);

	return 0;
}

static const struct hclge_dbg_item vlan_filter_items[] = {
	{ "FUNC_ID", 2 },
	{ "I_VF_VLAN_FILTER", 2 },
	{ "E_VF_VLAN_FILTER", 2 },
	{ "PORT_VLAN_FILTER_BYPASS", 0 }
};

static const struct hclge_dbg_item vlan_offload_items[] = {
	{ "FUNC_ID", 2 },
	{ "PVID", 4 },
	{ "ACCEPT_TAG1", 2 },
	{ "ACCEPT_TAG2", 2 },
	{ "ACCEPT_UNTAG1", 2 },
	{ "ACCEPT_UNTAG2", 2 },
	{ "INSERT_TAG1", 2 },
	{ "INSERT_TAG2", 2 },
	{ "SHIFT_TAG", 2 },
	{ "STRIP_TAG1", 2 },
	{ "STRIP_TAG2", 2 },
	{ "DROP_TAG1", 2 },
	{ "DROP_TAG2", 2 },
	{ "PRI_ONLY_TAG1", 2 },
	{ "PRI_ONLY_TAG2", 0 }
};

static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
					     int len, int *pos)
{
	char content[HCLGE_DBG_VLAN_FLTR_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
	const char *result[ARRAY_SIZE(vlan_filter_items)];
	u8 i, j, vlan_fe, bypass, ingress, egress;
	u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
	int ret;

	ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_PORT, 0,
					  &vlan_fe);
	if (ret)
		return ret;
	ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
	egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;

	*pos += scnprintf(buf, len, "I_PORT_VLAN_FILTER: %s\n",
			  state_str[ingress]);
	*pos += scnprintf(buf + *pos, len - *pos, "E_PORT_VLAN_FILTER: %s\n",
			  state_str[egress]);

	hclge_dbg_fill_content(content, sizeof(content), vlan_filter_items,
			       NULL, ARRAY_SIZE(vlan_filter_items));
	*pos += scnprintf(buf + *pos, len - *pos, "%s", content);

	for (i = 0; i < func_num; i++) {
		ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_VF, i,
						  &vlan_fe);
		if (ret)
			return ret;

		ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
		egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
		ret = hclge_get_port_vlan_filter_bypass_state(hdev, i, &bypass);
		if (ret)
			return ret;
		j = 0;
		result[j++] = hclge_dbg_get_func_id_str(str_id, i);
		result[j++] = state_str[ingress];
		result[j++] = state_str[egress];
		result[j++] =
			test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
				 hdev->ae_dev->caps) ? state_str[bypass] : "NA";
		hclge_dbg_fill_content(content, sizeof(content),
				       vlan_filter_items, result,
				       ARRAY_SIZE(vlan_filter_items));
		*pos += scnprintf(buf + *pos, len - *pos, "%s", content);
	}
	*pos += scnprintf(buf + *pos, len - *pos, "\n");

	return 0;
}

static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf,
					      int len, int *pos)
{
	char str_id[HCLGE_DBG_ID_LEN], str_pvid[HCLGE_DBG_ID_LEN];
	const char *result[ARRAY_SIZE(vlan_offload_items)];
	char content[HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN];
	u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
	struct hclge_dbg_vlan_cfg vlan_cfg;
	int ret;
	u8 i, j;

	hclge_dbg_fill_content(content, sizeof(content), vlan_offload_items,
			       NULL, ARRAY_SIZE(vlan_offload_items));
	*pos += scnprintf(buf + *pos, len - *pos, "%s", content);

	for (i = 0; i < func_num; i++) {
		ret = hclge_get_vlan_tx_offload_cfg(hdev, i, &vlan_cfg);
		if (ret)
			return ret;

		ret = hclge_get_vlan_rx_offload_cfg(hdev, i, &vlan_cfg);
		if (ret)
			return ret;

		sprintf(str_pvid, "%u", vlan_cfg.pvid);
		j = 0;
		result[j++] = hclge_dbg_get_func_id_str(str_id, i);
		result[j++] = str_pvid;
		result[j++] = state_str[vlan_cfg.accept_tag1];
		result[j++] = state_str[vlan_cfg.accept_tag2];
		result[j++] = state_str[vlan_cfg.accept_untag1];
		result[j++] = state_str[vlan_cfg.accept_untag2];
		result[j++] = state_str[vlan_cfg.insert_tag1];
		result[j++] = state_str[vlan_cfg.insert_tag2];
		result[j++] = state_str[vlan_cfg.shift_tag];
		result[j++] = state_str[vlan_cfg.strip_tag1];
		result[j++] = state_str[vlan_cfg.strip_tag2];
		result[j++] = state_str[vlan_cfg.drop_tag1];
		result[j++] = state_str[vlan_cfg.drop_tag2];
		result[j++] = state_str[vlan_cfg.pri_only1];
		result[j++] = state_str[vlan_cfg.pri_only2];

		hclge_dbg_fill_content(content, sizeof(content),
				       vlan_offload_items, result,
				       ARRAY_SIZE(vlan_offload_items));
		*pos += scnprintf(buf + *pos, len - *pos, "%s", content);
	}

	return 0;
}

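/* Dump the VLAN configuration: the port and per-function VLAN filter states
 * followed by the per-function VLAN tag offload (insert/strip/drop) settings.
 */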
static int hclge_dbg_dump_vlan_config(struct hclge_dev *hdev, char *buf,
				      int len)
{
	int pos = 0;
	int ret;

	ret = hclge_dbg_dump_vlan_filter_config(hdev, buf, len, &pos);
	if (ret)
		return ret;

	return hclge_dbg_dump_vlan_offload_config(hdev, buf, len, &pos);
}

static int hclge_dbg_dump_ptp_info(struct hclge_dev *hdev, char *buf, int len)
{
	struct hclge_ptp *ptp = hdev->ptp;
	u32 sw_cfg = ptp->ptp_cfg;
	unsigned int tx_start;
	unsigned int last_rx;
	int pos = 0;
	u32 hw_cfg;
	int ret;

	pos += scnprintf(buf + pos, len - pos, "phc %s's debug info:\n",
			 ptp->info.name);
	pos += scnprintf(buf + pos, len - pos, "ptp enable: %s\n",
			 test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags) ?
			 "yes" : "no");
	pos += scnprintf(buf + pos, len - pos, "ptp tx enable: %s\n",
			 test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ?
			 "yes" : "no");
	pos += scnprintf(buf + pos, len - pos, "ptp rx enable: %s\n",
			 test_bit(HCLGE_PTP_FLAG_RX_EN, &ptp->flags) ?
			 "yes" : "no");

	last_rx = jiffies_to_msecs(ptp->last_rx);
	pos += scnprintf(buf + pos, len - pos, "last rx time: %lu.%lu\n",
			 last_rx / MSEC_PER_SEC, last_rx % MSEC_PER_SEC);
	pos += scnprintf(buf + pos, len - pos, "rx count: %lu\n", ptp->rx_cnt);

	tx_start = jiffies_to_msecs(ptp->tx_start);
	pos += scnprintf(buf + pos, len - pos, "last tx start time: %lu.%lu\n",
			 tx_start / MSEC_PER_SEC, tx_start % MSEC_PER_SEC);
	pos += scnprintf(buf + pos, len - pos, "tx count: %lu\n", ptp->tx_cnt);
	pos += scnprintf(buf + pos, len - pos, "tx skipped count: %lu\n",
			 ptp->tx_skipped);
	pos += scnprintf(buf + pos, len - pos, "tx timeout count: %lu\n",
			 ptp->tx_timeout);
	pos += scnprintf(buf + pos, len - pos, "last tx seqid: %u\n",
			 ptp->last_tx_seqid);

	ret = hclge_ptp_cfg_qry(hdev, &hw_cfg);
	if (ret)
		return ret;

	pos += scnprintf(buf + pos, len - pos, "sw_cfg: %#x, hw_cfg: %#x\n",
			 sw_cfg, hw_cfg);

	pos += scnprintf(buf + pos, len - pos, "tx type: %d, rx filter: %d\n",
			 ptp->ts_cfg.tx_type, ptp->ts_cfg.rx_filter);

	return 0;
}

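/* Dump the flow director TCAM, using the QB specific format when QB mode is
 * enabled on the hardware.
 */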
static int hclge_dbg_dump_tcam(struct hclge_dev *hdev, char *buf, int len)
{
	if (test_bit(HCLGE_STATE_HW_QB_ENABLE, &hdev->state))
		return hclge_dbg_dump_qb_tcam(hdev, buf, len);
	else
		return hclge_dbg_dump_fd_tcam(hdev, buf, len);
}

static int hclge_dbg_dump_mac_uc(struct hclge_dev *hdev, char *buf, int len)
{
	hclge_dbg_dump_mac_list(hdev, buf, len, true);

	return 0;
}

static int hclge_dbg_dump_mac_mc(struct hclge_dev *hdev, char *buf, int len)
{
	hclge_dbg_dump_mac_list(hdev, buf, len, false);

	return 0;
}

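/* Map each debugfs command to its dump handler: entries provide either
 * .dbg_dump for plain dumps or .dbg_dump_reg for register dumps that also
 * need the command id.
 */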
static const struct hclge_dbg_func hclge_dbg_cmd_func[] = {
	{
		.cmd = HNAE3_DBG_CMD_TM_NODES,
		.dbg_dump = hclge_dbg_dump_tm_nodes,
	},
	{
		.cmd = HNAE3_DBG_CMD_TM_PRI,
		.dbg_dump = hclge_dbg_dump_tm_pri,
	},
	{
		.cmd = HNAE3_DBG_CMD_TM_QSET,
		.dbg_dump = hclge_dbg_dump_tm_qset,
	},
	{
		.cmd = HNAE3_DBG_CMD_TM_MAP,
		.dbg_dump = hclge_dbg_dump_tm_map,
	},
	{
		.cmd = HNAE3_DBG_CMD_TM_PG,
		.dbg_dump = hclge_dbg_dump_tm_pg,
	},
	{
		.cmd = HNAE3_DBG_CMD_TM_PORT,
		.dbg_dump = hclge_dbg_dump_tm_port,
	},
	{
		.cmd = HNAE3_DBG_CMD_TC_SCH_INFO,
		.dbg_dump = hclge_dbg_dump_tc,
	},
	{
		.cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG,
		.dbg_dump = hclge_dbg_dump_qos_pause_cfg,
	},
	{
		.cmd = HNAE3_DBG_CMD_QOS_PRI_MAP,
		.dbg_dump = hclge_dbg_dump_qos_pri_map,
	},
	{
		.cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
		.dbg_dump = hclge_dbg_dump_qos_dscp_map,
	},
	{
		.cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
		.dbg_dump = hclge_dbg_dump_qos_buf_cfg,
	},
	{
		.cmd = HNAE3_DBG_CMD_MAC_UC,
		.dbg_dump = hclge_dbg_dump_mac_uc,
	},
	{
		.cmd = HNAE3_DBG_CMD_MAC_MC,
		.dbg_dump = hclge_dbg_dump_mac_mc,
	},
	{
		.cmd = HNAE3_DBG_CMD_MNG_TBL,
		.dbg_dump = hclge_dbg_dump_mng_table,
	},
	{
		.cmd = HNAE3_DBG_CMD_LOOPBACK,
		.dbg_dump = hclge_dbg_dump_loopback,
	},
	{
		.cmd = HNAE3_DBG_CMD_PTP_INFO,
		.dbg_dump = hclge_dbg_dump_ptp_info,
	},
	{
		.cmd = HNAE3_DBG_CMD_INTERRUPT_INFO,
		.dbg_dump = hclge_dbg_dump_interrupt,
	},
	{
		.cmd = HNAE3_DBG_CMD_RESET_INFO,
		.dbg_dump = hclge_dbg_dump_rst_info,
	},
	{
		.cmd = HNAE3_DBG_CMD_IMP_INFO,
		.dbg_dump = hclge_dbg_get_imp_stats_info,
	},
	{
		.cmd = HNAE3_DBG_CMD_NCL_CONFIG,
		.dbg_dump = hclge_dbg_dump_ncl_config,
	},
	{
		.cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
	},
	{
		.cmd = HNAE3_DBG_CMD_REG_SSU,
		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
	},
	{
		.cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
	},
	{
		.cmd = HNAE3_DBG_CMD_REG_RPU,
		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
	},
	{
		.cmd = HNAE3_DBG_CMD_REG_NCSI,
		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
	},
	{
		.cmd = HNAE3_DBG_CMD_REG_RTC,
		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
	},
	{
		.cmd = HNAE3_DBG_CMD_REG_PPP,
		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
	},
	{
		.cmd = HNAE3_DBG_CMD_REG_RCB,
		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
	},
	{
		.cmd = HNAE3_DBG_CMD_REG_TQP,
		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
	},
	{
		.cmd = HNAE3_DBG_CMD_REG_MAC,
		.dbg_dump = hclge_dbg_dump_mac,
	},
	{
		.cmd = HNAE3_DBG_CMD_REG_DCB,
		.dbg_dump = hclge_dbg_dump_dcb,
	},
	{
		.cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS,
		.dbg_dump = hclge_dbg_dump_mac_tnl_status,
	},
	{
		.cmd = HNAE3_DBG_CMD_FD_TCAM,
		.dbg_dump = hclge_dbg_dump_tcam,
	},
	{
		.cmd = HNAE3_DBG_CMD_SERV_INFO,
		.dbg_dump = hclge_dbg_dump_serv_info,
	},
	{
		.cmd = HNAE3_DBG_CMD_VLAN_CONFIG,
		.dbg_dump = hclge_dbg_dump_vlan_config,
	},
	{
		.cmd = HNAE3_DBG_CMD_FD_COUNTER,
		.dbg_dump = hclge_dbg_dump_fd_counter,
	},
	{
		.cmd = HNAE3_DBG_CMD_UMV_INFO,
		.dbg_dump = hclge_dbg_dump_umv_info,
	},
};

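/* Entry point for the hns3 debugfs layer: look up the handler for @cmd in
 * hclge_dbg_cmd_func[] and fill @buf with at most @len bytes.
 */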
int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
		       char *buf, int len)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	const struct hclge_dbg_func *cmd_func;
	struct hclge_dev *hdev = vport->back;
	u32 i;

	for (i = 0; i < ARRAY_SIZE(hclge_dbg_cmd_func); i++) {
		if (cmd == hclge_dbg_cmd_func[i].cmd) {
			cmd_func = &hclge_dbg_cmd_func[i];
			if (cmd_func->dbg_dump)
				return cmd_func->dbg_dump(hdev, buf, len);
			else
				return cmd_func->dbg_dump_reg(hdev, cmd, buf,
							      len);
		}
	}

	dev_err(&hdev->pdev->dev, "invalid command(%d)\n", cmd);
	return -EINVAL;
}