// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2018-2019 Hisilicon Limited. */

#include <linux/device.h>

#include "hclge_debugfs.h"
#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"
#include "hnae3.h"

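/* hclge_dbg_get_dfx_bd_num: query how many buffer descriptors are needed to
 * dump a DFX register group
 * @hdev: pointer to struct hclge_dev
 * @offset: index of the requested BD number in the firmware reply
 */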
static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset)
{
	struct hclge_desc desc[4];
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
	desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);

	ret = hclge_cmd_send(&hdev->hw, desc, 4);
	if (ret != HCLGE_CMD_EXEC_SUCCESS) {
		dev_err(&hdev->pdev->dev,
			"get dfx bdnum fail, status is %d.\n", ret);
		return ret;
	}

	return (int)le32_to_cpu(desc[offset / 6].data[offset % 6]);
}

static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
			      struct hclge_desc *desc_src,
			      int index, int bd_num,
			      enum hclge_opcode_type cmd)
{
	struct hclge_desc *desc = desc_src;
	int ret, i;

	hclge_cmd_setup_basic_desc(desc, cmd, true);
	desc->data[0] = cpu_to_le32(index);

	for (i = 1; i < bd_num; i++) {
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		desc++;
		hclge_cmd_setup_basic_desc(desc, cmd, true);
	}

	ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"read reg cmd send fail, status is %d.\n", ret);
		return ret;
	}

	return ret;
}

static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
				      struct hclge_dbg_dfx_message *dfx_message,
				      char *cmd_buf, int msg_num, int offset,
				      enum hclge_opcode_type cmd)
{
#define BD_DATA_NUM       6

	struct hclge_desc *desc_src;
	struct hclge_desc *desc;
	int bd_num, buf_len;
	int ret, i;
	int index;
	int max;

	ret = kstrtouint(cmd_buf, 10, &index);
	index = (ret != 0) ? 0 : index;

	bd_num = hclge_dbg_get_dfx_bd_num(hdev, offset);
	if (bd_num <= 0)
		return;

	buf_len	 = sizeof(struct hclge_desc) * bd_num;
	desc_src = kzalloc(buf_len, GFP_KERNEL);
	if (!desc_src) {
		dev_err(&hdev->pdev->dev, "call kzalloc failed\n");
		return;
	}

	desc = desc_src;
	ret  = hclge_dbg_cmd_send(hdev, desc, index, bd_num, cmd);
	if (ret != HCLGE_CMD_EXEC_SUCCESS) {
		kfree(desc_src);
		return;
	}

	max = (bd_num * BD_DATA_NUM) <= msg_num ?
		(bd_num * BD_DATA_NUM) : msg_num;

	desc = desc_src;
	for (i = 0; i < max; i++) {
		if (i > 0 && (i % BD_DATA_NUM) == 0)
			desc++;
		if (dfx_message->flag)
			dev_info(&hdev->pdev->dev, "%s: 0x%x\n",
				 dfx_message->message,
				 desc->data[i % BD_DATA_NUM]);

		dfx_message++;
	}

	kfree(desc_src);
}

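/* hclge_dbg_dump_dcb: dump DCB related DFX status registers
 * @hdev: pointer to struct hclge_dev
 * @cmd_buf: string that contains the port, pri, pg, rq, nq and qset ids
 */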
static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *cmd_buf)
{
	struct device *dev = &hdev->pdev->dev;
	struct hclge_dbg_bitmap_cmd *bitmap;
	int rq_id, pri_id, qset_id;
	int port_id, nq_id, pg_id;
	struct hclge_desc desc[2];

	int cnt, ret;

	cnt = sscanf(cmd_buf, "%i %i %i %i %i %i",
		     &port_id, &pri_id, &pg_id, &rq_id, &nq_id, &qset_id);
	if (cnt != 6) {
		dev_err(&hdev->pdev->dev,
			"dump dcb: bad command parameter, cnt=%d\n", cnt);
		return;
	}

	ret = hclge_dbg_cmd_send(hdev, desc, qset_id, 1,
				 HCLGE_OPC_QSET_DFX_STS);
	if (ret)
		return;

	bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
	dev_info(dev, "roce_qset_mask: 0x%x\n", bitmap->bit0);
	dev_info(dev, "nic_qs_mask: 0x%x\n", bitmap->bit1);
	dev_info(dev, "qs_shaping_pass: 0x%x\n", bitmap->bit2);
	dev_info(dev, "qs_bp_sts: 0x%x\n", bitmap->bit3);

	ret = hclge_dbg_cmd_send(hdev, desc, pri_id, 1, HCLGE_OPC_PRI_DFX_STS);
	if (ret)
		return;

	bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
	dev_info(dev, "pri_mask: 0x%x\n", bitmap->bit0);
	dev_info(dev, "pri_cshaping_pass: 0x%x\n", bitmap->bit1);
	dev_info(dev, "pri_pshaping_pass: 0x%x\n", bitmap->bit2);

	ret = hclge_dbg_cmd_send(hdev, desc, pg_id, 1, HCLGE_OPC_PG_DFX_STS);
	if (ret)
		return;

	bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
	dev_info(dev, "pg_mask: 0x%x\n", bitmap->bit0);
	dev_info(dev, "pg_cshaping_pass: 0x%x\n", bitmap->bit1);
	dev_info(dev, "pg_pshaping_pass: 0x%x\n", bitmap->bit2);

	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
				 HCLGE_OPC_PORT_DFX_STS);
	if (ret)
		return;

	bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
	dev_info(dev, "port_mask: 0x%x\n", bitmap->bit0);
	dev_info(dev, "port_shaping_pass: 0x%x\n", bitmap->bit1);

	ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_NQ_CNT);
	if (ret)
		return;

	dev_info(dev, "sch_nq_cnt: 0x%x\n", desc[0].data[1]);

	ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_RQ_CNT);
	if (ret)
		return;

	dev_info(dev, "sch_rq_cnt: 0x%x\n", desc[0].data[1]);

	ret = hclge_dbg_cmd_send(hdev, desc, 0, 2, HCLGE_OPC_TM_INTERNAL_STS);
	if (ret)
		return;

	dev_info(dev, "pri_bp: 0x%x\n", desc[0].data[1]);
	dev_info(dev, "fifo_dfx_info: 0x%x\n", desc[0].data[2]);
	dev_info(dev, "sch_roce_fifo_afull_gap: 0x%x\n", desc[0].data[3]);
	dev_info(dev, "tx_private_waterline: 0x%x\n", desc[0].data[4]);
	dev_info(dev, "tm_bypass_en: 0x%x\n", desc[0].data[5]);
	dev_info(dev, "SSU_TM_BYPASS_EN: 0x%x\n", desc[1].data[0]);
	dev_info(dev, "SSU_RESERVE_CFG: 0x%x\n", desc[1].data[1]);

	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
				 HCLGE_OPC_TM_INTERNAL_CNT);
	if (ret)
		return;

	dev_info(dev, "SCH_NIC_NUM: 0x%x\n", desc[0].data[1]);
	dev_info(dev, "SCH_ROCE_NUM: 0x%x\n", desc[0].data[2]);

	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
				 HCLGE_OPC_TM_INTERNAL_STS_1);
	if (ret)
		return;

	dev_info(dev, "TC_MAP_SEL: 0x%x\n", desc[0].data[1]);
	dev_info(dev, "IGU_PFC_PRI_EN: 0x%x\n", desc[0].data[2]);
	dev_info(dev, "MAC_PFC_PRI_EN: 0x%x\n", desc[0].data[3]);
	dev_info(dev, "IGU_PRI_MAP_TC_CFG: 0x%x\n", desc[0].data[4]);
	dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n", desc[0].data[5]);
}

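/* hclge_dbg_dump_reg_cmd: parse the register group name in the "dump reg"
 * command and dump the corresponding DFX registers
 * @hdev: pointer to struct hclge_dev
 * @cmd_buf: command string written to the debugfs file
 */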
static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, char *cmd_buf)
{
	int msg_num;

	if (strncmp(&cmd_buf[9], "bios common", 11) == 0) {
		msg_num = sizeof(hclge_dbg_bios_common_reg) /
			  sizeof(struct hclge_dbg_dfx_message);
		hclge_dbg_dump_reg_common(hdev, hclge_dbg_bios_common_reg,
					  &cmd_buf[21], msg_num,
					  HCLGE_DBG_DFX_BIOS_OFFSET,
					  HCLGE_OPC_DFX_BIOS_COMMON_REG);
	} else if (strncmp(&cmd_buf[9], "ssu", 3) == 0) {
		msg_num = sizeof(hclge_dbg_ssu_reg_0) /
			  sizeof(struct hclge_dbg_dfx_message);
		hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_0,
					  &cmd_buf[13], msg_num,
					  HCLGE_DBG_DFX_SSU_0_OFFSET,
					  HCLGE_OPC_DFX_SSU_REG_0);

		msg_num = sizeof(hclge_dbg_ssu_reg_1) /
			  sizeof(struct hclge_dbg_dfx_message);
		hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_1,
					  &cmd_buf[13], msg_num,
					  HCLGE_DBG_DFX_SSU_1_OFFSET,
					  HCLGE_OPC_DFX_SSU_REG_1);

		msg_num = sizeof(hclge_dbg_ssu_reg_2) /
			  sizeof(struct hclge_dbg_dfx_message);
		hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_2,
					  &cmd_buf[13], msg_num,
					  HCLGE_DBG_DFX_SSU_2_OFFSET,
					  HCLGE_OPC_DFX_SSU_REG_2);
	} else if (strncmp(&cmd_buf[9], "igu egu", 7) == 0) {
		msg_num = sizeof(hclge_dbg_igu_egu_reg) /
			  sizeof(struct hclge_dbg_dfx_message);
		hclge_dbg_dump_reg_common(hdev, hclge_dbg_igu_egu_reg,
					  &cmd_buf[17], msg_num,
					  HCLGE_DBG_DFX_IGU_OFFSET,
					  HCLGE_OPC_DFX_IGU_EGU_REG);
	} else if (strncmp(&cmd_buf[9], "rpu", 3) == 0) {
		msg_num = sizeof(hclge_dbg_rpu_reg_0) /
			  sizeof(struct hclge_dbg_dfx_message);
		hclge_dbg_dump_reg_common(hdev, hclge_dbg_rpu_reg_0,
					  &cmd_buf[13], msg_num,
					  HCLGE_DBG_DFX_RPU_0_OFFSET,
					  HCLGE_OPC_DFX_RPU_REG_0);

		msg_num = sizeof(hclge_dbg_rpu_reg_1) /
			  sizeof(struct hclge_dbg_dfx_message);
		hclge_dbg_dump_reg_common(hdev, hclge_dbg_rpu_reg_1,
					  &cmd_buf[13], msg_num,
					  HCLGE_DBG_DFX_RPU_1_OFFSET,
					  HCLGE_OPC_DFX_RPU_REG_1);
	} else if (strncmp(&cmd_buf[9], "ncsi", 4) == 0) {
		msg_num = sizeof(hclge_dbg_ncsi_reg) /
			  sizeof(struct hclge_dbg_dfx_message);
		hclge_dbg_dump_reg_common(hdev, hclge_dbg_ncsi_reg,
					  &cmd_buf[14], msg_num,
					  HCLGE_DBG_DFX_NCSI_OFFSET,
					  HCLGE_OPC_DFX_NCSI_REG);
	} else if (strncmp(&cmd_buf[9], "rtc", 3) == 0) {
		msg_num = sizeof(hclge_dbg_rtc_reg) /
			  sizeof(struct hclge_dbg_dfx_message);
		hclge_dbg_dump_reg_common(hdev, hclge_dbg_rtc_reg,
					  &cmd_buf[13], msg_num,
					  HCLGE_DBG_DFX_RTC_OFFSET,
					  HCLGE_OPC_DFX_RTC_REG);
	} else if (strncmp(&cmd_buf[9], "ppp", 3) == 0) {
		msg_num = sizeof(hclge_dbg_ppp_reg) /
			  sizeof(struct hclge_dbg_dfx_message);
		hclge_dbg_dump_reg_common(hdev, hclge_dbg_ppp_reg,
					  &cmd_buf[13], msg_num,
					  HCLGE_DBG_DFX_PPP_OFFSET,
					  HCLGE_OPC_DFX_PPP_REG);
	} else if (strncmp(&cmd_buf[9], "rcb", 3) == 0) {
		msg_num = sizeof(hclge_dbg_rcb_reg) /
			  sizeof(struct hclge_dbg_dfx_message);
		hclge_dbg_dump_reg_common(hdev, hclge_dbg_rcb_reg,
					  &cmd_buf[13], msg_num,
					  HCLGE_DBG_DFX_RCB_OFFSET,
					  HCLGE_OPC_DFX_RCB_REG);
	} else if (strncmp(&cmd_buf[9], "tqp", 3) == 0) {
		msg_num = sizeof(hclge_dbg_tqp_reg) /
			  sizeof(struct hclge_dbg_dfx_message);
		hclge_dbg_dump_reg_common(hdev, hclge_dbg_tqp_reg,
					  &cmd_buf[13], msg_num,
					  HCLGE_DBG_DFX_TQP_OFFSET,
					  HCLGE_OPC_DFX_TQP_REG);
	} else if (strncmp(&cmd_buf[9], "dcb", 3) == 0) {
		hclge_dbg_dump_dcb(hdev, &cmd_buf[13]);
	} else {
		dev_info(&hdev->pdev->dev, "unknown command\n");
		return;
	}
}

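/* hclge_title_idx_print: print "<title>(<index>): <true_buf>" when flag is
 * set, otherwise print "<title>(<index>): <false_buf>"
 */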
static void hclge_title_idx_print(struct hclge_dev *hdev, bool flag, int index,
				  char *title_buf, char *true_buf,
				  char *false_buf)
{
	if (flag)
		dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index,
			 true_buf);
	else
		dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index,
			 false_buf);
}

static void hclge_dbg_dump_tc(struct hclge_dev *hdev)
{
	struct hclge_ets_tc_weight_cmd *ets_weight;
	struct hclge_desc desc;
	int i, ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "dump tc fail, status is %d.\n", ret);
		return;
	}

	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;

	dev_info(&hdev->pdev->dev, "dump tc\n");
	dev_info(&hdev->pdev->dev, "weight_offset: %u\n",
		 ets_weight->weight_offset);

	for (i = 0; i < HNAE3_MAX_TC; i++)
		hclge_title_idx_print(hdev, ets_weight->tc_weight[i], i,
				      "tc", "no sp mode", "sp mode");
}

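/* hclge_dbg_dump_tm_pg: dump PG and port shaping parameters, scheduling
 * modes and the BP to qset mapping
 * @hdev: pointer to struct hclge_dev
 */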
static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
{
	struct hclge_port_shapping_cmd *port_shap_cfg_cmd;
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_pg_shapping_cmd *pg_shap_cfg_cmd;
	enum hclge_opcode_type cmd;
	struct hclge_desc desc;
	int ret;

	cmd = HCLGE_OPC_TM_PG_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PG_C pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
	dev_info(&hdev->pdev->dev, "PG_C pg_shapping: 0x%x\n",
		 pg_shap_cfg_cmd->pg_shapping_para);

	cmd = HCLGE_OPC_TM_PG_P_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PG_P pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
	dev_info(&hdev->pdev->dev, "PG_P pg_shapping: 0x%x\n",
		 pg_shap_cfg_cmd->pg_shapping_para);

	cmd = HCLGE_OPC_TM_PORT_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PORT port_shapping: 0x%x\n",
		 port_shap_cfg_cmd->port_shapping_para);

	cmd = HCLGE_OPC_TM_PG_SCH_MODE_CFG;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	dev_info(&hdev->pdev->dev, "PG_SCH pg_id: %u\n", desc.data[0]);

	cmd = HCLGE_OPC_TM_PRI_SCH_MODE_CFG;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	dev_info(&hdev->pdev->dev, "PRI_SCH pri_id: %u\n", desc.data[0]);

	cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n", desc.data[0]);

	cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "BP_TO_QSET tc_id: %u\n",
		 bp_to_qs_map_cmd->tc_id);
	dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_group_id: 0x%x\n",
		 bp_to_qs_map_cmd->qs_group_id);
	dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n",
		 bp_to_qs_map_cmd->qs_bit_map);
	return;

err_tm_pg_cmd_send:
	dev_err(&hdev->pdev->dev, "dump tm_pg fail(0x%x), status is %d\n",
		cmd, ret);
}

static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
{
	struct hclge_priority_weight_cmd *priority_weight;
	struct hclge_pg_to_pri_link_cmd *pg_to_pri_map;
	struct hclge_qs_to_pri_link_cmd *qs_to_pri_map;
	struct hclge_nq_to_qs_link_cmd *nq_to_qs_map;
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	struct hclge_pg_weight_cmd *pg_weight;
	struct hclge_qs_weight_cmd *qs_weight;
	enum hclge_opcode_type cmd;
	struct hclge_desc desc;
	int ret;

	cmd = HCLGE_OPC_TM_PG_TO_PRI_LINK;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	pg_to_pri_map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "dump tm\n");
	dev_info(&hdev->pdev->dev, "PG_TO_PRI gp_id: %u\n",
		 pg_to_pri_map->pg_id);
	dev_info(&hdev->pdev->dev, "PG_TO_PRI map: 0x%x\n",
		 pg_to_pri_map->pri_bit_map);

	cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	qs_to_pri_map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "QS_TO_PRI qs_id: %u\n",
		 qs_to_pri_map->qs_id);
	dev_info(&hdev->pdev->dev, "QS_TO_PRI priority: %u\n",
		 qs_to_pri_map->priority);
	dev_info(&hdev->pdev->dev, "QS_TO_PRI link_vld: %u\n",
		 qs_to_pri_map->link_vld);

	cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n", nq_to_qs_map->nq_id);
	dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: 0x%x\n",
		 nq_to_qs_map->qset_id);

	cmd = HCLGE_OPC_TM_PG_WEIGHT;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	pg_weight = (struct hclge_pg_weight_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PG pg_id: %u\n", pg_weight->pg_id);
	dev_info(&hdev->pdev->dev, "PG dwrr: %u\n", pg_weight->dwrr);

	cmd = HCLGE_OPC_TM_QS_WEIGHT;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "QS qs_id: %u\n", qs_weight->qs_id);
	dev_info(&hdev->pdev->dev, "QS dwrr: %u\n", qs_weight->dwrr);

	cmd = HCLGE_OPC_TM_PRI_WEIGHT;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PRI pri_id: %u\n", priority_weight->pri_id);
	dev_info(&hdev->pdev->dev, "PRI dwrr: %u\n", priority_weight->dwrr);

	cmd = HCLGE_OPC_TM_PRI_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PRI_C pri_id: %u\n", shap_cfg_cmd->pri_id);
	dev_info(&hdev->pdev->dev, "PRI_C pri_shapping: 0x%x\n",
		 shap_cfg_cmd->pri_shapping_para);

	cmd = HCLGE_OPC_TM_PRI_P_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PRI_P pri_id: %u\n", shap_cfg_cmd->pri_id);
	dev_info(&hdev->pdev->dev, "PRI_P pri_shapping: 0x%x\n",
		 shap_cfg_cmd->pri_shapping_para);

	hclge_dbg_dump_tm_pg(hdev);

	return;

err_tm_cmd_send:
	dev_err(&hdev->pdev->dev, "dump tm fail(0x%x), status is %d\n",
		cmd, ret);
}

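/* hclge_dbg_dump_tm_map: dump the qset, priority and tc that the specified
 * queue maps to, plus the BP to qset bitmaps of that tc
 * @hdev: pointer to struct hclge_dev
 * @cmd_buf: string that contains the queue id
 */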
static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *cmd_buf)
{
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_nq_to_qs_link_cmd *nq_to_qs_map;
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_tqp_tx_queue_tc_cmd *tc;
	enum hclge_opcode_type cmd;
	struct hclge_desc desc;
	int queue_id, group_id;
	u32 qset_maping[32];
	int tc_id, qset_id;
	int pri_id, ret;
	u32 i;

	ret = kstrtouint(&cmd_buf[12], 10, &queue_id);
	queue_id = (ret != 0) ? 0 : queue_id;

	cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
	nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	nq_to_qs_map->nq_id = cpu_to_le16(queue_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_map_cmd_send;
	qset_id = nq_to_qs_map->qset_id & 0x3FF;

	cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	map->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_map_cmd_send;
	pri_id = map->priority;

	cmd = HCLGE_OPC_TQP_TX_QUEUE_TC;
	tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	tc->queue_id = cpu_to_le16(queue_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_map_cmd_send;
	tc_id = tc->tc_id & 0x7;

	dev_info(&hdev->pdev->dev, "queue_id | qset_id | pri_id | tc_id\n");
	dev_info(&hdev->pdev->dev, "%04d     | %04d    | %02d     | %02d\n",
		 queue_id, qset_id, pri_id, tc_id);

	cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
	for (group_id = 0; group_id < 32; group_id++) {
		hclge_cmd_setup_basic_desc(&desc, cmd, true);
		bp_to_qs_map_cmd->tc_id = tc_id;
		bp_to_qs_map_cmd->qs_group_id = group_id;
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret)
			goto err_tm_map_cmd_send;

		qset_maping[group_id] = bp_to_qs_map_cmd->qs_bit_map;
	}

	dev_info(&hdev->pdev->dev, "index | tm bp qset maping:\n");

	i = 0;
	for (group_id = 0; group_id < 4; group_id++) {
		dev_info(&hdev->pdev->dev,
			 "%04d  | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
			 group_id * 256, qset_maping[(u32)(i + 7)],
			 qset_maping[(u32)(i + 6)], qset_maping[(u32)(i + 5)],
			 qset_maping[(u32)(i + 4)], qset_maping[(u32)(i + 3)],
			 qset_maping[(u32)(i + 2)], qset_maping[(u32)(i + 1)],
			 qset_maping[i]);
		i += 8;
	}

	return;

err_tm_map_cmd_send:
	dev_err(&hdev->pdev->dev, "dump tqp map fail(0x%x), status is %d\n",
		cmd, ret);
}

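/* hclge_dbg_dump_qos_pause_cfg: dump the MAC pause transmit gap and time
 * @hdev: pointer to struct hclge_dev
 */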
static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "dump qos pause cfg fail, status is %d.\n",
			ret);
		return;
	}

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "dump qos pause cfg\n");
	dev_info(&hdev->pdev->dev, "pause_trans_gap: 0x%x\n",
		 pause_param->pause_trans_gap);
	dev_info(&hdev->pdev->dev, "pause_trans_time: 0x%x\n",
		 pause_param->pause_trans_time);
}

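/* hclge_dbg_dump_qos_pri_map: dump the vlan to priority and priority to tc
 * mappings
 * @hdev: pointer to struct hclge_dev
 */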
static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev)
{
	struct hclge_qos_pri_map_cmd *pri_map;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"dump qos pri map fail, status is %d.\n", ret);
		return;
	}

	pri_map = (struct hclge_qos_pri_map_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "dump qos pri map\n");
	dev_info(&hdev->pdev->dev, "vlan_to_pri: 0x%x\n", pri_map->vlan_pri);
	dev_info(&hdev->pdev->dev, "pri_0_to_tc: 0x%x\n", pri_map->pri0_tc);
	dev_info(&hdev->pdev->dev, "pri_1_to_tc: 0x%x\n", pri_map->pri1_tc);
	dev_info(&hdev->pdev->dev, "pri_2_to_tc: 0x%x\n", pri_map->pri2_tc);
	dev_info(&hdev->pdev->dev, "pri_3_to_tc: 0x%x\n", pri_map->pri3_tc);
	dev_info(&hdev->pdev->dev, "pri_4_to_tc: 0x%x\n", pri_map->pri4_tc);
	dev_info(&hdev->pdev->dev, "pri_5_to_tc: 0x%x\n", pri_map->pri5_tc);
	dev_info(&hdev->pdev->dev, "pri_6_to_tc: 0x%x\n", pri_map->pri6_tc);
	dev_info(&hdev->pdev->dev, "pri_7_to_tc: 0x%x\n", pri_map->pri7_tc);
}

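/* hclge_dbg_dump_qos_buf_cfg: dump tx/rx packet buffer allocation, rx
 * private waterlines, rx common thresholds and the global rx packet counter
 * @hdev: pointer to struct hclge_dev
 */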
static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
{
	struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
	struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
	struct hclge_rx_priv_wl_buf *rx_priv_wl;
	struct hclge_rx_com_wl *rx_packet_cnt;
	struct hclge_rx_com_thrd *rx_com_thrd;
	struct hclge_rx_com_wl *rx_com_wl;
	enum hclge_opcode_type cmd;
	struct hclge_desc desc[2];
	int i, ret;

	cmd = HCLGE_OPC_TX_BUFF_ALLOC;
	hclge_cmd_setup_basic_desc(desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, desc, 1);
	if (ret)
		goto err_qos_cmd_send;

	dev_info(&hdev->pdev->dev, "dump qos buf cfg\n");

	tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data;
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i,
			 tx_buf_cmd->tx_pkt_buff[i]);

	cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC;
	hclge_cmd_setup_basic_desc(desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, desc, 1);
	if (ret)
		goto err_qos_cmd_send;

	dev_info(&hdev->pdev->dev, "\n");
	rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data;
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i,
			 rx_buf_cmd->buf_num[i]);

	dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n",
		 rx_buf_cmd->shared_buf);

	cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC;
	hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		goto err_qos_cmd_send;

	dev_info(&hdev->pdev->dev, "\n");
	rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
		dev_info(&hdev->pdev->dev,
			 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
			 rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low);

	rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
		dev_info(&hdev->pdev->dev,
			 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i + 4,
			 rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low);

	cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
	hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		goto err_qos_cmd_send;

	dev_info(&hdev->pdev->dev, "\n");
	rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
		dev_info(&hdev->pdev->dev,
			 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
			 rx_com_thrd->com_thrd[i].high,
			 rx_com_thrd->com_thrd[i].low);

	rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
		dev_info(&hdev->pdev->dev,
			 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i + 4,
			 rx_com_thrd->com_thrd[i].high,
			 rx_com_thrd->com_thrd[i].low);

	cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
	hclge_cmd_setup_basic_desc(desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, desc, 1);
	if (ret)
		goto err_qos_cmd_send;

	rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data;
	dev_info(&hdev->pdev->dev, "\n");
	dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n",
		 rx_com_wl->com_wl.high, rx_com_wl->com_wl.low);

	cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
	hclge_cmd_setup_basic_desc(desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, desc, 1);
	if (ret)
		goto err_qos_cmd_send;

	rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data;
	dev_info(&hdev->pdev->dev,
		 "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
		 rx_packet_cnt->com_wl.high, rx_packet_cnt->com_wl.low);

	return;

err_qos_cmd_send:
	dev_err(&hdev->pdev->dev,
		"dump qos buf cfg fail(0x%x), status is %d\n", cmd, ret);
}

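/* hclge_dbg_dump_mng_table: dump the valid entries of the manager table
 * @hdev: pointer to struct hclge_dev
 */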
static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
{
	struct hclge_mac_ethertype_idx_rd_cmd *req0;
	char printf_buf[HCLGE_DBG_BUF_LEN];
	struct hclge_desc desc;
	int ret, i;

	dev_info(&hdev->pdev->dev, "mng tab:\n");
	memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
	strncat(printf_buf,
		"entry|mac_addr         |mask|ether|mask|vlan|mask",
		HCLGE_DBG_BUF_LEN - 1);
	strncat(printf_buf + strlen(printf_buf),
		"|i_map|i_dir|e_type|pf_id|vf_id|q_id|drop\n",
		HCLGE_DBG_BUF_LEN - strlen(printf_buf) - 1);

	dev_info(&hdev->pdev->dev, "%s", printf_buf);

	for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD,
					   true);
		req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data;
		req0->index = cpu_to_le16(i);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"call hclge_cmd_send fail, ret = %d\n", ret);
			return;
		}

		if (!req0->resp_code)
			continue;

		memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
		snprintf(printf_buf, HCLGE_DBG_BUF_LEN,
			 "%02u   |%02x:%02x:%02x:%02x:%02x:%02x|",
			 req0->index, req0->mac_add[0], req0->mac_add[1],
			 req0->mac_add[2], req0->mac_add[3], req0->mac_add[4],
			 req0->mac_add[5]);

		snprintf(printf_buf + strlen(printf_buf),
			 HCLGE_DBG_BUF_LEN - strlen(printf_buf),
			 "%x   |%04x |%x   |%04x|%x   |%02x   |%02x   |",
			 !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
			 req0->ethter_type,
			 !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
			 req0->vlan_tag & HCLGE_DBG_MNG_VLAN_TAG,
			 !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
			 req0->i_port_bitmap, req0->i_port_direction);

		snprintf(printf_buf + strlen(printf_buf),
			 HCLGE_DBG_BUF_LEN - strlen(printf_buf),
			 "%d     |%d    |%02d   |%04d|%x\n",
			 !!(req0->egress_port & HCLGE_DBG_MNG_E_TYPE_B),
			 req0->egress_port & HCLGE_DBG_MNG_PF_ID,
			 (req0->egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
			 req0->egress_queue,
			 !!(req0->egress_port & HCLGE_DBG_MNG_DROP_B));

		dev_info(&hdev->pdev->dev, "%s", printf_buf);
	}
}

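/* hclge_dbg_fd_tcam_read: read and print one flow director TCAM key
 * @hdev: pointer to struct hclge_dev
 * @stage: flow director stage the key belongs to
 * @sel_x: true to read the x key, false to read the y key
 * @loc: TCAM entry location
 */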
static void hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
				   bool sel_x, u32 loc)
{
	struct hclge_fd_tcam_config_1_cmd *req1;
	struct hclge_fd_tcam_config_2_cmd *req2;
	struct hclge_fd_tcam_config_3_cmd *req3;
	struct hclge_desc desc[3];
	int ret, i;
	u32 *req;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true);
	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true);

	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;

	req1->stage  = stage;
	req1->xy_sel = sel_x ? 1 : 0;
	req1->index  = cpu_to_le32(loc);

	ret = hclge_cmd_send(&hdev->hw, desc, 3);
	if (ret)
		return;

	dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n",
		 sel_x ? "x" : "y", loc);

	req = (u32 *)req1->tcam_data;
	for (i = 0; i < 2; i++)
		dev_info(&hdev->pdev->dev, "%08x\n", *req++);

	req = (u32 *)req2->tcam_data;
	for (i = 0; i < 6; i++)
		dev_info(&hdev->pdev->dev, "%08x\n", *req++);

	req = (u32 *)req3->tcam_data;
	for (i = 0; i < 5; i++)
		dev_info(&hdev->pdev->dev, "%08x\n", *req++);
}

static void hclge_dbg_fd_tcam(struct hclge_dev *hdev)
{
	u32 i;

	for (i = 0; i < hdev->fd_cfg.rule_num[0]; i++) {
		hclge_dbg_fd_tcam_read(hdev, 0, true, i);
		hclge_dbg_fd_tcam_read(hdev, 0, false, i);
	}
}

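/* hclge_dbg_dump_rst_info: print the reset statistics of the device
 * @hdev: pointer to struct hclge_dev
 */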
static void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
{
	dev_info(&hdev->pdev->dev, "PF reset count: %d\n",
		 hdev->rst_stats.pf_rst_cnt);
	dev_info(&hdev->pdev->dev, "FLR reset count: %d\n",
		 hdev->rst_stats.flr_rst_cnt);
	dev_info(&hdev->pdev->dev, "CORE reset count: %d\n",
		 hdev->rst_stats.core_rst_cnt);
	dev_info(&hdev->pdev->dev, "GLOBAL reset count: %d\n",
		 hdev->rst_stats.global_rst_cnt);
	dev_info(&hdev->pdev->dev, "IMP reset count: %d\n",
		 hdev->rst_stats.imp_rst_cnt);
	dev_info(&hdev->pdev->dev, "reset done count: %d\n",
		 hdev->rst_stats.reset_done_cnt);
	dev_info(&hdev->pdev->dev, "HW reset done count: %d\n",
		 hdev->rst_stats.hw_reset_done_cnt);
	dev_info(&hdev->pdev->dev, "reset count: %d\n",
		 hdev->rst_stats.reset_cnt);
}

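/* hclge_dbg_get_m7_stats_info: query the buffer descriptor number of the
 * firmware (M7) statistics, then read and print the statistics data
 * @hdev: pointer to struct hclge_dev
 */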
void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
{
	struct hclge_desc *desc_src, *desc_tmp;
	struct hclge_get_m7_bd_cmd *req;
	struct hclge_desc desc;
	u32 bd_num, buf_len;
	int ret, i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_M7_STATS_BD, true);

	req = (struct hclge_get_m7_bd_cmd *)desc.data;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"get firmware statistics bd number failed, ret=%d\n",
			ret);
		return;
	}

	bd_num = le32_to_cpu(req->bd_num);

	buf_len	 = sizeof(struct hclge_desc) * bd_num;
	desc_src = kzalloc(buf_len, GFP_KERNEL);
	if (!desc_src) {
		dev_err(&hdev->pdev->dev,
			"allocate desc for get_m7_stats failed\n");
		return;
	}

	desc_tmp = desc_src;
	ret  = hclge_dbg_cmd_send(hdev, desc_tmp, 0, bd_num,
				  HCLGE_OPC_M7_STATS_INFO);
	if (ret) {
		kfree(desc_src);
		dev_err(&hdev->pdev->dev,
			"get firmware statistics failed, ret=%d\n", ret);
		return;
	}

	for (i = 0; i < bd_num; i++) {
		dev_info(&hdev->pdev->dev, "0x%08x  0x%08x  0x%08x\n",
			 le32_to_cpu(desc_tmp->data[0]),
			 le32_to_cpu(desc_tmp->data[1]),
			 le32_to_cpu(desc_tmp->data[2]));
		dev_info(&hdev->pdev->dev, "0x%08x  0x%08x  0x%08x\n",
			 le32_to_cpu(desc_tmp->data[3]),
			 le32_to_cpu(desc_tmp->data[4]),
			 le32_to_cpu(desc_tmp->data[5]));

		desc_tmp++;
	}

	kfree(desc_src);
}

/* hclge_dbg_dump_ncl_config: print specified range of NCL_CONFIG file
 * @hdev: pointer to struct hclge_dev
 * @cmd_buf: string that contains offset and length
 */
static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *cmd_buf)
{
#define HCLGE_MAX_NCL_CONFIG_OFFSET	4096
#define HCLGE_MAX_NCL_CONFIG_LENGTH	(20 + 24 * 4)
#define HCLGE_CMD_DATA_NUM		6

	struct hclge_desc desc[5];
	u32 byte_offset;
	int bd_num = 5;
	int offset;
	int length;
	int data0;
	int ret;
	int i;
	int j;

	ret = sscanf(cmd_buf, "%x %x", &offset, &length);
	if (ret != 2 || offset >= HCLGE_MAX_NCL_CONFIG_OFFSET ||
	    length > HCLGE_MAX_NCL_CONFIG_OFFSET - offset) {
		dev_err(&hdev->pdev->dev, "Invalid offset or length.\n");
		return;
	}
	if (offset < 0 || length <= 0) {
		dev_err(&hdev->pdev->dev, "Non-positive offset or length.\n");
		return;
	}

	dev_info(&hdev->pdev->dev, "offset |    data\n");

	while (length > 0) {
		data0 = offset;
		if (length >= HCLGE_MAX_NCL_CONFIG_LENGTH)
			data0 |= HCLGE_MAX_NCL_CONFIG_LENGTH << 16;
		else
			data0 |= length << 16;
		ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num,
					 HCLGE_OPC_QUERY_NCL_CONFIG);
		if (ret)
			return;

		byte_offset = offset;
		for (i = 0; i < bd_num; i++) {
			for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
				if (i == 0 && j == 0)
					continue;

				dev_info(&hdev->pdev->dev, "0x%04x | 0x%08x\n",
					 byte_offset,
					 le32_to_cpu(desc[i].data[j]));
				byte_offset += sizeof(u32);
				length -= sizeof(u32);
				if (length <= 0)
					return;
			}
		}
		offset += HCLGE_MAX_NCL_CONFIG_LENGTH;
	}
}

/* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt
 * @hdev: pointer to struct hclge_dev
 */
static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev)
{
#define HCLGE_BILLION_NANO_SECONDS 1000000000

	struct hclge_mac_tnl_stats stats;
	unsigned long rem_nsec;

	dev_info(&hdev->pdev->dev, "Recently generated mac tnl interruption:\n");

	while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
		rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);
		dev_info(&hdev->pdev->dev, "[%07lu.%03lu] status = 0x%x\n",
			 (unsigned long)stats.time, rem_nsec / 1000,
			 stats.status);
	}
}

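/* hclge_dbg_run_cmd: dispatch a debugfs command string to the matching dump
 * handler
 * @handle: pointer to struct hnae3_handle
 * @cmd_buf: command string written to the debugfs file
 */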
int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (strncmp(cmd_buf, "dump fd tcam", 12) == 0) {
		hclge_dbg_fd_tcam(hdev);
	} else if (strncmp(cmd_buf, "dump tc", 7) == 0) {
		hclge_dbg_dump_tc(hdev);
	} else if (strncmp(cmd_buf, "dump tm map", 11) == 0) {
		hclge_dbg_dump_tm_map(hdev, cmd_buf);
	} else if (strncmp(cmd_buf, "dump tm", 7) == 0) {
		hclge_dbg_dump_tm(hdev);
	} else if (strncmp(cmd_buf, "dump qos pause cfg", 18) == 0) {
		hclge_dbg_dump_qos_pause_cfg(hdev);
	} else if (strncmp(cmd_buf, "dump qos pri map", 16) == 0) {
		hclge_dbg_dump_qos_pri_map(hdev);
	} else if (strncmp(cmd_buf, "dump qos buf cfg", 16) == 0) {
		hclge_dbg_dump_qos_buf_cfg(hdev);
	} else if (strncmp(cmd_buf, "dump mng tbl", 12) == 0) {
		hclge_dbg_dump_mng_table(hdev);
	} else if (strncmp(cmd_buf, "dump reg", 8) == 0) {
		hclge_dbg_dump_reg_cmd(hdev, cmd_buf);
	} else if (strncmp(cmd_buf, "dump reset info", 15) == 0) {
		hclge_dbg_dump_rst_info(hdev);
	} else if (strncmp(cmd_buf, "dump m7 info", 12) == 0) {
		hclge_dbg_get_m7_stats_info(hdev);
	} else if (strncmp(cmd_buf, "dump ncl_config", 15) == 0) {
		hclge_dbg_dump_ncl_config(hdev,
					  &cmd_buf[sizeof("dump ncl_config")]);
	} else if (strncmp(cmd_buf, "dump mac tnl status", 19) == 0) {
		hclge_dbg_dump_mac_tnl_status(hdev);
	} else {
		dev_info(&hdev->pdev->dev, "unknown command\n");
		return -EINVAL;
	}

	return 0;
}