// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include "hclge_cmd.h"
#include "hnae3.h"
#include "hclge_main.h"

#define cmq_ring_to_dev(ring)   (&(ring)->dev->pdev->dev)

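/* Return the number of free descriptors in the ring; one slot is always
 * kept empty so that a full ring can be distinguished from an empty one.
 */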
static int hclge_ring_space(struct hclge_cmq_ring *ring)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}

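/* Check that the head pointer written back by hardware lies within the
 * [next_to_clean, next_to_use] window, taking ring wrap-around into account.
 */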
static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int head)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;

	if (ntu > ntc)
		return head >= ntc && head <= ntu;

	return head >= ntc || head <= ntu;
}

static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclge_desc);

	ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
					&ring->desc_dma_addr, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}

static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclge_desc);

	if (ring->desc) {
		dma_free_coherent(cmq_ring_to_dev(ring), size,
				  ring->desc, ring->desc_dma_addr);
		ring->desc = NULL;
	}
}

static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type)
{
	struct hclge_hw *hw = &hdev->hw;
	struct hclge_cmq_ring *ring =
		(ring_type == HCLGE_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
	int ret;

	ring->ring_type = ring_type;
	ring->dev = hdev;

	ret = hclge_alloc_cmd_desc(ring);
	if (ret) {
		dev_err(&hdev->pdev->dev, "descriptor %s alloc error %d\n",
			(ring_type == HCLGE_TYPE_CSQ) ? "CSQ" : "CRQ", ret);
		return ret;
	}

	return 0;
}

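/* Reinitialize the flags of an already used descriptor so that it can
 * be sent again, keeping its opcode and data.
 */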
void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
{
	desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
	else
		desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
}

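/* Zero a descriptor and fill in its opcode and default flags; the WR
 * flag marks a read (query) command.
 */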
void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
				enum hclge_opcode_type opcode, bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hclge_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);

	if (is_read)
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
}

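/* Write one ring's DMA base address, depth and head/tail pointers into
 * the corresponding CSQ or CRQ hardware registers.
 */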
static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	struct hclge_dev *hdev = ring->dev;
	struct hclge_hw *hw = &hdev->hw;
	u32 reg_val;

	if (ring->ring_type == HCLGE_TYPE_CSQ) {
		hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG,
				lower_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
				upper_32_bits(dma));
		reg_val = hclge_read_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG);
		reg_val &= HCLGE_NIC_SW_RST_RDY;
		reg_val |= ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S;
		hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
		hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
		hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
	} else {
		hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG,
				lower_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG,
				upper_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG,
				ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S);
		hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
		hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
	}
}

static void hclge_cmd_init_regs(struct hclge_hw *hw)
{
	hclge_cmd_config_regs(&hw->cmq.csq);
	hclge_cmd_config_regs(&hw->cmq.crq);
}

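/* Reclaim the CSQ descriptors that firmware has processed, based on the
 * head pointer it wrote back; returns the number of descriptors cleaned,
 * or -EIO if the head value is out of range.
 */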
static int hclge_cmd_csq_clean(struct hclge_hw *hw)
{
	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
	struct hclge_cmq_ring *csq = &hw->cmq.csq;
	u32 head;
	int clean;

	head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
	rmb(); /* Make sure head is ready before touching any data */

	if (!is_valid_csq_clean_head(csq, head)) {
		dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head,
			 csq->next_to_use, csq->next_to_clean);
		dev_warn(&hdev->pdev->dev,
			 "Disabling any further commands to IMP firmware\n");
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		dev_warn(&hdev->pdev->dev,
			 "IMP firmware watchdog reset soon expected!\n");
		return -EIO;
	}

	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
	csq->next_to_clean = head;

	return clean;
}

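/* The CSQ is done once the hardware head pointer has caught up with the
 * software next_to_use position.
 */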
static int hclge_cmd_csq_done(struct hclge_hw *hw)
{
	u32 head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);

	return head == hw->cmq.csq.next_to_use;
}

static bool hclge_is_special_opcode(u16 opcode)
{
	/* these commands have several descriptors,
	 * and use the first one to save opcode and return value
	 */
	static const u16 spec_opcode[] = {HCLGE_OPC_STATS_64_BIT,
					  HCLGE_OPC_STATS_32_BIT,
					  HCLGE_OPC_STATS_MAC,
					  HCLGE_OPC_STATS_MAC_ALL,
					  HCLGE_OPC_QUERY_32_BIT_REG,
					  HCLGE_OPC_QUERY_64_BIT_REG,
					  HCLGE_QUERY_CLEAR_MPF_RAS_INT,
					  HCLGE_QUERY_CLEAR_PF_RAS_INT,
					  HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
					  HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT};
	int i;

	for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
		if (spec_opcode[i] == opcode)
			return true;
	}

	return false;
}

struct errcode {
	u32 imp_errcode;
	int common_errno;
};

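/* Copy the caller's descriptors into the CSQ ring, wrapping next_to_use
 * at the end of the ring.
 */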
static void hclge_cmd_copy_desc(struct hclge_hw *hw, struct hclge_desc *desc,
				int num)
{
	struct hclge_desc *desc_to_use;
	int handle = 0;

	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}
}

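/* Convert an error code returned by the IMP firmware into a standard
 * errno; unknown codes map to -EIO.
 */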
static int hclge_cmd_convert_err_code(u16 desc_ret)
{
	static const struct errcode hclge_cmd_errcode[] = {
		{HCLGE_CMD_EXEC_SUCCESS, 0},
		{HCLGE_CMD_NO_AUTH, -EPERM},
		{HCLGE_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
		{HCLGE_CMD_QUEUE_FULL, -EXFULL},
		{HCLGE_CMD_NEXT_ERR, -ENOSR},
		{HCLGE_CMD_UNEXE_ERR, -ENOTBLK},
		{HCLGE_CMD_PARA_ERR, -EINVAL},
		{HCLGE_CMD_RESULT_ERR, -ERANGE},
		{HCLGE_CMD_TIMEOUT, -ETIME},
		{HCLGE_CMD_HILINK_ERR, -ENOLINK},
		{HCLGE_CMD_QUEUE_ILLEGAL, -ENXIO},
		{HCLGE_CMD_INVALID, -EBADR},
	};
	u32 errcode_count = ARRAY_SIZE(hclge_cmd_errcode);
	u32 i;

	for (i = 0; i < errcode_count; i++)
		if (hclge_cmd_errcode[i].imp_errcode == desc_ret)
			return hclge_cmd_errcode[i].common_errno;

	return -EIO;
}

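/* Copy the completed descriptors back to the caller, record the last
 * command status and convert the firmware return value to an errno.
 */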
static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
				  int num, int ntc)
{
	u16 opcode, desc_ret;
	int handle;

	opcode = le16_to_cpu(desc[0].opcode);
	for (handle = 0; handle < num; handle++) {
		desc[handle] = hw->cmq.csq.desc[ntc];
		ntc++;
		if (ntc >= hw->cmq.csq.desc_num)
			ntc = 0;
	}
	if (likely(!hclge_is_special_opcode(opcode)))
		desc_ret = le16_to_cpu(desc[num - 1].retval);
	else
		desc_ret = le16_to_cpu(desc[0].retval);

	hw->cmq.last_status = desc_ret;

	return hclge_cmd_convert_err_code(desc_ret);
}

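/* Poll for completion of a synchronous command, check its return value
 * and clean the used descriptors from the CSQ.
 */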
static int hclge_cmd_check_result(struct hclge_hw *hw, struct hclge_desc *desc,
				  int num, int ntc)
{
	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
	bool is_completed = false;
	u32 timeout = 0;
	int handle, ret;

	/* If the command is sync, wait for the firmware to write back;
	 * if multiple descriptors are sent, use the first one to check.
	 */
	if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
		do {
			if (hclge_cmd_csq_done(hw)) {
				is_completed = true;
				break;
			}
			udelay(1);
			timeout++;
		} while (timeout < hw->cmq.tx_timeout);
	}

	if (!is_completed)
		ret = -EBADE;
	else
		ret = hclge_cmd_check_retval(hw, desc, num, ntc);

	/* Clean the command send queue */
	handle = hclge_cmd_csq_clean(hw);
	if (handle < 0)
		ret = handle;
	else if (handle != num)
		dev_warn(&hdev->pdev->dev,
			 "cleaned %d, need to clean %d\n", handle, num);
	return ret;
}

/**
 * hclge_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send command for the command queue; it posts the
 * descriptors to the CSQ, waits for completion and cleans the queue.
 **/
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
	struct hclge_cmq_ring *csq = &hw->cmq.csq;
	int ret;
	int ntc;

	spin_lock_bh(&hw->cmq.csq.lock);

	if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	if (num > hclge_ring_space(&hw->cmq.csq)) {
		/* If the CMDQ ring is full, the SW HEAD and HW HEAD may differ,
		 * so the SW HEAD pointer csq->next_to_clean must be updated.
		 */
		csq->next_to_clean = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	/* Record the location of desc in the ring for this time,
	 * which will be used by hardware for write-back.
	 */
	ntc = hw->cmq.csq.next_to_use;

	hclge_cmd_copy_desc(hw, desc, num);

	/* Write to hardware */
	hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, hw->cmq.csq.next_to_use);

	ret = hclge_cmd_check_result(hw, desc, num, ntc);

	spin_unlock_bh(&hw->cmq.csq.lock);

	return ret;
}

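/* Set the capability bits that are always supported from device
 * version V2 onwards, even if firmware does not advertise them.
 */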
static void hclge_set_default_capability(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
	set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
	set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
}

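/* Translate the capability bits reported by firmware into hnae3 device
 * capability flags.
 */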
static void hclge_parse_capability(struct hclge_dev *hdev,
				   struct hclge_query_version_cmd *cmd)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	u32 caps;

	caps = __le32_to_cpu(cmd->caps[0]);

	if (hnae3_get_bit(caps, HCLGE_CAP_UDP_GSO_B))
		set_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps);
	if (hnae3_get_bit(caps, HCLGE_CAP_PTP_B))
		set_bit(HNAE3_DEV_SUPPORT_PTP_B, ae_dev->caps);
	if (hnae3_get_bit(caps, HCLGE_CAP_INT_QL_B))
		set_bit(HNAE3_DEV_SUPPORT_INT_QL_B, ae_dev->caps);
	if (hnae3_get_bit(caps, HCLGE_CAP_TQP_TXRX_INDEP_B))
		set_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, ae_dev->caps);
	if (hnae3_get_bit(caps, HCLGE_CAP_HW_TX_CSUM_B))
		set_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps);
	if (hnae3_get_bit(caps, HCLGE_CAP_UDP_TUNNEL_CSUM_B))
		set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps);
	if (hnae3_get_bit(caps, HCLGE_CAP_FD_FORWARD_TC_B))
		set_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps);
}

static __le32 hclge_build_api_caps(void)
{
	u32 api_caps = 0;

	hnae3_set_bit(api_caps, HCLGE_API_CAP_FLEX_RSS_TBL_B, 1);

	return cpu_to_le32(api_caps);
}

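/* Query the firmware version and device capabilities, and record them
 * in hdev and the hnae3 ae_dev.
 */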
static enum hclge_cmd_status
hclge_cmd_query_version_and_capability(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_query_version_cmd *resp;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1);
	resp = (struct hclge_query_version_cmd *)desc.data;
	resp->api_caps = hclge_build_api_caps();

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	hdev->fw_version = le32_to_cpu(resp->firmware);

	ae_dev->dev_version = le32_to_cpu(resp->hardware) <<
					 HNAE3_PCI_REVISION_BIT_SIZE;
	ae_dev->dev_version |= hdev->pdev->revision;

	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
		hclge_set_default_capability(hdev);

	hclge_parse_capability(hdev, resp);

	return ret;
}

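/* Initialize the command queue locks and parameters and allocate the
 * CSQ and CRQ rings; hclge_cmd_init() later programs them into hardware.
 */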
int hclge_cmd_queue_init(struct hclge_dev *hdev)
{
	int ret;

	/* Setup the lock for command queue */
	spin_lock_init(&hdev->hw.cmq.csq.lock);
	spin_lock_init(&hdev->hw.cmq.crq.lock);

	/* Set up the queue entries used by the command queue */
	hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
	hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;

	/* Setup Tx write back timeout */
	hdev->hw.cmq.tx_timeout = HCLGE_CMDQ_TX_TIMEOUT;

	/* Setup queue rings */
	ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CSQ);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"CSQ ring setup error %d\n", ret);
		return ret;
	}

	ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CRQ);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"CRQ ring setup error %d\n", ret);
		goto err_csq;
	}

	return 0;
err_csq:
	hclge_free_cmd_desc(&hdev->hw.cmq.csq);
	return ret;
}

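/* Report to the firmware which compatible features the driver wants
 * enabled, such as link event and NCSI error reporting.
 */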
static int hclge_firmware_compat_config(struct hclge_dev *hdev)
{
	struct hclge_firmware_compat_cmd *req;
	struct hclge_desc desc;
	u32 compat = 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_M7_COMPAT_CFG, false);

	req = (struct hclge_firmware_compat_cmd *)desc.data;

	hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1);
	hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1);
	req->compat = cpu_to_le32(compat);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

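/* Reset the ring pointers, program the command queue registers, then
 * query the firmware version and capabilities.
 */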
int hclge_cmd_init(struct hclge_dev *hdev)
{
	int ret;

	spin_lock_bh(&hdev->hw.cmq.csq.lock);
	spin_lock(&hdev->hw.cmq.crq.lock);

	hdev->hw.cmq.csq.next_to_clean = 0;
	hdev->hw.cmq.csq.next_to_use = 0;
	hdev->hw.cmq.crq.next_to_clean = 0;
	hdev->hw.cmq.crq.next_to_use = 0;

	hclge_cmd_init_regs(&hdev->hw);

	spin_unlock(&hdev->hw.cmq.crq.lock);
	spin_unlock_bh(&hdev->hw.cmq.csq.lock);

	clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);

	/* Check if there is new reset pending, because the higher level
	 * reset may happen when lower level reset is being processed.
	 */
	if (hclge_is_reset_pending(hdev)) {
		dev_err(&hdev->pdev->dev,
			"failed to init cmd since reset %#lx pending\n",
			hdev->reset_pending);
		ret = -EBUSY;
		goto err_cmd_init;
	}

	/* get version and device capabilities */
	ret = hclge_cmd_query_version_and_capability(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query version and capabilities, ret = %d\n",
			ret);
		goto err_cmd_init;
	}

	dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
				 HNAE3_FW_VERSION_BYTE3_SHIFT),
		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
				 HNAE3_FW_VERSION_BYTE2_SHIFT),
		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
				 HNAE3_FW_VERSION_BYTE1_SHIFT),
		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
				 HNAE3_FW_VERSION_BYTE0_SHIFT));

	/* Ask the firmware to enable some features; the driver can work
	 * without them.
	 */
	ret = hclge_firmware_compat_config(hdev);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Firmware compatible features not enabled(%d).\n",
			 ret);

	return 0;

err_cmd_init:
	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);

	return ret;
}

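/* Clear the command queue registers so hardware stops accessing the
 * rings before their memory is freed.
 */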
static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
{
	hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
}

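/* Disable further commands, reset the command queue registers and free
 * the descriptor rings.
 */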
void hclge_cmd_uninit(struct hclge_dev *hdev)
{
	spin_lock_bh(&hdev->hw.cmq.csq.lock);
	spin_lock(&hdev->hw.cmq.crq.lock);
	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
	hclge_cmd_uninit_regs(&hdev->hw);
	spin_unlock(&hdev->hw.cmq.crq.lock);
	spin_unlock_bh(&hdev->hw.cmq.csq.lock);

	hclge_free_cmd_desc(&hdev->hw.cmq.csq);
	hclge_free_cmd_desc(&hdev->hw.cmq.crq);
}