// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2019 HiSilicon Limited. */
#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/topology.h>
#include <linux/uacce.h>
#include "hpre.h"

#define HPRE_QUEUE_NUM_V2		1024
#define HPRE_QM_ABNML_INT_MASK		0x100004
#define HPRE_CTRL_CNT_CLR_CE_BIT	BIT(0)
#define HPRE_COMM_CNT_CLR_CE		0x0
#define HPRE_CTRL_CNT_CLR_CE		0x301000
#define HPRE_FSM_MAX_CNT		0x301008
#define HPRE_VFG_AXQOS			0x30100c
#define HPRE_VFG_AXCACHE		0x301010
#define HPRE_RDCHN_INI_CFG		0x301014
#define HPRE_AWUSR_FP_CFG		0x301018
#define HPRE_BD_ENDIAN			0x301020
#define HPRE_ECC_BYPASS			0x301024
#define HPRE_RAS_WIDTH_CFG		0x301028
#define HPRE_POISON_BYPASS		0x30102c
#define HPRE_BD_ARUSR_CFG		0x301030
#define HPRE_BD_AWUSR_CFG		0x301034
#define HPRE_TYPES_ENB			0x301038
#define HPRE_RSA_ENB			BIT(0)
#define HPRE_ECC_ENB			BIT(1)
#define HPRE_DATA_RUSER_CFG		0x30103c
#define HPRE_DATA_WUSER_CFG		0x301040
#define HPRE_INT_MASK			0x301400
#define HPRE_INT_STATUS			0x301800
#define HPRE_CORE_INT_ENABLE		0
#define HPRE_CORE_INT_DISABLE		0x003fffff
#define HPRE_RDCHN_INI_ST		0x301a00
#define HPRE_CLSTR_BASE			0x302000
#define HPRE_CORE_EN_OFFSET		0x04
#define HPRE_CORE_INI_CFG_OFFSET	0x20
#define HPRE_CORE_INI_STATUS_OFFSET	0x80
#define HPRE_CORE_HTBT_WARN_OFFSET	0x8c
#define HPRE_CORE_IS_SCHD_OFFSET	0x90

#define HPRE_RAS_CE_ENB			0x301410
#define HPRE_HAC_RAS_CE_ENABLE		(BIT(0) | BIT(22) | BIT(23))
#define HPRE_RAS_NFE_ENB		0x301414
#define HPRE_HAC_RAS_NFE_ENABLE		0x3ffffe
#define HPRE_RAS_FE_ENB			0x301418
#define HPRE_HAC_RAS_FE_ENABLE		0

#define HPRE_CORE_ENB		(HPRE_CLSTR_BASE + HPRE_CORE_EN_OFFSET)
#define HPRE_CORE_INI_CFG	(HPRE_CLSTR_BASE + HPRE_CORE_INI_CFG_OFFSET)
#define HPRE_CORE_INI_STATUS (HPRE_CLSTR_BASE + HPRE_CORE_INI_STATUS_OFFSET)
#define HPRE_HAC_ECC1_CNT		0x301a04
#define HPRE_HAC_ECC2_CNT		0x301a08
#define HPRE_HAC_INT_STATUS		0x301800
#define HPRE_HAC_SOURCE_INT		0x301600
#define HPRE_CLSTR_ADDR_INTRVL		0x1000
#define HPRE_CLUSTER_INQURY		0x100
#define HPRE_CLSTR_ADDR_INQRY_RSLT	0x104
#define HPRE_TIMEOUT_ABNML_BIT		6
#define HPRE_PASID_EN_BIT		9
#define HPRE_REG_RD_INTVRL_US		10
#define HPRE_REG_RD_TMOUT_US		1000
#define HPRE_DBGFS_VAL_MAX_LEN		20
#define HPRE_PCI_DEVICE_ID		0xa258
#define HPRE_PCI_VF_DEVICE_ID		0xa259
#define HPRE_ADDR(qm, offset)		((qm)->io_base + (offset))
#define HPRE_QM_USR_CFG_MASK		0xfffffffe
#define HPRE_QM_AXI_CFG_MASK		0xffff
#define HPRE_QM_VFG_AX_MASK		0xff
#define HPRE_BD_USR_MASK		0x3
#define HPRE_CLUSTER_CORE_MASK_V2	0xf
#define HPRE_CLUSTER_CORE_MASK_V3	0xff

#define HPRE_AM_OOO_SHUTDOWN_ENB	0x301044
#define HPRE_AM_OOO_SHUTDOWN_ENABLE	BIT(0)
#define HPRE_WR_MSI_PORT		BIT(2)

#define HPRE_CORE_ECC_2BIT_ERR		BIT(1)
#define HPRE_OOO_ECC_2BIT_ERR		BIT(5)

#define HPRE_QM_BME_FLR			BIT(7)
#define HPRE_QM_PM_FLR			BIT(11)
#define HPRE_QM_SRIOV_FLR		BIT(12)

#define HPRE_CLUSTERS_NUM(qm)		\
	(((qm)->ver >= QM_HW_V3) ? HPRE_CLUSTERS_NUM_V3 : HPRE_CLUSTERS_NUM_V2)
#define HPRE_CLUSTER_CORE_MASK(qm)	\
	(((qm)->ver >= QM_HW_V3) ? HPRE_CLUSTER_CORE_MASK_V3 :\
		HPRE_CLUSTER_CORE_MASK_V2)
#define HPRE_VIA_MSI_DSM		1
#define HPRE_SQE_MASK_OFFSET		8
#define HPRE_SQE_MASK_LEN		24

static const char hpre_name[] = "hisi_hpre";
static struct dentry *hpre_debugfs_root;
static const struct pci_device_id hpre_dev_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_VF_DEVICE_ID) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, hpre_dev_ids);

struct hpre_hw_error {
	u32 int_msk;
	const char *msg;
};

static struct hisi_qm_list hpre_devices = {
	.register_to_crypto	= hpre_algs_register,
	.unregister_from_crypto	= hpre_algs_unregister,
};

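/*
 * Control-file names created under the per-device debugfs directory,
 * e.g. /sys/kernel/debug/hisi_hpre/<pci-bdf>/current_qm (example path;
 * the root directory comes from hpre_debugfs_root).
 */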
static const char * const hpre_debug_file_name[] = {
	[HPRE_CURRENT_QM]   = "current_qm",
	[HPRE_CLEAR_ENABLE] = "rdclr_en",
	[HPRE_CLUSTER_CTRL] = "cluster_ctrl",
};

static const struct hpre_hw_error hpre_hw_errors[] = {
	{ .int_msk = BIT(0), .msg = "core_ecc_1bit_err_int_set" },
	{ .int_msk = BIT(1), .msg = "core_ecc_2bit_err_int_set" },
	{ .int_msk = BIT(2), .msg = "dat_wb_poison_int_set" },
	{ .int_msk = BIT(3), .msg = "dat_rd_poison_int_set" },
	{ .int_msk = BIT(4), .msg = "bd_rd_poison_int_set" },
	{ .int_msk = BIT(5), .msg = "ooo_ecc_2bit_err_int_set" },
	{ .int_msk = BIT(6), .msg = "cluster1_shb_timeout_int_set" },
	{ .int_msk = BIT(7), .msg = "cluster2_shb_timeout_int_set" },
	{ .int_msk = BIT(8), .msg = "cluster3_shb_timeout_int_set" },
	{ .int_msk = BIT(9), .msg = "cluster4_shb_timeout_int_set" },
	{ .int_msk = GENMASK(15, 10), .msg = "ooo_rdrsp_err_int_set" },
	{ .int_msk = GENMASK(21, 16), .msg = "ooo_wrrsp_err_int_set" },
	{ .int_msk = BIT(22), .msg = "pt_rng_timeout_int_set"},
	{ .int_msk = BIT(23), .msg = "sva_fsm_timeout_int_set"},
	{
		/* sentinel */
	}
};

static const u64 hpre_cluster_offsets[] = {
	[HPRE_CLUSTER0] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER0 * HPRE_CLSTR_ADDR_INTRVL,
	[HPRE_CLUSTER1] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER1 * HPRE_CLSTR_ADDR_INTRVL,
	[HPRE_CLUSTER2] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER2 * HPRE_CLSTR_ADDR_INTRVL,
	[HPRE_CLUSTER3] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER3 * HPRE_CLSTR_ADDR_INTRVL,
};

static const struct debugfs_reg32 hpre_cluster_dfx_regs[] = {
	{"CORES_EN_STATUS     ",  HPRE_CORE_EN_OFFSET},
	{"CORES_INI_CFG       ",  HPRE_CORE_INI_CFG_OFFSET},
	{"CORES_INI_STATUS    ",  HPRE_CORE_INI_STATUS_OFFSET},
	{"CORES_HTBT_WARN     ",  HPRE_CORE_HTBT_WARN_OFFSET},
	{"CORES_IS_SCHD       ",  HPRE_CORE_IS_SCHD_OFFSET},
};

static const struct debugfs_reg32 hpre_com_dfx_regs[] = {
	{"READ_CLR_EN         ",  HPRE_CTRL_CNT_CLR_CE},
	{"AXQOS               ",  HPRE_VFG_AXQOS},
	{"AWUSR_CFG           ",  HPRE_AWUSR_FP_CFG},
	{"QM_ARUSR_MCFG1      ",  QM_ARUSER_M_CFG_1},
	{"QM_AWUSR_MCFG1      ",  QM_AWUSER_M_CFG_1},
	{"BD_ENDIAN           ",  HPRE_BD_ENDIAN},
	{"ECC_CHECK_CTRL      ",  HPRE_ECC_BYPASS},
	{"RAS_INT_WIDTH       ",  HPRE_RAS_WIDTH_CFG},
	{"POISON_BYPASS       ",  HPRE_POISON_BYPASS},
	{"BD_ARUSER           ",  HPRE_BD_ARUSR_CFG},
	{"BD_AWUSER           ",  HPRE_BD_AWUSR_CFG},
	{"DATA_ARUSER         ",  HPRE_DATA_RUSER_CFG},
	{"DATA_AWUSER         ",  HPRE_DATA_WUSER_CFG},
	{"INT_STATUS          ",  HPRE_INT_STATUS},
};

static const char *hpre_dfx_files[HPRE_DFX_FILE_NUM] = {
	"send_cnt",
	"recv_cnt",
	"send_fail_cnt",
	"send_busy_cnt",
	"over_thrhld_cnt",
	"overtime_thrhld",
	"invalid_req_cnt"
};

static const struct kernel_param_ops hpre_uacce_mode_ops = {
	.set = uacce_mode_set,
	.get = param_get_int,
};

/*
 * uacce_mode = 0 means hpre only registers to crypto;
 * uacce_mode = 1 means hpre registers to both crypto and uacce.
 */
static u32 uacce_mode = UACCE_MODE_NOUACCE;
module_param_cb(uacce_mode, &hpre_uacce_mode_ops, &uacce_mode, 0444);
MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);

static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	return q_num_set(val, kp, HPRE_PCI_DEVICE_ID);
}

static const struct kernel_param_ops hpre_pf_q_num_ops = {
	.set = pf_q_num_set,
	.get = param_get_int,
};

static u32 pf_q_num = HPRE_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &hpre_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF of HPRE(2-1024)");

static const struct kernel_param_ops vfs_num_ops = {
	.set = vfs_num_set,
	.get = param_get_int,
};

static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
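/*
 * Typical load-time configuration (a sketch; the values are examples):
 *	modprobe hisi_hpre uacce_mode=1 pf_q_num=64 vfs_num=4
 */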

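/*
 * Allocate one queue pair from the HPRE device nearest to the caller's
 * NUMA node. A typical caller (sketch, cf. hpre_crypto.c) starts the
 * queue before use:
 *
 *	struct hisi_qp *qp = hpre_create_qp();
 *
 *	if (!qp || hisi_qm_start_qp(qp, 0) < 0)
 *		return NULL;
 */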
struct hisi_qp *hpre_create_qp(void)
{
	int node = cpu_to_node(smp_processor_id());
	struct hisi_qp *qp = NULL;
	int ret;

	ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, 0, node, &qp);
	if (!ret)
		return qp;

	return NULL;
}

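/*
 * Set or clear the PASID enable bit in the data read/write user
 * configuration registers; only relevant when the device runs in SVA
 * mode.
 */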
static void hpre_pasid_enable(struct hisi_qm *qm)
{
	u32 val;

	val = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
	val |= BIT(HPRE_PASID_EN_BIT);
	writel_relaxed(val, qm->io_base + HPRE_DATA_RUSER_CFG);
	val = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
	val |= BIT(HPRE_PASID_EN_BIT);
	writel_relaxed(val, qm->io_base + HPRE_DATA_WUSER_CFG);
}

static void hpre_pasid_disable(struct hisi_qm *qm)
{
	u32 val;

	val = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
	val &= ~BIT(HPRE_PASID_EN_BIT);
	writel_relaxed(val, qm->io_base + HPRE_DATA_RUSER_CFG);
	val = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
	val &= ~BIT(HPRE_PASID_EN_BIT);
	writel_relaxed(val, qm->io_base + HPRE_DATA_WUSER_CFG);
}

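/*
 * Evaluate the HPRE ACPI _DSM so platform firmware reconfigures the
 * device; only used on Kunpeng 920 (HW V2), see
 * hpre_set_user_domain_and_cache().
 */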
static int hpre_cfg_by_dsm(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	union acpi_object *obj;
	guid_t guid;

	if (guid_parse("b06b81ab-0134-4a45-9b0c-483447b95fa7", &guid)) {
		dev_err(dev, "Hpre GUID failed\n");
		return -EINVAL;
	}

	/* Switch over to MSI handling due to non-standard PCI implementation */
	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid,
				0, HPRE_VIA_MSI_DSM, NULL);
	if (!obj) {
		dev_err(dev, "ACPI handle failed!\n");
		return -EIO;
	}

	ACPI_FREE(obj);

	return 0;
}

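/*
 * Enable the cores in every cluster and poll each cluster's init status
 * register until all enabled cores report ready.
 */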
static int hpre_set_cluster(struct hisi_qm *qm)
{
	u32 cluster_core_mask = HPRE_CLUSTER_CORE_MASK(qm);
	u8 clusters_num = HPRE_CLUSTERS_NUM(qm);
	struct device *dev = &qm->pdev->dev;
	unsigned long offset;
	u32 val = 0;
	int ret, i;

	for (i = 0; i < clusters_num; i++) {
		offset = i * HPRE_CLSTR_ADDR_INTRVL;

		/* enable the cluster's cores and kick off initialization */
		writel(cluster_core_mask,
		       HPRE_ADDR(qm, offset + HPRE_CORE_ENB));
		writel(0x1, HPRE_ADDR(qm, offset + HPRE_CORE_INI_CFG));
		ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, offset +
					HPRE_CORE_INI_STATUS), val,
					((val & cluster_core_mask) ==
					cluster_core_mask),
					HPRE_REG_RD_INTVRL_US,
					HPRE_REG_RD_TMOUT_US);
		if (ret) {
			dev_err(dev,
				"cluster %d int st status timeout!\n", i);
			return -ETIMEDOUT;
		}
	}

	return 0;
}

/*
 * For Kunpeng 920, we should disable FLR triggered by hardware (BME/PM/SRIOV).
 * Otherwise it may stay in D3 state when we bind and unbind hpre quickly,
 * as it does FLR triggered by hardware.
 */
static void disable_flr_of_bme(struct hisi_qm *qm)
{
	u32 val;

	val = readl(HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));
	val &= ~(HPRE_QM_BME_FLR | HPRE_QM_SRIOV_FLR);
	val |= HPRE_QM_PM_FLR;
	writel(val, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));
	writel(PEH_AXUSER_CFG_ENABLE, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG_ENABLE));
}

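/*
 * One-off device initialization: configure the QM user domain and AXI
 * cache, select the enabled algorithm types, bring up the read channel,
 * then initialize the clusters.
 */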
static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u32 val;
	int ret;

	writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_ARUSER_M_CFG_ENABLE));
	writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_AWUSER_M_CFG_ENABLE));
	writel_relaxed(HPRE_QM_AXI_CFG_MASK, HPRE_ADDR(qm, QM_AXI_M_CFG));

	/* HPRE needs more time, so mask the timeout abnormal interrupt */
	val = readl_relaxed(HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK));
	val |= BIT(HPRE_TIMEOUT_ABNML_BIT);
	writel_relaxed(val, HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK));

	if (qm->ver >= QM_HW_V3)
		writel(HPRE_RSA_ENB | HPRE_ECC_ENB,
			HPRE_ADDR(qm, HPRE_TYPES_ENB));
	else
		writel(HPRE_RSA_ENB, HPRE_ADDR(qm, HPRE_TYPES_ENB));

	writel(HPRE_QM_VFG_AX_MASK, HPRE_ADDR(qm, HPRE_VFG_AXCACHE));
	writel(0x0, HPRE_ADDR(qm, HPRE_BD_ENDIAN));
	writel(0x0, HPRE_ADDR(qm, HPRE_INT_MASK));
	writel(0x0, HPRE_ADDR(qm, HPRE_POISON_BYPASS));
	writel(0x0, HPRE_ADDR(qm, HPRE_COMM_CNT_CLR_CE));
	writel(0x0, HPRE_ADDR(qm, HPRE_ECC_BYPASS));

	writel(HPRE_BD_USR_MASK, HPRE_ADDR(qm, HPRE_BD_ARUSR_CFG));
	writel(HPRE_BD_USR_MASK, HPRE_ADDR(qm, HPRE_BD_AWUSR_CFG));
	writel(0x1, HPRE_ADDR(qm, HPRE_RDCHN_INI_CFG));
	ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, HPRE_RDCHN_INI_ST), val,
					 val & BIT(0),
					 HPRE_REG_RD_INTVRL_US,
					 HPRE_REG_RD_TMOUT_US);
	if (ret) {
		dev_err(dev, "read rd channel timeout fail!\n");
		return -ETIMEDOUT;
	}

	ret = hpre_set_cluster(qm);
	if (ret)
		return -ETIMEDOUT;

	/* This setting is only needed by Kunpeng 920. */
	if (qm->ver == QM_HW_V2) {
		ret = hpre_cfg_by_dsm(qm);
		if (ret)
			dev_err(dev, "acpi_evaluate_dsm err.\n");

		disable_flr_of_bme(qm);

		/* Enable data buffer pasid */
		if (qm->use_sva)
			hpre_pasid_enable(qm);
	}

	return ret;
}

static void hpre_cnt_regs_clear(struct hisi_qm *qm)
{
	u8 clusters_num = HPRE_CLUSTERS_NUM(qm);
	unsigned long offset;
	int i;

	/* clear current_qm */
	writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
	writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);

	/* clear clusterX/cluster_ctrl */
	for (i = 0; i < clusters_num; i++) {
		offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL;
		writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY);
	}

	/* clear rdclr_en */
	writel(0x0, qm->io_base + HPRE_CTRL_CNT_CLR_CE);

	hisi_qm_debug_regs_clear(qm);
}

static void hpre_hw_error_disable(struct hisi_qm *qm)
{
	u32 val;

	/* disable hpre hw error interrupts */
	writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_INT_MASK);

	/* disable HPRE block master OOO when m-bit error occur */
	val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
	val &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE;
	writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}

static void hpre_hw_error_enable(struct hisi_qm *qm)
{
	u32 val;

	/* clear any pending HPRE hw error source */
	writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_HAC_SOURCE_INT);

	/* enable hpre hw error interrupts */
	writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK);
	writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB);
	writel(HPRE_HAC_RAS_NFE_ENABLE, qm->io_base + HPRE_RAS_NFE_ENB);
	writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);

	/* enable HPRE block master OOO when m-bit error occur */
	val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
	val |= HPRE_AM_OOO_SHUTDOWN_ENABLE;
	writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}

static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
{
	struct hpre *hpre = container_of(file->debug, struct hpre, debug);

	return &hpre->qm;
}

static u32 hpre_current_qm_read(struct hpre_debugfs_file *file)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);

	return readl(qm->io_base + QM_DFX_MB_CNT_VF);
}

static int hpre_current_qm_write(struct hpre_debugfs_file *file, u32 val)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);
	u32 num_vfs = qm->vfs_num;
	u32 vfq_num, tmp;

	if (val > num_vfs)
		return -EINVAL;

	/* Calculate and store curr_qm_qp_num according to the PF or VF device ID */
	if (val == 0) {
		qm->debug.curr_qm_qp_num = qm->qp_num;
	} else {
		vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;
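		/* the last VF takes the remainder of the queue space */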
		if (val == num_vfs) {
			qm->debug.curr_qm_qp_num =
			qm->ctrl_qp_num - qm->qp_num - (num_vfs - 1) * vfq_num;
		} else {
			qm->debug.curr_qm_qp_num = vfq_num;
		}
	}

	writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
	writel(val, qm->io_base + QM_DFX_DB_CNT_VF);

	tmp = val |
	      (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
	writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);

	tmp = val |
	      (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
	writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);

	return 0;
}

static u32 hpre_clear_enable_read(struct hpre_debugfs_file *file)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);

	return readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) &
	       HPRE_CTRL_CNT_CLR_CE_BIT;
}

static int hpre_clear_enable_write(struct hpre_debugfs_file *file, u32 val)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);
	u32 tmp;

	if (val != 1 && val != 0)
		return -EINVAL;

	tmp = (readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) &
	       ~HPRE_CTRL_CNT_CLR_CE_BIT) | val;
	writel(tmp, qm->io_base + HPRE_CTRL_CNT_CLR_CE);

	return 0;
}

static u32 hpre_cluster_inqry_read(struct hpre_debugfs_file *file)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);
	int cluster_index = file->index - HPRE_CLUSTER_CTRL;
	unsigned long offset = HPRE_CLSTR_BASE +
			       cluster_index * HPRE_CLSTR_ADDR_INTRVL;

	return readl(qm->io_base + offset + HPRE_CLSTR_ADDR_INQRY_RSLT);
}

static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);
	int cluster_index = file->index - HPRE_CLUSTER_CTRL;
	unsigned long offset = HPRE_CLSTR_BASE + cluster_index *
			       HPRE_CLSTR_ADDR_INTRVL;

	writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY);

	return 0;
}

static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
				    size_t count, loff_t *pos)
{
	struct hpre_debugfs_file *file = filp->private_data;
	char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
	u32 val;
	int ret;

	spin_lock_irq(&file->lock);
	switch (file->type) {
	case HPRE_CURRENT_QM:
		val = hpre_current_qm_read(file);
		break;
	case HPRE_CLEAR_ENABLE:
		val = hpre_clear_enable_read(file);
		break;
	case HPRE_CLUSTER_CTRL:
		val = hpre_cluster_inqry_read(file);
		break;
	default:
		spin_unlock_irq(&file->lock);
		return -EINVAL;
	}
	spin_unlock_irq(&file->lock);
	ret = snprintf(tbuf, HPRE_DBGFS_VAL_MAX_LEN, "%u\n", val);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
				     size_t count, loff_t *pos)
{
	struct hpre_debugfs_file *file = filp->private_data;
	char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
	unsigned long val;
	int len, ret;

	if (*pos != 0)
		return 0;

	if (count >= HPRE_DBGFS_VAL_MAX_LEN)
		return -ENOSPC;

	len = simple_write_to_buffer(tbuf, HPRE_DBGFS_VAL_MAX_LEN - 1,
				     pos, buf, count);
	if (len < 0)
		return len;

	tbuf[len] = '\0';
	if (kstrtoul(tbuf, 0, &val))
		return -EFAULT;

	spin_lock_irq(&file->lock);
	switch (file->type) {
	case HPRE_CURRENT_QM:
		ret = hpre_current_qm_write(file, val);
		if (ret)
			goto err_input;
		break;
	case HPRE_CLEAR_ENABLE:
		ret = hpre_clear_enable_write(file, val);
		if (ret)
			goto err_input;
		break;
	case HPRE_CLUSTER_CTRL:
		ret = hpre_cluster_inqry_write(file, val);
		if (ret)
			goto err_input;
		break;
	default:
		ret = -EINVAL;
		goto err_input;
	}
	spin_unlock_irq(&file->lock);

	return count;

err_input:
	spin_unlock_irq(&file->lock);
	return ret;
}

static const struct file_operations hpre_ctrl_debug_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = hpre_ctrl_debug_read,
	.write = hpre_ctrl_debug_write,
};

static int hpre_debugfs_atomic64_get(void *data, u64 *val)
{
	struct hpre_dfx *dfx_item = data;

	*val = atomic64_read(&dfx_item->value);

	return 0;
}

static int hpre_debugfs_atomic64_set(void *data, u64 val)
{
	struct hpre_dfx *dfx_item = data;
	struct hpre_dfx *hpre_dfx = NULL;

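	/*
	 * Writing overtime_thrhld also resets over_thrhld_cnt: step back
	 * from dfx_item to the base of the hpre_dfx array to reach it.
	 */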
	if (dfx_item->type == HPRE_OVERTIME_THRHLD) {
		hpre_dfx = dfx_item - HPRE_OVERTIME_THRHLD;
		atomic64_set(&hpre_dfx[HPRE_OVER_THRHLD_CNT].value, 0);
	} else if (val) {
		return -EINVAL;
	}

	atomic64_set(&dfx_item->value, val);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get,
			 hpre_debugfs_atomic64_set, "%llu\n");

static int hpre_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
				    enum hpre_ctrl_dbgfs_file type, int indx)
{
	struct hpre *hpre = container_of(qm, struct hpre, qm);
	struct hpre_debug *dbg = &hpre->debug;
	struct dentry *file_dir;

	if (dir)
		file_dir = dir;
	else
		file_dir = qm->debug.debug_root;

	if (type >= HPRE_DEBUG_FILE_NUM)
		return -EINVAL;

	spin_lock_init(&dbg->files[indx].lock);
	dbg->files[indx].debug = dbg;
	dbg->files[indx].type = type;
	dbg->files[indx].index = indx;
	debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir,
			    dbg->files + indx, &hpre_ctrl_debug_fops);

	return 0;
}

static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	struct debugfs_regset32 *regset;

	regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return -ENOMEM;

	regset->regs = hpre_com_dfx_regs;
	regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs);
	regset->base = qm->io_base;

	debugfs_create_regset32("regs", 0444, qm->debug.debug_root, regset);
	return 0;
}

static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
{
	u8 clusters_num = HPRE_CLUSTERS_NUM(qm);
	struct device *dev = &qm->pdev->dev;
	char buf[HPRE_DBGFS_VAL_MAX_LEN];
	struct debugfs_regset32 *regset;
	struct dentry *tmp_d;
	int i, ret;

	for (i = 0; i < clusters_num; i++) {
		ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
		if (ret < 0)
			return -EINVAL;
		tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);

		regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
		if (!regset)
			return -ENOMEM;

		regset->regs = hpre_cluster_dfx_regs;
		regset->nregs = ARRAY_SIZE(hpre_cluster_dfx_regs);
		regset->base = qm->io_base + hpre_cluster_offsets[i];

		debugfs_create_regset32("regs", 0444, tmp_d, regset);
		ret = hpre_create_debugfs_file(qm, tmp_d, HPRE_CLUSTER_CTRL,
					       i + HPRE_CLUSTER_CTRL);
		if (ret)
			return ret;
	}

	return 0;
}

static int hpre_ctrl_debug_init(struct hisi_qm *qm)
{
	int ret;

	ret = hpre_create_debugfs_file(qm, NULL, HPRE_CURRENT_QM,
				       HPRE_CURRENT_QM);
	if (ret)
		return ret;

	ret = hpre_create_debugfs_file(qm, NULL, HPRE_CLEAR_ENABLE,
				       HPRE_CLEAR_ENABLE);
	if (ret)
		return ret;

	ret = hpre_pf_comm_regs_debugfs_init(qm);
	if (ret)
		return ret;

	return hpre_cluster_debugfs_init(qm);
}

static void hpre_dfx_debug_init(struct hisi_qm *qm)
{
	struct hpre *hpre = container_of(qm, struct hpre, qm);
	struct hpre_dfx *dfx = hpre->debug.dfx;
	struct dentry *parent;
	int i;

	parent = debugfs_create_dir("hpre_dfx", qm->debug.debug_root);
	for (i = 0; i < HPRE_DFX_FILE_NUM; i++) {
		dfx[i].type = i;
		debugfs_create_file(hpre_dfx_files[i], 0644, parent, &dfx[i],
				    &hpre_atomic64_ops);
	}
}

static int hpre_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
						  hpre_debugfs_root);

	qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET;
	qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN;
	hisi_qm_debug_init(qm);

	if (qm->pdev->device == HPRE_PCI_DEVICE_ID) {
		ret = hpre_ctrl_debug_init(qm);
		if (ret)
			goto failed_to_create;
	}

	hpre_dfx_debug_init(qm);

	return 0;

failed_to_create:
	debugfs_remove_recursive(qm->debug.debug_root);
	return ret;
}

static void hpre_debugfs_exit(struct hisi_qm *qm)
{
	debugfs_remove_recursive(qm->debug.debug_root);
}

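/*
 * Basic QM setup: reject the unsupported HW V1, advertise the algorithm
 * set matching the device revision, and size the PF queue range.
 */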
static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
	if (pdev->revision == QM_HW_V1) {
		pci_warn(pdev, "HPRE version 1 is not supported!\n");
		return -EINVAL;
	}

	if (pdev->revision >= QM_HW_V3)
		qm->algs = "rsa\ndh\necdh\nx25519\nx448\necdsa\nsm2\n";
	else
		qm->algs = "rsa\ndh\n";
	qm->mode = uacce_mode;
	qm->pdev = pdev;
	qm->ver = pdev->revision;
	qm->sqe_size = HPRE_SQE_SIZE;
	qm->dev_name = hpre_name;

	qm->fun_type = (pdev->device == HPRE_PCI_DEVICE_ID) ?
			QM_HW_PF : QM_HW_VF;
	if (qm->fun_type == QM_HW_PF) {
		qm->qp_base = HPRE_PF_DEF_Q_BASE;
		qm->qp_num = pf_q_num;
		qm->debug.curr_qm_qp_num = pf_q_num;
		qm->qm_list = &hpre_devices;
	}

	return hisi_qm_init(qm);
}

static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
	const struct hpre_hw_error *err = hpre_hw_errors;
	struct device *dev = &qm->pdev->dev;

	while (err->msg) {
		if (err->int_msk & err_sts)
			dev_warn(dev, "%s [error status=0x%x] found\n",
				 err->msg, err->int_msk);
		err++;
	}
}

static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + HPRE_HAC_INT_STATUS);
}

static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
	writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
}

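/*
 * Re-open the AXI master out-of-order path after an error by toggling
 * the shutdown-enable bit off and back on.
 */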
static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
{
	u32 value;

	value = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
	writel(value & ~HPRE_AM_OOO_SHUTDOWN_ENABLE,
	       HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
	writel(value | HPRE_AM_OOO_SHUTDOWN_ENABLE,
	       HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
}

static const struct hisi_qm_err_ini hpre_err_ini = {
	.hw_init		= hpre_set_user_domain_and_cache,
	.hw_err_enable		= hpre_hw_error_enable,
	.hw_err_disable		= hpre_hw_error_disable,
	.get_dev_hw_err_status	= hpre_get_hw_err_status,
	.clear_dev_hw_err_status = hpre_clear_hw_err_status,
	.log_dev_hw_err		= hpre_log_hw_error,
	.open_axi_master_ooo	= hpre_open_axi_master_ooo,
	.err_info		= {
		.ce			= QM_BASE_CE,
		.nfe			= QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT,
		.fe			= 0,
		.ecc_2bits_mask		= HPRE_CORE_ECC_2BIT_ERR |
					  HPRE_OOO_ECC_2BIT_ERR,
		.msi_wr_port		= HPRE_WR_MSI_PORT,
		.acpi_rst		= "HRST",
	}
};

static int hpre_pf_probe_init(struct hpre *hpre)
{
	struct hisi_qm *qm = &hpre->qm;
	int ret;

	qm->ctrl_qp_num = HPRE_QUEUE_NUM_V2;

	ret = hpre_set_user_domain_and_cache(qm);
	if (ret)
		return ret;

	qm->err_ini = &hpre_err_ini;
	hisi_qm_dev_err_init(qm);

	return 0;
}

static int hpre_probe_init(struct hpre *hpre)
{
	struct hisi_qm *qm = &hpre->qm;
	int ret;

	if (qm->fun_type == QM_HW_PF) {
		ret = hpre_pf_probe_init(hpre);
		if (ret)
			return ret;
	}

	return 0;
}

static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct hisi_qm *qm;
	struct hpre *hpre;
	int ret;

	hpre = devm_kzalloc(&pdev->dev, sizeof(*hpre), GFP_KERNEL);
	if (!hpre)
		return -ENOMEM;

	qm = &hpre->qm;
	ret = hpre_qm_init(qm, pdev);
	if (ret) {
		pci_err(pdev, "Failed to init HPRE QM (%d)!\n", ret);
		return ret;
	}

	ret = hpre_probe_init(hpre);
	if (ret) {
		pci_err(pdev, "Failed to probe (%d)!\n", ret);
		goto err_with_qm_init;
	}

	ret = hisi_qm_start(qm);
	if (ret)
		goto err_with_err_init;

	ret = hpre_debugfs_init(qm);
	if (ret)
		dev_warn(&pdev->dev, "init debugfs fail!\n");

	ret = hisi_qm_alg_register(qm, &hpre_devices);
	if (ret < 0) {
		pci_err(pdev, "fail to register algs to crypto!\n");
		goto err_with_qm_start;
	}

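	/*
	 * hisi_qm_init() allocates qm->uacce when uacce_mode requests it;
	 * register it here to expose the queues to user space.
	 */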
	if (qm->uacce) {
		ret = uacce_register(qm->uacce);
		if (ret) {
			pci_err(pdev, "failed to register uacce (%d)!\n", ret);
			goto err_with_alg_register;
		}
	}

	if (qm->fun_type == QM_HW_PF && vfs_num) {
		ret = hisi_qm_sriov_enable(pdev, vfs_num);
		if (ret < 0)
			goto err_with_alg_register;
	}

	return 0;

err_with_alg_register:
	hisi_qm_alg_unregister(qm, &hpre_devices);

err_with_qm_start:
	hpre_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);

err_with_err_init:
	hisi_qm_dev_err_uninit(qm);

err_with_qm_init:
	hisi_qm_uninit(qm);

	return ret;
}

static void hpre_remove(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	hisi_qm_wait_task_finish(qm, &hpre_devices);
	hisi_qm_alg_unregister(qm, &hpre_devices);
	if (qm->fun_type == QM_HW_PF && qm->vfs_num) {
		ret = hisi_qm_sriov_disable(pdev, true);
		if (ret) {
			pci_err(pdev, "Disable SRIOV fail!\n");
			return;
		}
	}

	hpre_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);

	if (qm->fun_type == QM_HW_PF) {
		if (qm->use_sva && qm->ver == QM_HW_V2)
			hpre_pasid_disable(qm);
		hpre_cnt_regs_clear(qm);
		qm->debug.curr_qm_qp_num = 0;
		hisi_qm_dev_err_uninit(qm);
	}

	hisi_qm_uninit(qm);
}

static const struct pci_error_handlers hpre_err_handler = {
	.error_detected		= hisi_qm_dev_err_detected,
	.slot_reset		= hisi_qm_dev_slot_reset,
	.reset_prepare		= hisi_qm_reset_prepare,
	.reset_done		= hisi_qm_reset_done,
};

static struct pci_driver hpre_pci_driver = {
	.name			= hpre_name,
	.id_table		= hpre_dev_ids,
	.probe			= hpre_probe,
	.remove			= hpre_remove,
	.sriov_configure	= IS_ENABLED(CONFIG_PCI_IOV) ?
				  hisi_qm_sriov_configure : NULL,
	.err_handler		= &hpre_err_handler,
	.shutdown		= hisi_qm_dev_shutdown,
};

static void hpre_register_debugfs(void)
{
	if (!debugfs_initialized())
		return;

	hpre_debugfs_root = debugfs_create_dir(hpre_name, NULL);
}

static void hpre_unregister_debugfs(void)
{
	debugfs_remove_recursive(hpre_debugfs_root);
}

static int __init hpre_init(void)
{
	int ret;

	hisi_qm_init_list(&hpre_devices);
	hpre_register_debugfs();

	ret = pci_register_driver(&hpre_pci_driver);
	if (ret) {
		hpre_unregister_debugfs();
		pr_err("hpre: can't register hisi hpre driver.\n");
	}

	return ret;
}

static void __exit hpre_exit(void)
{
	pci_unregister_driver(&hpre_pci_driver);
	hpre_unregister_debugfs();
}

module_init(hpre_init);
module_exit(hpre_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon HPRE accelerator");