// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2019 HiSilicon Limited. */
#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/topology.h>
#include <linux/uacce.h>
#include "hpre.h"

#define HPRE_QM_ABNML_INT_MASK		0x100004
#define HPRE_CTRL_CNT_CLR_CE_BIT	BIT(0)
#define HPRE_COMM_CNT_CLR_CE		0x0
#define HPRE_CTRL_CNT_CLR_CE		0x301000
#define HPRE_FSM_MAX_CNT		0x301008
#define HPRE_VFG_AXQOS			0x30100c
#define HPRE_VFG_AXCACHE		0x301010
#define HPRE_RDCHN_INI_CFG		0x301014
#define HPRE_AWUSR_FP_CFG		0x301018
#define HPRE_BD_ENDIAN			0x301020
#define HPRE_ECC_BYPASS			0x301024
#define HPRE_RAS_WIDTH_CFG		0x301028
#define HPRE_POISON_BYPASS		0x30102c
#define HPRE_BD_ARUSR_CFG		0x301030
#define HPRE_BD_AWUSR_CFG		0x301034
#define HPRE_TYPES_ENB			0x301038
#define HPRE_RSA_ENB			BIT(0)
#define HPRE_ECC_ENB			BIT(1)
#define HPRE_DATA_RUSER_CFG		0x30103c
#define HPRE_DATA_WUSER_CFG		0x301040
#define HPRE_INT_MASK			0x301400
#define HPRE_INT_STATUS			0x301800
#define HPRE_HAC_INT_MSK		0x301400
#define HPRE_HAC_RAS_CE_ENB		0x301410
#define HPRE_HAC_RAS_NFE_ENB		0x301414
#define HPRE_HAC_RAS_FE_ENB		0x301418
#define HPRE_HAC_INT_SET		0x301500
#define HPRE_RNG_TIMEOUT_NUM		0x301A34
#define HPRE_CORE_INT_ENABLE		0
#define HPRE_CORE_INT_DISABLE		GENMASK(21, 0)
#define HPRE_RDCHN_INI_ST		0x301a00
#define HPRE_CLSTR_BASE			0x302000
#define HPRE_CORE_EN_OFFSET		0x04
#define HPRE_CORE_INI_CFG_OFFSET	0x20
#define HPRE_CORE_INI_STATUS_OFFSET	0x80
#define HPRE_CORE_HTBT_WARN_OFFSET	0x8c
#define HPRE_CORE_IS_SCHD_OFFSET	0x90

#define HPRE_RAS_CE_ENB			0x301410
#define HPRE_RAS_NFE_ENB		0x301414
#define HPRE_RAS_FE_ENB			0x301418
#define HPRE_OOO_SHUTDOWN_SEL		0x301a3c
#define HPRE_HAC_RAS_FE_ENABLE		0

#define HPRE_CORE_ENB		(HPRE_CLSTR_BASE + HPRE_CORE_EN_OFFSET)
#define HPRE_CORE_INI_CFG	(HPRE_CLSTR_BASE + HPRE_CORE_INI_CFG_OFFSET)
#define HPRE_CORE_INI_STATUS (HPRE_CLSTR_BASE + HPRE_CORE_INI_STATUS_OFFSET)
#define HPRE_HAC_ECC1_CNT		0x301a04
#define HPRE_HAC_ECC2_CNT		0x301a08
#define HPRE_HAC_SOURCE_INT		0x301600
#define HPRE_CLSTR_ADDR_INTRVL		0x1000
#define HPRE_CLUSTER_INQURY		0x100
#define HPRE_CLSTR_ADDR_INQRY_RSLT	0x104
#define HPRE_TIMEOUT_ABNML_BIT		6
#define HPRE_PASID_EN_BIT		9
#define HPRE_REG_RD_INTVRL_US		10
#define HPRE_REG_RD_TMOUT_US		1000
#define HPRE_DBGFS_VAL_MAX_LEN		20
#define PCI_DEVICE_ID_HUAWEI_HPRE_PF	0xa258
#define HPRE_QM_USR_CFG_MASK		GENMASK(31, 1)
#define HPRE_QM_AXI_CFG_MASK		GENMASK(15, 0)
#define HPRE_QM_VFG_AX_MASK		GENMASK(7, 0)
#define HPRE_BD_USR_MASK		GENMASK(1, 0)
#define HPRE_PREFETCH_CFG		0x301130
#define HPRE_SVA_PREFTCH_DFX		0x30115C
#define HPRE_PREFETCH_ENABLE		(~(BIT(0) | BIT(30)))
#define HPRE_PREFETCH_DISABLE		BIT(30)
#define HPRE_SVA_DISABLE_READY		(BIT(4) | BIT(8))

/* clock gate */
#define HPRE_CLKGATE_CTL		0x301a10
#define HPRE_PEH_CFG_AUTO_GATE		0x301a2c
#define HPRE_CLUSTER_DYN_CTL		0x302010
#define HPRE_CORE_SHB_CFG		0x302088
#define HPRE_CLKGATE_CTL_EN		BIT(0)
#define HPRE_PEH_CFG_AUTO_GATE_EN	BIT(0)
#define HPRE_CLUSTER_DYN_CTL_EN		BIT(0)
#define HPRE_CORE_GATE_EN		(BIT(30) | BIT(31))

#define HPRE_AM_OOO_SHUTDOWN_ENB	0x301044
#define HPRE_AM_OOO_SHUTDOWN_ENABLE	BIT(0)
#define HPRE_WR_MSI_PORT		BIT(2)

#define HPRE_CORE_ECC_2BIT_ERR		BIT(1)
#define HPRE_OOO_ECC_2BIT_ERR		BIT(5)

#define HPRE_QM_BME_FLR			BIT(7)
#define HPRE_QM_PM_FLR			BIT(11)
#define HPRE_QM_SRIOV_FLR		BIT(12)

#define HPRE_SHAPER_TYPE_RATE		640
#define HPRE_VIA_MSI_DSM		1
#define HPRE_SQE_MASK_OFFSET		8
#define HPRE_SQE_MASK_LEN		24

#define HPRE_DFX_BASE		0x301000
#define HPRE_DFX_COMMON1		0x301400
#define HPRE_DFX_COMMON2		0x301A00
#define HPRE_DFX_CORE		0x302000
#define HPRE_DFX_BASE_LEN		0x55
#define HPRE_DFX_COMMON1_LEN		0x41
#define HPRE_DFX_COMMON2_LEN		0xE
#define HPRE_DFX_CORE_LEN		0x43

#define HPRE_DEV_ALG_MAX_LEN	256

static const char hpre_name[] = "hisi_hpre";
static struct dentry *hpre_debugfs_root;
static const struct pci_device_id hpre_dev_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_PF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_VF) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, hpre_dev_ids);

struct hpre_hw_error {
	u32 int_msk;
	const char *msg;
};

struct hpre_dev_alg {
	u32 alg_msk;
	const char *alg;
};

static const struct hpre_dev_alg hpre_dev_algs[] = {
	{
		.alg_msk = BIT(0),
		.alg = "rsa\n"
	}, {
		.alg_msk = BIT(1),
		.alg = "dh\n"
	}, {
		.alg_msk = BIT(2),
		.alg = "ecdh\n"
	}, {
		.alg_msk = BIT(3),
		.alg = "ecdsa\n"
	}, {
		.alg_msk = BIT(4),
		.alg = "sm2\n"
	}, {
		.alg_msk = BIT(5),
		.alg = "x25519\n"
	}, {
		.alg_msk = BIT(6),
		.alg = "x448\n"
	}, {
		/* sentinel */
	}
};

static struct hisi_qm_list hpre_devices = {
	.register_to_crypto	= hpre_algs_register,
	.unregister_from_crypto	= hpre_algs_unregister,
};

static const char * const hpre_debug_file_name[] = {
	[HPRE_CLEAR_ENABLE] = "rdclr_en",
	[HPRE_CLUSTER_CTRL] = "cluster_ctrl",
};

enum hpre_cap_type {
	HPRE_QM_NFE_MASK_CAP,
	HPRE_QM_RESET_MASK_CAP,
	HPRE_QM_OOO_SHUTDOWN_MASK_CAP,
	HPRE_QM_CE_MASK_CAP,
	HPRE_NFE_MASK_CAP,
	HPRE_RESET_MASK_CAP,
	HPRE_OOO_SHUTDOWN_MASK_CAP,
	HPRE_CE_MASK_CAP,
	HPRE_CLUSTER_NUM_CAP,
	HPRE_CORE_TYPE_NUM_CAP,
	HPRE_CORE_NUM_CAP,
	HPRE_CLUSTER_CORE_NUM_CAP,
	HPRE_CORE_ENABLE_BITMAP_CAP,
	HPRE_DRV_ALG_BITMAP_CAP,
	HPRE_DEV_ALG_BITMAP_CAP,
	HPRE_CORE1_ALG_BITMAP_CAP,
	HPRE_CORE2_ALG_BITMAP_CAP,
	HPRE_CORE3_ALG_BITMAP_CAP,
	HPRE_CORE4_ALG_BITMAP_CAP,
	HPRE_CORE5_ALG_BITMAP_CAP,
	HPRE_CORE6_ALG_BITMAP_CAP,
	HPRE_CORE7_ALG_BITMAP_CAP,
	HPRE_CORE8_ALG_BITMAP_CAP,
	HPRE_CORE9_ALG_BITMAP_CAP,
	HPRE_CORE10_ALG_BITMAP_CAP
};

static const struct hisi_qm_cap_info hpre_basic_info[] = {
	{HPRE_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C37, 0x7C37},
	{HPRE_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC37, 0x6C37},
	{HPRE_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C37},
	{HPRE_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
	{HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xFFFFFE},
	{HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFFFE},
	{HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFFFE},
	{HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1},
	{HPRE_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x0,  0x4, 0x1},
	{HPRE_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x0, 0x2, 0x2},
	{HPRE_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x0, 0x8, 0xA},
	{HPRE_CLUSTER_CORE_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x0, 0x2, 0xA},
	{HPRE_CORE_ENABLE_BITMAP_CAP, 0x3140, 0, GENMASK(31, 0), 0x0, 0xF, 0x3FF},
	{HPRE_DRV_ALG_BITMAP_CAP, 0x3144, 0, GENMASK(31, 0), 0x0, 0x03, 0x27},
	{HPRE_DEV_ALG_BITMAP_CAP, 0x3148, 0, GENMASK(31, 0), 0x0, 0x03, 0x7F},
	{HPRE_CORE1_ALG_BITMAP_CAP, 0x314c, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE2_ALG_BITMAP_CAP, 0x3150, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE3_ALG_BITMAP_CAP, 0x3154, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE4_ALG_BITMAP_CAP, 0x3158, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE5_ALG_BITMAP_CAP, 0x315c, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE6_ALG_BITMAP_CAP, 0x3160, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE7_ALG_BITMAP_CAP, 0x3164, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE8_ALG_BITMAP_CAP, 0x3168, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE9_ALG_BITMAP_CAP, 0x316c, 0, GENMASK(31, 0), 0x0, 0x10, 0x10},
	{HPRE_CORE10_ALG_BITMAP_CAP, 0x3170, 0, GENMASK(31, 0), 0x0, 0x10, 0x10}
};

static const struct hpre_hw_error hpre_hw_errors[] = {
	{
		.int_msk = BIT(0),
		.msg = "core_ecc_1bit_err_int_set"
	}, {
		.int_msk = BIT(1),
		.msg = "core_ecc_2bit_err_int_set"
	}, {
		.int_msk = BIT(2),
		.msg = "dat_wb_poison_int_set"
	}, {
		.int_msk = BIT(3),
		.msg = "dat_rd_poison_int_set"
	}, {
		.int_msk = BIT(4),
		.msg = "bd_rd_poison_int_set"
	}, {
		.int_msk = BIT(5),
		.msg = "ooo_ecc_2bit_err_int_set"
	}, {
		.int_msk = BIT(6),
		.msg = "cluster1_shb_timeout_int_set"
	}, {
		.int_msk = BIT(7),
		.msg = "cluster2_shb_timeout_int_set"
	}, {
		.int_msk = BIT(8),
		.msg = "cluster3_shb_timeout_int_set"
	}, {
		.int_msk = BIT(9),
		.msg = "cluster4_shb_timeout_int_set"
	}, {
		.int_msk = GENMASK(15, 10),
		.msg = "ooo_rdrsp_err_int_set"
	}, {
		.int_msk = GENMASK(21, 16),
		.msg = "ooo_wrrsp_err_int_set"
	}, {
		.int_msk = BIT(22),
		.msg = "pt_rng_timeout_int_set"
	}, {
		.int_msk = BIT(23),
		.msg = "sva_fsm_timeout_int_set"
	}, {
		/* sentinel */
	}
};

static const u64 hpre_cluster_offsets[] = {
	[HPRE_CLUSTER0] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER0 * HPRE_CLSTR_ADDR_INTRVL,
	[HPRE_CLUSTER1] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER1 * HPRE_CLSTR_ADDR_INTRVL,
	[HPRE_CLUSTER2] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER2 * HPRE_CLSTR_ADDR_INTRVL,
	[HPRE_CLUSTER3] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER3 * HPRE_CLSTR_ADDR_INTRVL,
};

static const struct debugfs_reg32 hpre_cluster_dfx_regs[] = {
	{"CORES_EN_STATUS     ",  HPRE_CORE_EN_OFFSET},
	{"CORES_INI_CFG       ",  HPRE_CORE_INI_CFG_OFFSET},
	{"CORES_INI_STATUS    ",  HPRE_CORE_INI_STATUS_OFFSET},
	{"CORES_HTBT_WARN     ",  HPRE_CORE_HTBT_WARN_OFFSET},
	{"CORES_IS_SCHD       ",  HPRE_CORE_IS_SCHD_OFFSET},
};

static const struct debugfs_reg32 hpre_com_dfx_regs[] = {
	{"READ_CLR_EN     ",  HPRE_CTRL_CNT_CLR_CE},
	{"AXQOS           ",  HPRE_VFG_AXQOS},
	{"AWUSR_CFG       ",  HPRE_AWUSR_FP_CFG},
	{"BD_ENDIAN       ",  HPRE_BD_ENDIAN},
	{"ECC_CHECK_CTRL  ",  HPRE_ECC_BYPASS},
	{"RAS_INT_WIDTH   ",  HPRE_RAS_WIDTH_CFG},
	{"POISON_BYPASS   ",  HPRE_POISON_BYPASS},
	{"BD_ARUSER       ",  HPRE_BD_ARUSR_CFG},
	{"BD_AWUSER       ",  HPRE_BD_AWUSR_CFG},
	{"DATA_ARUSER     ",  HPRE_DATA_RUSER_CFG},
	{"DATA_AWUSER     ",  HPRE_DATA_WUSER_CFG},
	{"INT_STATUS      ",  HPRE_INT_STATUS},
	{"INT_MASK        ",  HPRE_HAC_INT_MSK},
	{"RAS_CE_ENB      ",  HPRE_HAC_RAS_CE_ENB},
	{"RAS_NFE_ENB     ",  HPRE_HAC_RAS_NFE_ENB},
	{"RAS_FE_ENB      ",  HPRE_HAC_RAS_FE_ENB},
	{"INT_SET         ",  HPRE_HAC_INT_SET},
	{"RNG_TIMEOUT_NUM ",  HPRE_RNG_TIMEOUT_NUM},
};

static const char *hpre_dfx_files[HPRE_DFX_FILE_NUM] = {
	"send_cnt",
	"recv_cnt",
	"send_fail_cnt",
	"send_busy_cnt",
	"over_thrhld_cnt",
	"overtime_thrhld",
	"invalid_req_cnt"
};

/* define the HPRE's dfx regs region and region length */
static struct dfx_diff_registers hpre_diff_regs[] = {
	{
		.reg_offset = HPRE_DFX_BASE,
		.reg_len = HPRE_DFX_BASE_LEN,
	}, {
		.reg_offset = HPRE_DFX_COMMON1,
		.reg_len = HPRE_DFX_COMMON1_LEN,
	}, {
		.reg_offset = HPRE_DFX_COMMON2,
		.reg_len = HPRE_DFX_COMMON2_LEN,
	}, {
		.reg_offset = HPRE_DFX_CORE,
		.reg_len = HPRE_DFX_CORE_LEN,
	},
};

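/*
 * Check whether an algorithm is supported by the driver by testing @alg
 * against the HPRE_DRV_ALG_BITMAP_CAP capability read from the hardware.
 */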
bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg)
{
	u32 cap_val;

	cap_val = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_DRV_ALG_BITMAP_CAP, qm->cap_ver);
	if (alg & cap_val)
		return true;

	return false;
}

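/*
 * Build the algorithm string exposed through uacce from the device
 * algorithm bitmap capability; only done when SVA (uacce) mode is in use.
 */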
static int hpre_set_qm_algs(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	char *algs, *ptr;
	u32 alg_msk;
	int i;

	if (!qm->use_sva)
		return 0;

	algs = devm_kzalloc(dev, HPRE_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
	if (!algs)
		return -ENOMEM;

	alg_msk = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_DEV_ALG_BITMAP_CAP, qm->cap_ver);

	for (i = 0; i < ARRAY_SIZE(hpre_dev_algs); i++)
		if (alg_msk & hpre_dev_algs[i].alg_msk)
			strcat(algs, hpre_dev_algs[i].alg);

	ptr = strrchr(algs, '\n');
	if (ptr)
		*ptr = '\0';

	qm->uacce->algs = algs;

	return 0;
}

static int hpre_diff_regs_show(struct seq_file *s, void *unused)
{
	struct hisi_qm *qm = s->private;

	hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs,
					ARRAY_SIZE(hpre_diff_regs));

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(hpre_diff_regs);

static int hpre_com_regs_show(struct seq_file *s, void *unused)
{
	hisi_qm_regs_dump(s, s->private);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(hpre_com_regs);

static int hpre_cluster_regs_show(struct seq_file *s, void *unused)
{
	hisi_qm_regs_dump(s, s->private);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(hpre_cluster_regs);

static const struct kernel_param_ops hpre_uacce_mode_ops = {
	.set = uacce_mode_set,
	.get = param_get_int,
};

/*
 * uacce_mode = 0 means hpre only registers to crypto,
 * uacce_mode = 1 means hpre registers to both crypto and uacce.
 */
static u32 uacce_mode = UACCE_MODE_NOUACCE;
module_param_cb(uacce_mode, &hpre_uacce_mode_ops, &uacce_mode, 0444);
MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);

static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_HPRE_PF);
}

static const struct kernel_param_ops hpre_pf_q_num_ops = {
	.set = pf_q_num_set,
	.get = param_get_int,
};

static u32 pf_q_num = HPRE_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &hpre_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF of CS(2-1024)");

static const struct kernel_param_ops vfs_num_ops = {
	.set = vfs_num_set,
	.get = param_get_int,
};

static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");

static inline int hpre_cluster_num(struct hisi_qm *qm)
{
	return hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CLUSTER_NUM_CAP, qm->cap_ver);
}

static inline int hpre_cluster_core_mask(struct hisi_qm *qm)
{
	return hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CORE_ENABLE_BITMAP_CAP, qm->cap_ver);
}

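/*
 * Allocate one queue pair of the requested type from the hpre device list,
 * preferring a device on the NUMA node of the calling CPU.
 */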
struct hisi_qp *hpre_create_qp(u8 type)
{
	int node = cpu_to_node(smp_processor_id());
	struct hisi_qp *qp = NULL;
	int ret;

	if (type != HPRE_V2_ALG_TYPE && type != HPRE_V3_ECC_ALG_TYPE)
		return NULL;

	/*
	 * type: 0 - RSA/DH algorithm supported in V2,
	 *       1 - ECC algorithm in V3.
	 */
	ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, type, node, &qp);
	if (!ret)
		return qp;

	return NULL;
}

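/*
 * Enable or disable the PASID field in the data buffer user configuration
 * according to SVA usage; only needed on Kunpeng 920 (HW V2).
 */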
static void hpre_config_pasid(struct hisi_qm *qm)
{
	u32 val1, val2;

	if (qm->ver >= QM_HW_V3)
		return;

	val1 = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
	val2 = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
	if (qm->use_sva) {
		val1 |= BIT(HPRE_PASID_EN_BIT);
		val2 |= BIT(HPRE_PASID_EN_BIT);
	} else {
		val1 &= ~BIT(HPRE_PASID_EN_BIT);
		val2 &= ~BIT(HPRE_PASID_EN_BIT);
	}
	writel_relaxed(val1, qm->io_base + HPRE_DATA_RUSER_CFG);
	writel_relaxed(val2, qm->io_base + HPRE_DATA_WUSER_CFG);
}

static int hpre_cfg_by_dsm(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	union acpi_object *obj;
	guid_t guid;

	if (guid_parse("b06b81ab-0134-4a45-9b0c-483447b95fa7", &guid)) {
		dev_err(dev, "Hpre GUID failed\n");
		return -EINVAL;
	}

	/* Switch over to MSI handling due to non-standard PCI implementation */
	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid,
				0, HPRE_VIA_MSI_DSM, NULL);
	if (!obj) {
		dev_err(dev, "ACPI handle failed!\n");
		return -EIO;
	}

	ACPI_FREE(obj);

	return 0;
}

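/* Enable the cores in each cluster and wait for their initialization to complete. */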
static int hpre_set_cluster(struct hisi_qm *qm)
{
	u32 cluster_core_mask = hpre_cluster_core_mask(qm);
	u8 clusters_num = hpre_cluster_num(qm);
	struct device *dev = &qm->pdev->dev;
	unsigned long offset;
	u32 val = 0;
	int ret, i;

	for (i = 0; i < clusters_num; i++) {
		offset = i * HPRE_CLSTR_ADDR_INTRVL;

		/* initiate the clusters */
		writel(cluster_core_mask,
		       qm->io_base + offset + HPRE_CORE_ENB);
		writel(0x1, qm->io_base + offset + HPRE_CORE_INI_CFG);
		ret = readl_relaxed_poll_timeout(qm->io_base + offset +
					HPRE_CORE_INI_STATUS, val,
					((val & cluster_core_mask) ==
					cluster_core_mask),
					HPRE_REG_RD_INTVRL_US,
					HPRE_REG_RD_TMOUT_US);
		if (ret) {
			dev_err(dev,
				"cluster %d int st status timeout!\n", i);
			return -ETIMEDOUT;
		}
	}

	return 0;
}

/*
 * For Kunpeng 920, we should disable FLR triggered by hardware (BME/PM/SRIOV).
 * Otherwise the device may stay in D3 state when we bind and unbind hpre
 * quickly, as it performs the hardware-triggered FLR.
 */
static void disable_flr_of_bme(struct hisi_qm *qm)
{
	u32 val;

	val = readl(qm->io_base + QM_PEH_AXUSER_CFG);
	val &= ~(HPRE_QM_BME_FLR | HPRE_QM_SRIOV_FLR);
	val |= HPRE_QM_PM_FLR;
	writel(val, qm->io_base + QM_PEH_AXUSER_CFG);
	writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);
}

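/* Enable SVA address prefetch if the hardware supports it. */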
static void hpre_open_sva_prefetch(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
		return;

	/* Enable prefetch */
	val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
	val &= HPRE_PREFETCH_ENABLE;
	writel(val, qm->io_base + HPRE_PREFETCH_CFG);

	ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_PREFETCH_CFG,
					 val, !(val & HPRE_PREFETCH_DISABLE),
					 HPRE_REG_RD_INTVRL_US,
					 HPRE_REG_RD_TMOUT_US);
	if (ret)
		pci_err(qm->pdev, "failed to open sva prefetch\n");
}

static void hpre_close_sva_prefetch(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
		return;

	val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
	val |= HPRE_PREFETCH_DISABLE;
	writel(val, qm->io_base + HPRE_PREFETCH_CFG);

	ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_SVA_PREFTCH_DFX,
					 val, !(val & HPRE_SVA_DISABLE_READY),
					 HPRE_REG_RD_INTVRL_US,
					 HPRE_REG_RD_TMOUT_US);
	if (ret)
		pci_err(qm->pdev, "failed to close sva prefetch\n");
}

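/* Dynamic clock gating control, only available from HW V3 onward. */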
static void hpre_enable_clock_gate(struct hisi_qm *qm)
{
	u32 val;

	if (qm->ver < QM_HW_V3)
		return;

	val = readl(qm->io_base + HPRE_CLKGATE_CTL);
	val |= HPRE_CLKGATE_CTL_EN;
	writel(val, qm->io_base + HPRE_CLKGATE_CTL);

	val = readl(qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
	val |= HPRE_PEH_CFG_AUTO_GATE_EN;
	writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);

	val = readl(qm->io_base + HPRE_CLUSTER_DYN_CTL);
	val |= HPRE_CLUSTER_DYN_CTL_EN;
	writel(val, qm->io_base + HPRE_CLUSTER_DYN_CTL);

	val = readl_relaxed(qm->io_base + HPRE_CORE_SHB_CFG);
	val |= HPRE_CORE_GATE_EN;
	writel(val, qm->io_base + HPRE_CORE_SHB_CFG);
}

static void hpre_disable_clock_gate(struct hisi_qm *qm)
{
	u32 val;

	if (qm->ver < QM_HW_V3)
		return;

	val = readl(qm->io_base + HPRE_CLKGATE_CTL);
	val &= ~HPRE_CLKGATE_CTL_EN;
	writel(val, qm->io_base + HPRE_CLKGATE_CTL);

	val = readl(qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
	val &= ~HPRE_PEH_CFG_AUTO_GATE_EN;
	writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);

	val = readl(qm->io_base + HPRE_CLUSTER_DYN_CTL);
	val &= ~HPRE_CLUSTER_DYN_CTL_EN;
	writel(val, qm->io_base + HPRE_CLUSTER_DYN_CTL);

	val = readl_relaxed(qm->io_base + HPRE_CORE_SHB_CFG);
	val &= ~HPRE_CORE_GATE_EN;
	writel(val, qm->io_base + HPRE_CORE_SHB_CFG);
}

static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u32 val;
	int ret;

	/* disable dynamic clock gate before sram init */
	hpre_disable_clock_gate(qm);

	writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
	writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
	writel_relaxed(HPRE_QM_AXI_CFG_MASK, qm->io_base + QM_AXI_M_CFG);

	/* HPRE needs more time, so we close this interrupt */
	val = readl_relaxed(qm->io_base + HPRE_QM_ABNML_INT_MASK);
	val |= BIT(HPRE_TIMEOUT_ABNML_BIT);
	writel_relaxed(val, qm->io_base + HPRE_QM_ABNML_INT_MASK);

	if (qm->ver >= QM_HW_V3)
		writel(HPRE_RSA_ENB | HPRE_ECC_ENB,
			qm->io_base + HPRE_TYPES_ENB);
	else
		writel(HPRE_RSA_ENB, qm->io_base + HPRE_TYPES_ENB);

	writel(HPRE_QM_VFG_AX_MASK, qm->io_base + HPRE_VFG_AXCACHE);
	writel(0x0, qm->io_base + HPRE_BD_ENDIAN);
	writel(0x0, qm->io_base + HPRE_INT_MASK);
	writel(0x0, qm->io_base + HPRE_POISON_BYPASS);
	writel(0x0, qm->io_base + HPRE_COMM_CNT_CLR_CE);
	writel(0x0, qm->io_base + HPRE_ECC_BYPASS);

	writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_ARUSR_CFG);
	writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_AWUSR_CFG);
	writel(0x1, qm->io_base + HPRE_RDCHN_INI_CFG);
	ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_RDCHN_INI_ST, val,
			val & BIT(0),
			HPRE_REG_RD_INTVRL_US,
			HPRE_REG_RD_TMOUT_US);
	if (ret) {
		dev_err(dev, "read rd channel timeout fail!\n");
		return -ETIMEDOUT;
	}

	ret = hpre_set_cluster(qm);
	if (ret)
		return -ETIMEDOUT;

	/* This setting is only needed by Kunpeng 920. */
	if (qm->ver == QM_HW_V2) {
		ret = hpre_cfg_by_dsm(qm);
		if (ret)
			return ret;

		disable_flr_of_bme(qm);
	}

	/* Config data buffer pasid needed by Kunpeng 920 */
	hpre_config_pasid(qm);

	hpre_enable_clock_gate(qm);

	return ret;
}

static void hpre_cnt_regs_clear(struct hisi_qm *qm)
{
	u8 clusters_num = hpre_cluster_num(qm);
	unsigned long offset;
	int i;

	/* clear clusterX/cluster_ctrl */
	for (i = 0; i < clusters_num; i++) {
		offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL;
		writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY);
	}

	/* clear rdclr_en */
	writel(0x0, qm->io_base + HPRE_CTRL_CNT_CLR_CE);

	hisi_qm_debug_regs_clear(qm);
}

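/*
 * Enable or disable shutdown of the HPRE AXI master (OOO) when a non-fatal
 * error occurs; the shutdown mask comes from the capability table.
 */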
static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
{
	u32 val1, val2;

	val1 = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
	if (enable) {
		val1 |= HPRE_AM_OOO_SHUTDOWN_ENABLE;
		val2 = hisi_qm_get_hw_info(qm, hpre_basic_info,
					   HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
	} else {
		val1 &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE;
		val2 = 0x0;
	}

	if (qm->ver > QM_HW_V2)
		writel(val2, qm->io_base + HPRE_OOO_SHUTDOWN_SEL);

	writel(val1, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}

static void hpre_hw_error_disable(struct hisi_qm *qm)
{
	u32 ce, nfe;

	ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
	nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);

	/* disable hpre hw error interrupts */
	writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_INT_MASK);
	/* disable HPRE block master OOO when nfe occurs on Kunpeng930 */
	hpre_master_ooo_ctrl(qm, false);
}

static void hpre_hw_error_enable(struct hisi_qm *qm)
{
	u32 ce, nfe;

	ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
	nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);

	/* clear HPRE hw error source if any */
	writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_HAC_SOURCE_INT);

	/* configure error type */
	writel(ce, qm->io_base + HPRE_RAS_CE_ENB);
	writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
	writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);

	/* enable HPRE block master OOO when nfe occurs on Kunpeng930 */
	hpre_master_ooo_ctrl(qm, true);

	/* enable hpre hw error interrupts */
	writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK);
}

static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
{
	struct hpre *hpre = container_of(file->debug, struct hpre, debug);

	return &hpre->qm;
}

static u32 hpre_clear_enable_read(struct hpre_debugfs_file *file)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);

	return readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) &
	       HPRE_CTRL_CNT_CLR_CE_BIT;
}

static int hpre_clear_enable_write(struct hpre_debugfs_file *file, u32 val)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);
	u32 tmp;

	if (val != 1 && val != 0)
		return -EINVAL;

	tmp = (readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) &
	       ~HPRE_CTRL_CNT_CLR_CE_BIT) | val;
	writel(tmp, qm->io_base + HPRE_CTRL_CNT_CLR_CE);

	return 0;
}

static u32 hpre_cluster_inqry_read(struct hpre_debugfs_file *file)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);
	int cluster_index = file->index - HPRE_CLUSTER_CTRL;
	unsigned long offset = HPRE_CLSTR_BASE +
			       cluster_index * HPRE_CLSTR_ADDR_INTRVL;

	return readl(qm->io_base + offset + HPRE_CLSTR_ADDR_INQRY_RSLT);
}

static void hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);
	int cluster_index = file->index - HPRE_CLUSTER_CTRL;
	unsigned long offset = HPRE_CLSTR_BASE + cluster_index *
			       HPRE_CLSTR_ADDR_INTRVL;

	writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY);
}

static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
				    size_t count, loff_t *pos)
{
	struct hpre_debugfs_file *file = filp->private_data;
	struct hisi_qm *qm = hpre_file_to_qm(file);
	char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
	u32 val;
	int ret;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	spin_lock_irq(&file->lock);
	switch (file->type) {
	case HPRE_CLEAR_ENABLE:
		val = hpre_clear_enable_read(file);
		break;
	case HPRE_CLUSTER_CTRL:
		val = hpre_cluster_inqry_read(file);
		break;
	default:
		goto err_input;
	}
	spin_unlock_irq(&file->lock);

	hisi_qm_put_dfx_access(qm);
	ret = snprintf(tbuf, HPRE_DBGFS_VAL_MAX_LEN, "%u\n", val);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);

err_input:
	spin_unlock_irq(&file->lock);
	hisi_qm_put_dfx_access(qm);
	return -EINVAL;
}

static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
				     size_t count, loff_t *pos)
{
	struct hpre_debugfs_file *file = filp->private_data;
	struct hisi_qm *qm = hpre_file_to_qm(file);
	char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
	unsigned long val;
	int len, ret;

	if (*pos != 0)
		return 0;

	if (count >= HPRE_DBGFS_VAL_MAX_LEN)
		return -ENOSPC;

	len = simple_write_to_buffer(tbuf, HPRE_DBGFS_VAL_MAX_LEN - 1,
				     pos, buf, count);
	if (len < 0)
		return len;

	tbuf[len] = '\0';
	if (kstrtoul(tbuf, 0, &val))
		return -EFAULT;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	spin_lock_irq(&file->lock);
	switch (file->type) {
	case HPRE_CLEAR_ENABLE:
		ret = hpre_clear_enable_write(file, val);
		if (ret)
			goto err_input;
		break;
	case HPRE_CLUSTER_CTRL:
		hpre_cluster_inqry_write(file, val);
		break;
	default:
		ret = -EINVAL;
		goto err_input;
	}

	ret = count;

err_input:
	spin_unlock_irq(&file->lock);
	hisi_qm_put_dfx_access(qm);
	return ret;
}

static const struct file_operations hpre_ctrl_debug_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = hpre_ctrl_debug_read,
	.write = hpre_ctrl_debug_write,
};

static int hpre_debugfs_atomic64_get(void *data, u64 *val)
{
	struct hpre_dfx *dfx_item = data;

	*val = atomic64_read(&dfx_item->value);

	return 0;
}

static int hpre_debugfs_atomic64_set(void *data, u64 val)
{
	struct hpre_dfx *dfx_item = data;
	struct hpre_dfx *hpre_dfx = NULL;

	if (dfx_item->type == HPRE_OVERTIME_THRHLD) {
		hpre_dfx = dfx_item - HPRE_OVERTIME_THRHLD;
		atomic64_set(&hpre_dfx[HPRE_OVER_THRHLD_CNT].value, 0);
	} else if (val) {
		return -EINVAL;
	}

	atomic64_set(&dfx_item->value, val);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get,
			 hpre_debugfs_atomic64_set, "%llu\n");

static int hpre_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
				    enum hpre_ctrl_dbgfs_file type, int indx)
{
	struct hpre *hpre = container_of(qm, struct hpre, qm);
	struct hpre_debug *dbg = &hpre->debug;
	struct dentry *file_dir;

	if (dir)
		file_dir = dir;
	else
		file_dir = qm->debug.debug_root;

	if (type >= HPRE_DEBUG_FILE_NUM)
		return -EINVAL;

	spin_lock_init(&dbg->files[indx].lock);
	dbg->files[indx].debug = dbg;
	dbg->files[indx].type = type;
	dbg->files[indx].index = indx;
	debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir,
			    dbg->files + indx, &hpre_ctrl_debug_fops);

	return 0;
}

static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	struct debugfs_regset32 *regset;

	regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return -ENOMEM;

	regset->regs = hpre_com_dfx_regs;
	regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs);
	regset->base = qm->io_base;
	regset->dev = dev;

	debugfs_create_file("regs", 0444, qm->debug.debug_root,
			    regset, &hpre_com_regs_fops);

	return 0;
}

static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
{
	u8 clusters_num = hpre_cluster_num(qm);
	struct device *dev = &qm->pdev->dev;
	char buf[HPRE_DBGFS_VAL_MAX_LEN];
	struct debugfs_regset32 *regset;
	struct dentry *tmp_d;
	int i, ret;

	for (i = 0; i < clusters_num; i++) {
		ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
		if (ret < 0)
			return -EINVAL;
		tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);

		regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
		if (!regset)
			return -ENOMEM;

		regset->regs = hpre_cluster_dfx_regs;
		regset->nregs = ARRAY_SIZE(hpre_cluster_dfx_regs);
		regset->base = qm->io_base + hpre_cluster_offsets[i];
		regset->dev = dev;

		debugfs_create_file("regs", 0444, tmp_d, regset,
				    &hpre_cluster_regs_fops);
		ret = hpre_create_debugfs_file(qm, tmp_d, HPRE_CLUSTER_CTRL,
					       i + HPRE_CLUSTER_CTRL);
		if (ret)
			return ret;
	}

	return 0;
}

static int hpre_ctrl_debug_init(struct hisi_qm *qm)
{
	int ret;

	ret = hpre_create_debugfs_file(qm, NULL, HPRE_CLEAR_ENABLE,
				       HPRE_CLEAR_ENABLE);
	if (ret)
		return ret;

	ret = hpre_pf_comm_regs_debugfs_init(qm);
	if (ret)
		return ret;

	return hpre_cluster_debugfs_init(qm);
}

static void hpre_dfx_debug_init(struct hisi_qm *qm)
{
	struct dfx_diff_registers *hpre_regs = qm->debug.acc_diff_regs;
	struct hpre *hpre = container_of(qm, struct hpre, qm);
	struct hpre_dfx *dfx = hpre->debug.dfx;
	struct dentry *parent;
	int i;

	parent = debugfs_create_dir("hpre_dfx", qm->debug.debug_root);
	for (i = 0; i < HPRE_DFX_FILE_NUM; i++) {
		dfx[i].type = i;
		debugfs_create_file(hpre_dfx_files[i], 0644, parent, &dfx[i],
				    &hpre_atomic64_ops);
	}

	if (qm->fun_type == QM_HW_PF && hpre_regs)
		debugfs_create_file("diff_regs", 0444, parent,
				      qm, &hpre_diff_regs_fops);
}

static int hpre_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
						  hpre_debugfs_root);

	qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET;
	qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN;
	ret = hisi_qm_regs_debugfs_init(qm, hpre_diff_regs, ARRAY_SIZE(hpre_diff_regs));
	if (ret) {
		dev_warn(dev, "Failed to init HPRE diff regs!\n");
		goto debugfs_remove;
	}

	hisi_qm_debug_init(qm);

	if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) {
		ret = hpre_ctrl_debug_init(qm);
		if (ret)
			goto failed_to_create;
	}

	hpre_dfx_debug_init(qm);

	return 0;

failed_to_create:
	hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
debugfs_remove:
	debugfs_remove_recursive(qm->debug.debug_root);
	return ret;
}

static void hpre_debugfs_exit(struct hisi_qm *qm)
{
	hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));

	debugfs_remove_recursive(qm->debug.debug_root);
}

static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
	int ret;

	if (pdev->revision == QM_HW_V1) {
		pci_warn(pdev, "HPRE version 1 is not supported!\n");
		return -EINVAL;
	}

	qm->mode = uacce_mode;
	qm->pdev = pdev;
	qm->ver = pdev->revision;
	qm->sqe_size = HPRE_SQE_SIZE;
	qm->dev_name = hpre_name;

	qm->fun_type = (pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) ?
			QM_HW_PF : QM_HW_VF;
	if (qm->fun_type == QM_HW_PF) {
		qm->qp_base = HPRE_PF_DEF_Q_BASE;
		qm->qp_num = pf_q_num;
		qm->debug.curr_qm_qp_num = pf_q_num;
		qm->qm_list = &hpre_devices;
	}

	ret = hisi_qm_init(qm);
	if (ret) {
		pci_err(pdev, "Failed to init hpre qm configures!\n");
		return ret;
	}

	ret = hpre_set_qm_algs(qm);
	if (ret) {
		pci_err(pdev, "Failed to set hpre algs!\n");
		hisi_qm_uninit(qm);
	}

	return ret;
}

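/*
 * Snapshot the common and per-cluster DFX registers so that changed values
 * can be reported during controller reset by hpre_show_last_dfx_regs().
 */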
static int hpre_show_last_regs_init(struct hisi_qm *qm)
{
	int cluster_dfx_regs_num =  ARRAY_SIZE(hpre_cluster_dfx_regs);
	int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs);
	u8 clusters_num = hpre_cluster_num(qm);
	struct qm_debug *debug = &qm->debug;
	void __iomem *io_base;
	int i, j, idx;

	debug->last_words = kcalloc(cluster_dfx_regs_num * clusters_num +
			com_dfx_regs_num, sizeof(unsigned int), GFP_KERNEL);
	if (!debug->last_words)
		return -ENOMEM;

	for (i = 0; i < com_dfx_regs_num; i++)
		debug->last_words[i] = readl_relaxed(qm->io_base +
						hpre_com_dfx_regs[i].offset);

	for (i = 0; i < clusters_num; i++) {
		io_base = qm->io_base + hpre_cluster_offsets[i];
		for (j = 0; j < cluster_dfx_regs_num; j++) {
			idx = com_dfx_regs_num + i * cluster_dfx_regs_num + j;
			debug->last_words[idx] = readl_relaxed(
				io_base + hpre_cluster_dfx_regs[j].offset);
		}
	}

	return 0;
}

static void hpre_show_last_regs_uninit(struct hisi_qm *qm)
{
	struct qm_debug *debug = &qm->debug;

	if (qm->fun_type == QM_HW_VF || !debug->last_words)
		return;

	kfree(debug->last_words);
	debug->last_words = NULL;
}

static void hpre_show_last_dfx_regs(struct hisi_qm *qm)
{
	int cluster_dfx_regs_num =  ARRAY_SIZE(hpre_cluster_dfx_regs);
	int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs);
	u8 clusters_num = hpre_cluster_num(qm);
	struct qm_debug *debug = &qm->debug;
	struct pci_dev *pdev = qm->pdev;
	void __iomem *io_base;
	int i, j, idx;
	u32 val;

	if (qm->fun_type == QM_HW_VF || !debug->last_words)
		return;

	/* dump the last words of the debugging registers during controller reset */
	for (i = 0; i < com_dfx_regs_num; i++) {
		val = readl_relaxed(qm->io_base + hpre_com_dfx_regs[i].offset);
		if (debug->last_words[i] != val)
			pci_info(pdev, "Common_core:%s \t= 0x%08x => 0x%08x\n",
			  hpre_com_dfx_regs[i].name, debug->last_words[i], val);
	}

	for (i = 0; i < clusters_num; i++) {
		io_base = qm->io_base + hpre_cluster_offsets[i];
		for (j = 0; j <  cluster_dfx_regs_num; j++) {
			val = readl_relaxed(io_base +
					     hpre_cluster_dfx_regs[j].offset);
			idx = com_dfx_regs_num + i * cluster_dfx_regs_num + j;
			if (debug->last_words[idx] != val)
				pci_info(pdev, "cluster-%d:%s \t= 0x%08x => 0x%08x\n",
				i, hpre_cluster_dfx_regs[j].name, debug->last_words[idx], val);
		}
	}
}

static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
	const struct hpre_hw_error *err = hpre_hw_errors;
	struct device *dev = &qm->pdev->dev;

	while (err->msg) {
		if (err->int_msk & err_sts)
			dev_warn(dev, "%s [error status=0x%x] found\n",
				 err->msg, err->int_msk);
		err++;
	}
}

static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + HPRE_INT_STATUS);
}

static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
	u32 nfe;

	writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
	nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
	writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
}

static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
{
	u32 value;

	value = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
	writel(value & ~HPRE_AM_OOO_SHUTDOWN_ENABLE,
	       qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
	writel(value | HPRE_AM_OOO_SHUTDOWN_ENABLE,
	       qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}

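/*
 * Fill the QM error info (CE/NFE, reset and shutdown masks) from the
 * capability table for the current hardware version.
 */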
static void hpre_err_info_init(struct hisi_qm *qm)
{
	struct hisi_qm_err_info *err_info = &qm->err_info;

	err_info->fe = HPRE_HAC_RAS_FE_ENABLE;
	err_info->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_CE_MASK_CAP, qm->cap_ver);
	err_info->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_NFE_MASK_CAP, qm->cap_ver);
	err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | HPRE_OOO_ECC_2BIT_ERR;
	err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
			HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
	err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
			HPRE_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
	err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
			HPRE_QM_RESET_MASK_CAP, qm->cap_ver);
	err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
			HPRE_RESET_MASK_CAP, qm->cap_ver);
	err_info->msi_wr_port = HPRE_WR_MSI_PORT;
	err_info->acpi_rst = "HRST";
}

static const struct hisi_qm_err_ini hpre_err_ini = {
	.hw_init		= hpre_set_user_domain_and_cache,
	.hw_err_enable		= hpre_hw_error_enable,
	.hw_err_disable		= hpre_hw_error_disable,
	.get_dev_hw_err_status	= hpre_get_hw_err_status,
	.clear_dev_hw_err_status = hpre_clear_hw_err_status,
	.log_dev_hw_err		= hpre_log_hw_error,
	.open_axi_master_ooo	= hpre_open_axi_master_ooo,
	.open_sva_prefetch	= hpre_open_sva_prefetch,
	.close_sva_prefetch	= hpre_close_sva_prefetch,
	.show_last_dfx_regs	= hpre_show_last_dfx_regs,
	.err_info_init		= hpre_err_info_init,
};

static int hpre_pf_probe_init(struct hpre *hpre)
{
	struct hisi_qm *qm = &hpre->qm;
	int ret;

	ret = hpre_set_user_domain_and_cache(qm);
	if (ret)
		return ret;

	hpre_open_sva_prefetch(qm);

	qm->err_ini = &hpre_err_ini;
	qm->err_ini->err_info_init(qm);
	hisi_qm_dev_err_init(qm);
	ret = hpre_show_last_regs_init(qm);
	if (ret)
		pci_err(qm->pdev, "Failed to init last word regs!\n");

	return ret;
}

static int hpre_probe_init(struct hpre *hpre)
{
	u32 type_rate = HPRE_SHAPER_TYPE_RATE;
	struct hisi_qm *qm = &hpre->qm;
	int ret;

	if (qm->fun_type == QM_HW_PF) {
		ret = hpre_pf_probe_init(hpre);
		if (ret)
			return ret;
		/* Enable shaper type 0 */
		if (qm->ver >= QM_HW_V3) {
			type_rate |= QM_SHAPER_ENABLE;
			qm->type_rate = type_rate;
		}
	}

	return 0;
}

static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct hisi_qm *qm;
	struct hpre *hpre;
	int ret;

	hpre = devm_kzalloc(&pdev->dev, sizeof(*hpre), GFP_KERNEL);
	if (!hpre)
		return -ENOMEM;

	qm = &hpre->qm;
	ret = hpre_qm_init(qm, pdev);
	if (ret) {
		pci_err(pdev, "Failed to init HPRE QM (%d)!\n", ret);
		return ret;
	}

	ret = hpre_probe_init(hpre);
	if (ret) {
		pci_err(pdev, "Failed to probe (%d)!\n", ret);
		goto err_with_qm_init;
	}

	ret = hisi_qm_start(qm);
	if (ret)
		goto err_with_err_init;

	ret = hpre_debugfs_init(qm);
	if (ret)
		dev_warn(&pdev->dev, "init debugfs fail!\n");

	ret = hisi_qm_alg_register(qm, &hpre_devices);
	if (ret < 0) {
		pci_err(pdev, "fail to register algs to crypto!\n");
		goto err_with_qm_start;
	}

	if (qm->uacce) {
		ret = uacce_register(qm->uacce);
		if (ret) {
			pci_err(pdev, "failed to register uacce (%d)!\n", ret);
			goto err_with_alg_register;
		}
	}

	if (qm->fun_type == QM_HW_PF && vfs_num) {
		ret = hisi_qm_sriov_enable(pdev, vfs_num);
		if (ret < 0)
			goto err_with_alg_register;
	}

	hisi_qm_pm_init(qm);

	return 0;

err_with_alg_register:
	hisi_qm_alg_unregister(qm, &hpre_devices);

err_with_qm_start:
	hpre_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);

err_with_err_init:
	hpre_show_last_regs_uninit(qm);
	hisi_qm_dev_err_uninit(qm);

err_with_qm_init:
	hisi_qm_uninit(qm);

	return ret;
}

static void hpre_remove(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);

	hisi_qm_pm_uninit(qm);
	hisi_qm_wait_task_finish(qm, &hpre_devices);
	hisi_qm_alg_unregister(qm, &hpre_devices);
	if (qm->fun_type == QM_HW_PF && qm->vfs_num)
		hisi_qm_sriov_disable(pdev, true);

	hpre_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);

	if (qm->fun_type == QM_HW_PF) {
		hpre_cnt_regs_clear(qm);
		qm->debug.curr_qm_qp_num = 0;
		hpre_show_last_regs_uninit(qm);
		hisi_qm_dev_err_uninit(qm);
	}

	hisi_qm_uninit(qm);
}

static const struct dev_pm_ops hpre_pm_ops = {
	SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
};

static const struct pci_error_handlers hpre_err_handler = {
	.error_detected		= hisi_qm_dev_err_detected,
	.slot_reset		= hisi_qm_dev_slot_reset,
	.reset_prepare		= hisi_qm_reset_prepare,
	.reset_done		= hisi_qm_reset_done,
};

static struct pci_driver hpre_pci_driver = {
	.name			= hpre_name,
	.id_table		= hpre_dev_ids,
	.probe			= hpre_probe,
	.remove			= hpre_remove,
	.sriov_configure	= IS_ENABLED(CONFIG_PCI_IOV) ?
				  hisi_qm_sriov_configure : NULL,
	.err_handler		= &hpre_err_handler,
	.shutdown		= hisi_qm_dev_shutdown,
	.driver.pm		= &hpre_pm_ops,
};

struct pci_driver *hisi_hpre_get_pf_driver(void)
{
	return &hpre_pci_driver;
}
EXPORT_SYMBOL_GPL(hisi_hpre_get_pf_driver);

static void hpre_register_debugfs(void)
{
	if (!debugfs_initialized())
		return;

	hpre_debugfs_root = debugfs_create_dir(hpre_name, NULL);
}

static void hpre_unregister_debugfs(void)
{
	debugfs_remove_recursive(hpre_debugfs_root);
}

static int __init hpre_init(void)
{
	int ret;

	hisi_qm_init_list(&hpre_devices);
	hpre_register_debugfs();

	ret = pci_register_driver(&hpre_pci_driver);
	if (ret) {
		hpre_unregister_debugfs();
		pr_err("hpre: can't register hisi hpre driver.\n");
	}

	return ret;
}

static void __exit hpre_exit(void)
{
	pci_unregister_driver(&hpre_pci_driver);
	hpre_unregister_debugfs();
}

module_init(hpre_init);
module_exit(hpre_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
MODULE_AUTHOR("Meng Yu <yumeng18@huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon HPRE accelerator");