sec_main.c 24.7 KB
Newer Older
1 2 3 4 5 6
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitops.h>
7
#include <linux/debugfs.h>
8 9
#include <linux/init.h>
#include <linux/io.h>
10
#include <linux/iommu.h>
11 12 13 14 15
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/topology.h>
16
#include <linux/uacce.h>
17 18 19

#include "sec.h"

/* SR-IOV and queue topology */
#define SEC_VF_NUM			63
#define SEC_QUEUE_NUM_V1		4096
#define SEC_PF_PCI_DEVICE_ID		0xa255
#define SEC_VF_PCI_DEVICE_ID		0xa256

/* BD (buffer descriptor) field-check enable masks, see sec_engine_init() */
#define SEC_BD_ERR_CHK_EN0		0xEFFFFFFF
#define SEC_BD_ERR_CHK_EN1		0x7ffff7fd
#define SEC_BD_ERR_CHK_EN3		0xffffbfff

#define SEC_SQE_SIZE			128
#define SEC_SQ_SIZE			(SEC_SQE_SIZE * QM_Q_DEPTH)
#define SEC_PF_DEF_Q_NUM		256
#define SEC_PF_DEF_Q_BASE		0
#define SEC_CTX_Q_NUM_DEF		2
#define SEC_CTX_Q_NUM_MAX		32

/* Debug counter read-clear control and hw error interrupt registers */
#define SEC_CTRL_CNT_CLR_CE		0x301120
#define SEC_CTRL_CNT_CLR_CE_BIT	BIT(0)
#define SEC_CORE_INT_SOURCE		0x301010
#define SEC_CORE_INT_MASK		0x301000
#define SEC_CORE_INT_STATUS		0x301008
#define SEC_CORE_SRAM_ECC_ERR_INFO	0x301C14
/* shift/mask extracting the multi-bit ECC SRAM number from ERR_INFO */
#define SEC_ECC_NUM			16
#define SEC_ECC_MASH			0xFF
#define SEC_CORE_INT_DISABLE		0x0
#define SEC_CORE_INT_ENABLE		0x7c1ff
#define SEC_CORE_INT_CLEAR		0x7c1ff
#define SEC_SAA_ENABLE			0x17f

/* RAS interrupt routing (CE/FE/NFE) registers and enable masks */
#define SEC_RAS_CE_REG			0x301050
#define SEC_RAS_FE_REG			0x301054
#define SEC_RAS_NFE_REG			0x301058
#define SEC_RAS_CE_ENB_MSK		0x88
#define SEC_RAS_FE_ENB_MSK		0x0
#define SEC_RAS_NFE_ENB_MSK		0x7c177
#define SEC_RAS_DISABLE		0x0
#define SEC_MEM_START_INIT_REG	0x301100
#define SEC_MEM_INIT_DONE_REG		0x301104

/* Engine control: clock gating, TRNG enable, AXI shutdown on m-bit error */
#define SEC_CONTROL_REG		0x301200
#define SEC_TRNG_EN_SHIFT		8
#define SEC_CLK_GATE_ENABLE		BIT(3)
#define SEC_CLK_GATE_DISABLE		(~BIT(3))
#define SEC_AXI_SHUTDOWN_ENABLE	BIT(12)
#define SEC_AXI_SHUTDOWN_DISABLE	0xFFFFEFFF

#define SEC_INTERFACE_USER_CTRL0_REG	0x301220
#define SEC_INTERFACE_USER_CTRL1_REG	0x301224
#define SEC_SAA_EN_REG			0x301270
#define SEC_BD_ERR_CHK_EN_REG0		0x301380
#define SEC_BD_ERR_CHK_EN_REG1		0x301384
#define SEC_BD_ERR_CHK_EN_REG3		0x30138c

/* SMMU user-port configuration: normal translation vs SVA (shared VA) */
#define SEC_USER0_SMMU_NORMAL		(BIT(23) | BIT(15))
#define SEC_USER1_SMMU_NORMAL		(BIT(31) | BIT(23) | BIT(15) | BIT(7))
#define SEC_USER1_ENABLE_CONTEXT_SSV	BIT(24)
#define SEC_USER1_ENABLE_DATA_SSV	BIT(16)
#define SEC_USER1_WB_CONTEXT_SSV	BIT(8)
#define SEC_USER1_WB_DATA_SSV		BIT(0)
#define SEC_USER1_SVA_SET		(SEC_USER1_ENABLE_CONTEXT_SSV | \
					SEC_USER1_ENABLE_DATA_SSV | \
					SEC_USER1_WB_CONTEXT_SSV |  \
					SEC_USER1_WB_DATA_SSV)
#define SEC_USER1_SMMU_SVA		(SEC_USER1_SMMU_NORMAL | SEC_USER1_SVA_SET)
#define SEC_USER1_SMMU_MASK		(~SEC_USER1_SVA_SET)
#define SEC_CORE_INT_STATUS_M_ECC	BIT(2)

#define SEC_DELAY_10_US			10
#define SEC_POLL_TIMEOUT_US		1000
#define SEC_DBGFS_VAL_MAX_LEN		20
#define SEC_SINGLE_PORT_MAX_TRANS	0x2060

#define SEC_SQE_MASK_OFFSET		64
#define SEC_SQE_MASK_LEN		48

/* Maps one hardware error interrupt bit to a printable description. */
struct sec_hw_error {
	u32 int_msk;	/* bit in SEC_CORE_INT_STATUS identifying the error */
	const char *msg;	/* human-readable name used when logging it */
};

/* Debugfs label: counter name plus its byte offset inside struct sec_dfx. */
struct sec_dfx_item {
	const char *name;
	u32 offset;	/* offsetof() the atomic64 counter in struct sec_dfx */
};

static const char sec_name[] = "hisi_sec2";
/* Module-wide debugfs root ("hisi_sec2"); per-device dirs nest under it. */
static struct dentry *sec_debugfs_root;

/* All probed SEC devices, plus the crypto (un)registration callbacks. */
static struct hisi_qm_list sec_devices = {
	.register_to_crypto	= sec_register_to_crypto,
	.unregister_from_crypto	= sec_unregister_from_crypto,
};

/*
 * Hardware error descriptors, terminated by an empty entry. Bits 9-13 of
 * SEC_CORE_INT_STATUS have no entry here and are not reported by name.
 */
static const struct sec_hw_error sec_hw_errors[] = {
	{
		.int_msk = BIT(0),
		.msg = "sec_axi_rresp_err_rint"
	},
	{
		.int_msk = BIT(1),
		.msg = "sec_axi_bresp_err_rint"
	},
	{
		.int_msk = BIT(2),
		.msg = "sec_ecc_2bit_err_rint"
	},
	{
		.int_msk = BIT(3),
		.msg = "sec_ecc_1bit_err_rint"
	},
	{
		.int_msk = BIT(4),
		.msg = "sec_req_trng_timeout_rint"
	},
	{
		.int_msk = BIT(5),
		.msg = "sec_fsm_hbeat_rint"
	},
	{
		.int_msk = BIT(6),
		.msg = "sec_channel_req_rng_timeout_rint"
	},
	{
		.int_msk = BIT(7),
		.msg = "sec_bd_err_rint"
	},
	{
		.int_msk = BIT(8),
		.msg = "sec_chain_buff_err_rint"
	},
	{
		.int_msk = BIT(14),
		.msg = "sec_no_secure_access"
	},
	{
		.int_msk = BIT(15),
		.msg = "sec_wrapping_key_auth_err"
	},
	{
		.int_msk = BIT(16),
		.msg = "sec_km_key_crc_fail"
	},
	{
		.int_msk = BIT(17),
		.msg = "sec_axi_poison_err"
	},
	{
		.int_msk = BIT(18),
		.msg = "sec_sva_err"
	},
	{}
};

/* debugfs file names, indexed by the sec_debug_file index enum. */
static const char * const sec_dbg_file_name[] = {
	[SEC_CLEAR_ENABLE] = "clear_enable",
};

/* atomic64 counters in struct sec_dfx exposed as files under sec_dfx/. */
static struct sec_dfx_item sec_dfx_labels[] = {
	{"send_cnt", offsetof(struct sec_dfx, send_cnt)},
	{"recv_cnt", offsetof(struct sec_dfx, recv_cnt)},
	{"send_busy_cnt", offsetof(struct sec_dfx, send_busy_cnt)},
	{"recv_busy_cnt", offsetof(struct sec_dfx, recv_busy_cnt)},
	{"err_bd_cnt", offsetof(struct sec_dfx, err_bd_cnt)},
	{"invalid_req_cnt", offsetof(struct sec_dfx, invalid_req_cnt)},
	{"done_flag_cnt", offsetof(struct sec_dfx, done_flag_cnt)},
};

/* Hardware registers dumped via the debugfs "regs" file (PF only). */
static const struct debugfs_reg32 sec_dfx_regs[] = {
	{"SEC_PF_ABNORMAL_INT_SOURCE    ",  0x301010},
	{"SEC_SAA_EN                    ",  0x301270},
	{"SEC_BD_LATENCY_MIN            ",  0x301600},
	{"SEC_BD_LATENCY_MAX            ",  0x301608},
	{"SEC_BD_LATENCY_AVG            ",  0x30160C},
	{"SEC_BD_NUM_IN_SAA0            ",  0x301670},
	{"SEC_BD_NUM_IN_SAA1            ",  0x301674},
	{"SEC_BD_NUM_IN_SEC             ",  0x301680},
	{"SEC_ECC_1BIT_CNT              ",  0x301C00},
	{"SEC_ECC_1BIT_INFO             ",  0x301C04},
	{"SEC_ECC_2BIT_CNT              ",  0x301C10},
	{"SEC_ECC_2BIT_INFO             ",  0x301C14},
	{"SEC_BD_SAA0                   ",  0x301C20},
	{"SEC_BD_SAA1                   ",  0x301C24},
	{"SEC_BD_SAA2                   ",  0x301C28},
	{"SEC_BD_SAA3                   ",  0x301C2C},
	{"SEC_BD_SAA4                   ",  0x301C30},
	{"SEC_BD_SAA5                   ",  0x301C34},
	{"SEC_BD_SAA6                   ",  0x301C38},
	{"SEC_BD_SAA7                   ",  0x301C3C},
	{"SEC_BD_SAA8                   ",  0x301C40},
};

/* Validate the pf_q_num module parameter against the PF device's limits. */
static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	return q_num_set(val, kp, SEC_PF_PCI_DEVICE_ID);
}

static const struct kernel_param_ops sec_pf_q_num_ops = {
	.set = sec_pf_q_num_set,
	.get = param_get_int,
};

/* Number of queue pairs reserved for the PF; validated by sec_pf_q_num_set(). */
static u32 pf_q_num = SEC_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 2-4096, v2 2-1024)");
224 225 226 227 228 229 230 231 232 233 234 235 236

static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp)
{
	u32 ctx_q_num;
	int ret;

	if (!val)
		return -EINVAL;

	ret = kstrtou32(val, 10, &ctx_q_num);
	if (ret)
		return -EINVAL;

237
	if (!ctx_q_num || ctx_q_num > SEC_CTX_Q_NUM_MAX || ctx_q_num & 0x1) {
238 239 240 241 242 243 244 245 246 247 248 249 250
		pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num);
		return -EINVAL;
	}

	return param_set_int(val, kp);
}

static const struct kernel_param_ops sec_ctx_q_num_ops = {
	.set = sec_ctx_q_num_set,
	.get = param_get_int,
};
/* Queue pairs per crypto context; must be even, capped at SEC_CTX_Q_NUM_MAX. */
static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (2 default, 2, 4, ..., 32)");

static const struct kernel_param_ops vfs_num_ops = {
	.set = vfs_num_set,
	.get = param_get_int,
};

/* Number of VFs to enable at probe time; 0 leaves SR-IOV disabled. */
static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");

/* Release the queue pairs obtained from sec_create_qps() and the array. */
void sec_destroy_qps(struct hisi_qp **qps, int qp_num)
{
	hisi_qm_free_qps(qps, qp_num);
	kfree(qps);
}

/*
 * Allocate ctx_q_num queue pairs from a SEC device close to the current
 * CPU's NUMA node. Returns the qp array on success or NULL on failure;
 * the caller releases it with sec_destroy_qps().
 */
struct hisi_qp **sec_create_qps(void)
{
	int node = cpu_to_node(smp_processor_id());
	u32 ctx_num = ctx_q_num;
	struct hisi_qp **qps;

	qps = kcalloc(ctx_num, sizeof(struct hisi_qp *), GFP_KERNEL);
	if (!qps)
		return NULL;

	if (hisi_qm_alloc_qps_node(&sec_devices, ctx_num, 0, node, qps)) {
		kfree(qps);
		return NULL;
	}

	return qps;
}

static const struct kernel_param_ops sec_uacce_mode_ops = {
	.set = uacce_mode_set,
	.get = param_get_int,
};

/*
 * uacce_mode = 0 means sec only register to crypto,
 * uacce_mode = 1 means sec both register to crypto and uacce.
 */
static u32 uacce_mode = UACCE_MODE_NOUACCE;
module_param_cb(uacce_mode, &sec_uacce_mode_ops, &uacce_mode, 0444);
MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);

/* PCI IDs this driver binds to: the SEC2 PF and its VFs. */
static const struct pci_device_id sec_dev_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_VF_PCI_DEVICE_ID) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, sec_dev_ids);

307
static u8 sec_get_endian(struct hisi_qm *qm)
308 309 310
{
	u32 reg;

311 312 313 314 315 316 317 318 319
	/*
	 * As for VF, it is a wrong way to get endian setting by
	 * reading a register of the engine
	 */
	if (qm->pdev->is_virtfn) {
		dev_err_ratelimited(&qm->pdev->dev,
				    "cannot access a register in VF!\n");
		return SEC_LE;
	}
320
	reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
321 322 323 324 325 326 327 328 329 330 331 332 333
	/* BD little endian mode */
	if (!(reg & BIT(0)))
		return SEC_LE;

	/* BD 32-bits big endian mode */
	else if (!(reg & BIT(1)))
		return SEC_32BE;

	/* BD 64-bits big endian mode */
	else
		return SEC_64BE;
}

/*
 * One-time engine bring-up: disable clock gating, initialise internal
 * memory, enable the TRNG, configure SMMU/SVA user ports, set the SAA and
 * BD field-check masks, and program BD endianness. Register write order
 * follows the hardware's required sequence.
 */
static int sec_engine_init(struct hisi_qm *qm)
{
	int ret;
	u32 reg;

	/* disable clock gate control */
	reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
	reg &= SEC_CLK_GATE_DISABLE;
	writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);

	/* kick off internal memory init and poll for completion */
	writel_relaxed(0x1, qm->io_base + SEC_MEM_START_INIT_REG);

	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_MEM_INIT_DONE_REG,
					 reg, reg & 0x1, SEC_DELAY_10_US,
					 SEC_POLL_TIMEOUT_US);
	if (ret) {
		pci_err(qm->pdev, "fail to init sec mem\n");
		return ret;
	}

	/* enable the true random number generator */
	reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
	reg |= (0x1 << SEC_TRNG_EN_SHIFT);
	writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);

	reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL0_REG);
	reg |= SEC_USER0_SMMU_NORMAL;
	writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL0_REG);

	/* user port 1: SVA mode only on v2 hardware with SVA in use */
	reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL1_REG);
	reg &= SEC_USER1_SMMU_MASK;
	if (qm->use_sva && qm->ver == QM_HW_V2)
		reg |= SEC_USER1_SMMU_SVA;
	else
		reg |= SEC_USER1_SMMU_NORMAL;
	writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL1_REG);

	writel(SEC_SINGLE_PORT_MAX_TRANS,
	       qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS);

	writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG);

	/* Enable sm4 extra mode, as ctr/ecb */
	writel_relaxed(SEC_BD_ERR_CHK_EN0,
		       qm->io_base + SEC_BD_ERR_CHK_EN_REG0);
	/* Enable sm4 xts mode multiple iv */
	writel_relaxed(SEC_BD_ERR_CHK_EN1,
		       qm->io_base + SEC_BD_ERR_CHK_EN_REG1);
	writel_relaxed(SEC_BD_ERR_CHK_EN3,
		       qm->io_base + SEC_BD_ERR_CHK_EN_REG3);

	/* config endian */
	reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
	reg |= sec_get_endian(qm);
	writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);

	return 0;
}

/*
 * Configure the QM's AXI user domain and caching attributes, then run the
 * SEC-specific engine initialisation. Used both at probe and as the
 * err_ini hw_init callback after reset.
 */
static int sec_set_user_domain_and_cache(struct hisi_qm *qm)
{
	/* qm user domain */
	writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1);
	writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
	writel(AXUSER_BASE, qm->io_base + QM_AWUSER_M_CFG_1);
	writel(AWUSER_M_CFG_ENABLE, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
	writel(WUSER_M_CFG_ENABLE, qm->io_base + QM_WUSER_M_CFG_ENABLE);

	/* qm cache */
	writel(AXI_M_CFG, qm->io_base + QM_AXI_M_CFG);
	writel(AXI_M_CFG_ENABLE, qm->io_base + QM_AXI_M_CFG_ENABLE);

	/* disable FLR triggered by BME(bus master enable) */
	writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG);
	writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);

	/* enable sqc,cqc writeback */
	writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
	       CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
	       FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL);

	return sec_engine_init(qm);
}

/* sec_debug_regs_clear() - clear the sec debug regs */
static void sec_debug_regs_clear(struct hisi_qm *qm)
{
	int i;

	/* clear sec dfx regs: enable read-clear, then read each counter */
	writel(0x1, qm->io_base + SEC_CTRL_CNT_CLR_CE);
	for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++)
		readl(qm->io_base + sec_dfx_regs[i].offset);

	/* clear rdclr_en */
	writel(0x0, qm->io_base + SEC_CTRL_CNT_CLR_CE);

	hisi_qm_debug_regs_clear(qm);
}

/*
 * Enable SEC hardware error reporting: clear stale sources, unmask the
 * error interrupts, route RAS CE/FE/NFE, and arm AXI master shutdown on
 * multi-bit ECC errors. V1 hardware has no error handling, so only mask.
 */
static void sec_hw_error_enable(struct hisi_qm *qm)
{
	u32 val;

	if (qm->ver == QM_HW_V1) {
		writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
		pci_info(qm->pdev, "V1 not support hw error handle\n");
		return;
	}

	val = readl(qm->io_base + SEC_CONTROL_REG);

	/* clear SEC hw error source if having */
	writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE);

	/* enable SEC hw error interrupts */
	writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);

	/* enable RAS int */
	writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG);
	writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
	writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG);

	/* enable SEC block master OOO when m-bit error occur */
	val = val | SEC_AXI_SHUTDOWN_ENABLE;

	writel(val, qm->io_base + SEC_CONTROL_REG);
}

/* Reverse of sec_hw_error_enable(): mask RAS and error interrupts. */
static void sec_hw_error_disable(struct hisi_qm *qm)
{
	u32 val;

	val = readl(qm->io_base + SEC_CONTROL_REG);

	/* disable RAS int */
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG);
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_FE_REG);
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG);

	/* disable SEC hw error interrupts */
	writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);

	/* disable SEC block master OOO when m-bit error occur */
	val = val & SEC_AXI_SHUTDOWN_DISABLE;

	writel(val, qm->io_base + SEC_CONTROL_REG);
}

/* Return the current state (0/1) of the debug-counter read-clear bit. */
static u32 sec_clear_enable_read(struct sec_debug_file *file)
{
	struct hisi_qm *qm = file->qm;

	return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
			SEC_CTRL_CNT_CLR_CE_BIT;
}

/*
 * Set the debug-counter read-clear bit; only 0 and 1 are accepted
 * (anything else returns -EINVAL).
 */
static int sec_clear_enable_write(struct sec_debug_file *file, u32 val)
{
	struct hisi_qm *qm = file->qm;
	u32 tmp;

	if (val > 1)
		return -EINVAL;

	tmp = readl(qm->io_base + SEC_CTRL_CNT_CLR_CE);
	tmp = (tmp & ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
	writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE);

	return 0;
}

/* debugfs read: format the selected debug value as a decimal line. */
static ssize_t sec_debug_read(struct file *filp, char __user *buf,
			       size_t count, loff_t *pos)
{
	struct sec_debug_file *file = filp->private_data;
	char tbuf[SEC_DBGFS_VAL_MAX_LEN];
	u32 val;
	int ret;

	/* lock guards the register read against concurrent writers */
	spin_lock_irq(&file->lock);

	switch (file->index) {
	case SEC_CLEAR_ENABLE:
		val = sec_clear_enable_read(file);
		break;
	default:
		spin_unlock_irq(&file->lock);
		return -EINVAL;
	}

	spin_unlock_irq(&file->lock);
	ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val);

	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

/* debugfs write: parse a number and apply it to the selected debug file. */
static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
			       size_t count, loff_t *pos)
{
	struct sec_debug_file *file = filp->private_data;
	char tbuf[SEC_DBGFS_VAL_MAX_LEN];
	unsigned long val;
	int len, ret;

	/* only whole writes from offset 0 are supported */
	if (*pos != 0)
		return 0;

	if (count >= SEC_DBGFS_VAL_MAX_LEN)
		return -ENOSPC;

	len = simple_write_to_buffer(tbuf, SEC_DBGFS_VAL_MAX_LEN - 1,
				     pos, buf, count);
	if (len < 0)
		return len;

	tbuf[len] = '\0';
	if (kstrtoul(tbuf, 0, &val))
		return -EFAULT;

	spin_lock_irq(&file->lock);

	switch (file->index) {
	case SEC_CLEAR_ENABLE:
		ret = sec_clear_enable_write(file, val);
		if (ret)
			goto err_input;
		break;
	default:
		ret = -EINVAL;
		goto err_input;
	}

	spin_unlock_irq(&file->lock);

	return count;

 err_input:
	spin_unlock_irq(&file->lock);
	return ret;
}

/* File operations backing the per-device debug files (clear_enable, ...). */
static const struct file_operations sec_dbg_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = sec_debug_read,
	.write = sec_debug_write,
};

/* debugfs attribute getter: read an atomic64 dfx counter. */
static int sec_debugfs_atomic64_get(void *data, u64 *val)
{
	*val = atomic64_read((atomic64_t *)data);

	return 0;
}

/* debugfs attribute setter: only writing 0 (reset) is permitted. */
static int sec_debugfs_atomic64_set(void *data, u64 val)
{
	if (val)
		return -EINVAL;

	atomic64_set((atomic64_t *)data, 0);

	return 0;
}

/* Attribute pairing the dfx counter getter/setter; prints "%lld\n". */
DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
			 sec_debugfs_atomic64_set, "%lld\n");

/*
 * Create the "sec_dfx" debugfs directory: a register dump file (PF only)
 * plus one resettable file per atomic64 counter in sec_dfx_labels.
 */
static int sec_core_debug_init(struct hisi_qm *qm)
{
	struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
	struct device *dev = &qm->pdev->dev;
	struct sec_dfx *dfx = &sec->debug.dfx;
	struct debugfs_regset32 *regset;
	struct dentry *tmp_d;
	int i;

	tmp_d = debugfs_create_dir("sec_dfx", qm->debug.debug_root);

	regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return -ENOMEM;

	regset->regs = sec_dfx_regs;
	regset->nregs = ARRAY_SIZE(sec_dfx_regs);
	regset->base = qm->io_base;

	/* only the PF may read engine registers */
	if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID)
		debugfs_create_regset32("regs", 0444, tmp_d, regset);

	for (i = 0; i < ARRAY_SIZE(sec_dfx_labels); i++) {
		atomic64_t *data = (atomic64_t *)((uintptr_t)dfx +
					sec_dfx_labels[i].offset);
		debugfs_create_file(sec_dfx_labels[i].name, 0644,
				   tmp_d, data, &sec_atomic64_ops);
	}

	return 0;
}

/*
 * Populate this device's debugfs directory: PF-only control files from
 * sec_dbg_file_name, then the shared sec_dfx counter directory.
 */
static int sec_debug_init(struct hisi_qm *qm)
{
	struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
	int i;

	if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) {
		for (i = SEC_CLEAR_ENABLE; i < SEC_DEBUG_FILE_NUM; i++) {
			spin_lock_init(&sec->debug.files[i].lock);
			sec->debug.files[i].index = i;
			sec->debug.files[i].qm = qm;

			debugfs_create_file(sec_dbg_file_name[i], 0600,
						  qm->debug.debug_root,
						  sec->debug.files + i,
						  &sec_dbg_fops);
		}
	}

	return sec_core_debug_init(qm);
}

655
static int sec_debugfs_init(struct hisi_qm *qm)
656 657 658 659 660 661
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
						  sec_debugfs_root);
662 663
	qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
	qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;
664
	hisi_qm_debug_init(qm);
665

666 667 668 669
	ret = sec_debug_init(qm);
	if (ret)
		goto failed_to_create;

670 671 672 673 674 675 676
	return 0;

failed_to_create:
	debugfs_remove_recursive(sec_debugfs_root);
	return ret;
}

/* Remove this device's debugfs subtree (the module root stays). */
static void sec_debugfs_exit(struct hisi_qm *qm)
{
	debugfs_remove_recursive(qm->debug.debug_root);
}

/*
 * Log every set error bit by name; for multi-bit ECC errors also dump the
 * failing SRAM number extracted from SEC_CORE_SRAM_ECC_ERR_INFO.
 */
static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
	const struct sec_hw_error *errs = sec_hw_errors;
	struct device *dev = &qm->pdev->dev;
	u32 err_val;

	while (errs->msg) {
		if (errs->int_msk & err_sts) {
			dev_err(dev, "%s [error status=0x%x] found\n",
					errs->msg, errs->int_msk);

			if (SEC_CORE_INT_STATUS_M_ECC & errs->int_msk) {
				err_val = readl(qm->io_base +
						SEC_CORE_SRAM_ECC_ERR_INFO);
				dev_err(dev, "multi ecc sram num=0x%x\n",
						((err_val) >> SEC_ECC_NUM) &
						SEC_ECC_MASH);
			}
		}
		errs++;
	}
}

/* Return the raw SEC hardware error interrupt status bits. */
static u32 sec_get_hw_err_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + SEC_CORE_INT_STATUS);
}

/* Acknowledge the given error bits by writing them back to the source reg. */
static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
	writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
}

/* Pulse the AXI shutdown bit (clear then set) to re-open the AXI master. */
static void sec_open_axi_master_ooo(struct hisi_qm *qm)
{
	u32 val;

	val = readl(qm->io_base + SEC_CONTROL_REG);
	writel(val & SEC_AXI_SHUTDOWN_DISABLE, qm->io_base + SEC_CONTROL_REG);
	writel(val | SEC_AXI_SHUTDOWN_ENABLE, qm->io_base + SEC_CONTROL_REG);
}

/* Fill in the QM error-handling description for SEC hardware. */
static void sec_err_info_init(struct hisi_qm *qm)
{
	struct hisi_qm_err_info *err_info = &qm->err_info;

	err_info->ce = QM_BASE_CE;
	err_info->fe = 0;
	err_info->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
	err_info->dev_ce_mask = SEC_RAS_CE_ENB_MSK;
	err_info->msi_wr_port = BIT(0);
	err_info->acpi_rst = "SRST";	/* ACPI method used for soft reset */
	err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
			QM_ACC_WB_NOT_READY_TIMEOUT;
}

/* Error-handling callbacks the QM core invokes for SEC devices. */
static const struct hisi_qm_err_ini sec_err_ini = {
	.hw_init		= sec_set_user_domain_and_cache,
	.hw_err_enable		= sec_hw_error_enable,
	.hw_err_disable		= sec_hw_error_disable,
	.get_dev_hw_err_status	= sec_get_hw_err_status,
	.clear_dev_hw_err_status = sec_clear_hw_err_status,
	.log_dev_hw_err		= sec_log_hw_error,
	.open_axi_master_ooo	= sec_open_axi_master_ooo,
	.err_info_init		= sec_err_info_init,
};

/*
 * PF-only probe setup: install error callbacks, configure the user domain
 * and engine, arm error reporting, and zero the debug counters.
 */
static int sec_pf_probe_init(struct sec_dev *sec)
{
	struct hisi_qm *qm = &sec->qm;
	int ret;

	qm->err_ini = &sec_err_ini;
	qm->err_ini->err_info_init(qm);

	ret = sec_set_user_domain_and_cache(qm);
	if (ret)
		return ret;

	hisi_qm_dev_err_init(qm);
	sec_debug_regs_clear(qm);

	return 0;
}

/*
 * Fill in the hisi_qm description for this PCI function (PF vs VF queue
 * layout), allocate the request workqueue, and initialise the QM.
 */
static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
	int ret;

	qm->pdev = pdev;
	qm->ver = pdev->revision;
	qm->algs = "cipher\ndigest\naead";
	qm->mode = uacce_mode;
	qm->sqe_size = SEC_SQE_SIZE;
	qm->dev_name = sec_name;

	qm->fun_type = (pdev->device == SEC_PF_PCI_DEVICE_ID) ?
			QM_HW_PF : QM_HW_VF;
	if (qm->fun_type == QM_HW_PF) {
		qm->qp_base = SEC_PF_DEF_Q_BASE;
		qm->qp_num = pf_q_num;
		qm->debug.curr_qm_qp_num = pf_q_num;
		qm->qm_list = &sec_devices;
	} else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
		/*
		 * have no way to get qm configure in VM in v1 hardware,
		 * so currently force PF to uses SEC_PF_DEF_Q_NUM, and force
		 * to trigger only one VF in v1 hardware.
		 * v2 hardware has no such problem.
		 */
		qm->qp_base = SEC_PF_DEF_Q_NUM;
		qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
	}

	/*
	 * WQ_HIGHPRI: SEC request must be low delayed,
	 * so need a high priority workqueue.
	 * WQ_UNBOUND: SEC task is likely with long
	 * running CPU intensive workloads.
	 */
	qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM |
				 WQ_UNBOUND, num_online_cpus(),
				 pci_name(qm->pdev));
	if (!qm->wq) {
		pci_err(qm->pdev, "fail to alloc workqueue\n");
		return -ENOMEM;
	}

	ret = hisi_qm_init(qm);
	if (ret)
		destroy_workqueue(qm->wq);

	return ret;
}

/* Counterpart of sec_qm_init(); the workqueue is freed in sec_probe_uninit(). */
static void sec_qm_uninit(struct hisi_qm *qm)
{
	hisi_qm_uninit(qm);
}

822
static int sec_probe_init(struct sec_dev *sec)
823
{
824
	struct hisi_qm *qm = &sec->qm;
825 826
	int ret;

827
	if (qm->fun_type == QM_HW_PF) {
828
		ret = sec_pf_probe_init(sec);
829 830
		if (ret)
			return ret;
831
	}
832

833
	return 0;
834 835
}

/* Undo sec_probe_init()/sec_qm_init(): error handling off, workqueue gone. */
static void sec_probe_uninit(struct hisi_qm *qm)
{
	hisi_qm_dev_err_uninit(qm);

	destroy_workqueue(qm->wq);
}

843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859
static void sec_iommu_used_check(struct sec_dev *sec)
{
	struct iommu_domain *domain;
	struct device *dev = &sec->qm.pdev->dev;

	domain = iommu_get_domain_for_dev(dev);

	/* Check if iommu is used */
	sec->iommu_used = false;
	if (domain) {
		if (domain->type & __IOMMU_DOMAIN_PAGING)
			sec->iommu_used = true;
		dev_info(dev, "SMMU Opened, the iommu type = %u\n",
			domain->type);
	}
}

/*
 * PCI probe: init the QM, start it, expose debugfs, register crypto algs
 * (if enough queues), optionally register uacce and enable SR-IOV.
 * Error labels unwind strictly in reverse order of setup.
 */
static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct sec_dev *sec;
	struct hisi_qm *qm;
	int ret;

	sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
	if (!sec)
		return -ENOMEM;

	qm = &sec->qm;
	ret = sec_qm_init(qm, pdev);
	if (ret) {
		pci_err(pdev, "Failed to init SEC QM (%d)!\n", ret);
		return ret;
	}

	sec->ctx_q_num = ctx_q_num;
	sec_iommu_used_check(sec);

	ret = sec_probe_init(sec);
	if (ret) {
		pci_err(pdev, "Failed to probe!\n");
		goto err_qm_uninit;
	}

	ret = hisi_qm_start(qm);
	if (ret) {
		pci_err(pdev, "Failed to start sec qm!\n");
		goto err_probe_uninit;
	}

	/* debugfs failure is non-fatal: the device still works without it */
	ret = sec_debugfs_init(qm);
	if (ret)
		pci_warn(pdev, "Failed to init debugfs!\n");

	if (qm->qp_num >= ctx_q_num) {
		ret = hisi_qm_alg_register(qm, &sec_devices);
		if (ret < 0) {
			pr_err("Failed to register driver to crypto.\n");
			goto err_qm_stop;
		}
	} else {
		pci_warn(qm->pdev,
			"Failed to use kernel mode, qp not enough!\n");
	}

	if (qm->uacce) {
		ret = uacce_register(qm->uacce);
		if (ret) {
			pci_err(pdev, "failed to register uacce (%d)!\n", ret);
			goto err_alg_unregister;
		}
	}

	if (qm->fun_type == QM_HW_PF && vfs_num) {
		ret = hisi_qm_sriov_enable(pdev, vfs_num);
		if (ret < 0)
			goto err_alg_unregister;
	}

	return 0;

err_alg_unregister:
	hisi_qm_alg_unregister(qm, &sec_devices);
err_qm_stop:
	sec_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);
err_probe_uninit:
	sec_probe_uninit(qm);
err_qm_uninit:
	sec_qm_uninit(qm);
	return ret;
}

/* PCI remove: drain tasks, then tear down in reverse order of sec_probe(). */
static void sec_remove(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);

	/* wait for in-flight requests before unregistering anything */
	hisi_qm_wait_task_finish(qm, &sec_devices);
	if (qm->qp_num >= ctx_q_num)
		hisi_qm_alg_unregister(qm, &sec_devices);

	if (qm->fun_type == QM_HW_PF && qm->vfs_num)
		hisi_qm_sriov_disable(pdev, true);

	sec_debugfs_exit(qm);

	(void)hisi_qm_stop(qm, QM_NORMAL);

	if (qm->fun_type == QM_HW_PF)
		sec_debug_regs_clear(qm);

	sec_probe_uninit(qm);

	sec_qm_uninit(qm);
}

/* AER/reset hooks, all delegated to the shared hisi_qm implementation. */
static const struct pci_error_handlers sec_err_handler = {
	.error_detected = hisi_qm_dev_err_detected,
	.slot_reset	= hisi_qm_dev_slot_reset,
	.reset_prepare	= hisi_qm_reset_prepare,
	.reset_done	= hisi_qm_reset_done,
};

static struct pci_driver sec_pci_driver = {
	.name = "hisi_sec2",
	.id_table = sec_dev_ids,
	.probe = sec_probe,
	.remove = sec_remove,
	.err_handler = &sec_err_handler,
	.sriov_configure = hisi_qm_sriov_configure,
	.shutdown = hisi_qm_dev_shutdown,
};

/* Create the module-wide "hisi_sec2" debugfs root (no-op if debugfs is off). */
static void sec_register_debugfs(void)
{
	if (!debugfs_initialized())
		return;

	sec_debugfs_root = debugfs_create_dir("hisi_sec2", NULL);
}

/* Remove the module-wide debugfs root and everything beneath it. */
static void sec_unregister_debugfs(void)
{
	debugfs_remove_recursive(sec_debugfs_root);
}

/* Module entry: set up the device list and debugfs, register the driver. */
static int __init sec_init(void)
{
	int ret;

	hisi_qm_init_list(&sec_devices);
	sec_register_debugfs();

	ret = pci_register_driver(&sec_pci_driver);
	if (ret < 0) {
		sec_unregister_debugfs();
		pr_err("Failed to register pci driver.\n");
		return ret;
	}

	return 0;
}

/* Module exit: unbind all devices, then tear down debugfs. */
static void __exit sec_exit(void)
{
	pci_unregister_driver(&sec_pci_driver);
	sec_unregister_debugfs();
}

module_init(sec_init);
module_exit(sec_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
MODULE_AUTHOR("Longfang Liu <liulongfang@huawei.com>");
MODULE_AUTHOR("Kai Ye <yekai13@huawei.com>");
MODULE_AUTHOR("Wei Zhang <zhangwei375@huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon SEC accelerator");