// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"

static u32 max_rings = EIP197_MAX_RINGS;
module_param(max_rings, uint, 0644);
MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");

static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
	u32 val, htable_offset;
	int i, cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;

	if (priv->version == EIP197D_MRVL) {
		cs_rc_max = EIP197D_CS_RC_MAX;
		cs_ht_wc = EIP197D_CS_HT_WC;
		cs_trc_rec_wc = EIP197D_CS_TRC_REC_WC;
		cs_trc_lg_rec_wc = EIP197D_CS_TRC_LG_REC_WC;
	} else {
		/* Default to minimum "safe" settings */
		cs_rc_max = EIP197B_CS_RC_MAX;
		cs_ht_wc = EIP197B_CS_HT_WC;
		cs_trc_rec_wc = EIP197B_CS_TRC_REC_WC;
		cs_trc_lg_rec_wc = EIP197B_CS_TRC_LG_REC_WC;
	}

	/* Enable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	val |= EIP197_TRC_ENABLE_0;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Clear all ECC errors */
	writel(0, priv->base + EIP197_TRC_ECCCTRL);

	/*
	 * Make sure the cache memory is accessible by taking record cache into
	 * reset.
	 */
	val = readl(priv->base + EIP197_TRC_PARAMS);
	val |= EIP197_TRC_PARAMS_SW_RESET;
	val &= ~EIP197_TRC_PARAMS_DATA_ACCESS;
	writel(val, priv->base + EIP197_TRC_PARAMS);

	/* Clear all records */
	for (i = 0; i < cs_rc_max; i++) {
		u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;

		writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
		       EIP197_CS_RC_PREV(EIP197_RC_NULL),
		       priv->base + offset);

		val = EIP197_CS_RC_NEXT(i+1) | EIP197_CS_RC_PREV(i-1);
		if (i == 0)
			val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
		else if (i == cs_rc_max - 1)
			val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
		writel(val, priv->base + offset + sizeof(u32));
	}

	/* Clear the hash table entries */
	htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
	for (i = 0; i < cs_ht_wc; i++)
		writel(GENMASK(29, 0),
		       priv->base + EIP197_CLASSIFICATION_RAMS + htable_offset + i * sizeof(u32));

	/* Disable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Write head and tail pointers of the record free chain */
	val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
	      EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
	writel(val, priv->base + EIP197_TRC_FREECHAIN);

	/* Configure the record cache #1 */
	val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) |
	      EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max);
	writel(val, priv->base + EIP197_TRC_PARAMS2);

	/* Configure the record cache #2 */
	val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
	      EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
	      EIP197_TRC_PARAMS_HTABLE_SZ(2);
	writel(val, priv->base + EIP197_TRC_PARAMS);
}

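/*
 * Prepare each processing engine for firmware download: set up the token
 * FIFOs, clear the ICE scratchpad RAM, put the IFPP and IPUE micro-engines
 * in reset and enable access to the IFPP program memory.
 */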
static void eip197_init_firmware(struct safexcel_crypto_priv *priv)
{
	int pe, i;
	u32 val;

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Configure the token FIFOs */
		writel(3, EIP197_PE(priv) + EIP197_PE_ICE_PUTF_CTRL(pe));
		writel(0, EIP197_PE(priv) + EIP197_PE_ICE_PPTF_CTRL(pe));

		/* Clear the ICE scratchpad memory */
		val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
		val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
		       EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
		       EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
		       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));

		/* clear the scratchpad RAM using 32 bit writes only */
		for (i = 0; i < EIP197_NUM_OF_SCRATCH_BLOCKS; i++)
			writel(0, EIP197_PE(priv) +
				  EIP197_PE_ICE_SCRATCH_RAM(pe) + (i<<2));

		/* Reset the IFPP engine to make its program mem accessible */
		writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
		       EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));

		/* Reset the IPUE engine to make its program mem accessible */
		writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
		       EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));

		/* Enable access to all IFPP program memories */
		writel(EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN,
		       EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
	}

}

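/*
 * Copy a firmware image into the classification RAM as big-endian 32-bit
 * words and return its size in words, excluding the two terminal NOPs.
 */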
static int eip197_write_firmware(struct safexcel_crypto_priv *priv,
				  const struct firmware *fw)
{
	const u32 *data = (const u32 *)fw->data;
	int i;

	/* Write the firmware */
	for (i = 0; i < fw->size / sizeof(u32); i++)
		writel(be32_to_cpu(data[i]),
		       priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));

	/* Exclude final 2 NOPs from size */
	return i - EIP197_FW_TERMINAL_NOPS;
}

/*
 * If FW is actual production firmware, then poll for its initialization
 * to complete and check if it is good for the HW, otherwise just return OK.
 */
static bool poll_fw_ready(struct safexcel_crypto_priv *priv, int fpp)
{
	int pe, pollcnt;
	u32 base, pollofs;

	if (fpp)
		pollofs  = EIP197_FW_FPP_READY;
	else
		pollofs  = EIP197_FW_PUE_READY;

	for (pe = 0; pe < priv->config.pes; pe++) {
		base = EIP197_PE_ICE_SCRATCH_RAM(pe);
		pollcnt = EIP197_FW_START_POLLCNT;
		while (pollcnt &&
		       (readl_relaxed(EIP197_PE(priv) + base +
			      pollofs) != 1)) {
			pollcnt--;
		}
		if (!pollcnt) {
			dev_err(priv->dev, "FW(%d) for PE %d failed to start\n",
				fpp, pe);
			return false;
		}
	}
	return true;
}

static bool eip197_start_firmware(struct safexcel_crypto_priv *priv,
				  int ipuesz, int ifppsz, int minifw)
{
	int pe;
	u32 val;

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Disable access to all program memory */
		writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

		/* Start IFPP microengines */
		if (minifw)
			val = 0;
		else
			val = EIP197_PE_ICE_UENG_START_OFFSET((ifppsz - 1) &
					EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
				EIP197_PE_ICE_UENG_DEBUG_RESET;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));

		/* Start IPUE microengines */
		if (minifw)
			val = 0;
		else
			val = EIP197_PE_ICE_UENG_START_OFFSET((ipuesz - 1) &
					EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
				EIP197_PE_ICE_UENG_DEBUG_RESET;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));
	}

	/* For miniFW startup, there is no initialization, so always succeed */
	if (minifw)
		return true;

	/* Wait until all the firmwares have properly started up */
	if (!poll_fw_ready(priv, 1))
		return false;
	if (!poll_fw_ready(priv, 0))
		return false;

	return true;
}

static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
{
	const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
	const struct firmware *fw[FW_NB];
	char fw_path[37], *dir = NULL;
	int i, j, ret = 0, pe;
	int ipuesz, ifppsz, minifw = 0;

	if (priv->version == EIP197D_MRVL)
		dir = "eip197d";
	else if (priv->version == EIP197B_MRVL ||
		 priv->version == EIP197_DEVBRD)
		dir = "eip197b";
	else
		return -ENODEV;

retry_fw:
	for (i = 0; i < FW_NB; i++) {
		snprintf(fw_path, 37, "inside-secure/%s/%s", dir, fw_name[i]);
		ret = firmware_request_nowarn(&fw[i], fw_path, priv->dev);
		if (ret) {
			if (minifw || priv->version != EIP197B_MRVL)
				goto release_fw;

			/* Fallback to the old firmware location for the
			 * EIP197b.
			 */
			ret = firmware_request_nowarn(&fw[i], fw_name[i],
						      priv->dev);
			if (ret)
				goto release_fw;
		}
	}

	eip197_init_firmware(priv);

	ifppsz = eip197_write_firmware(priv, fw[FW_IFPP]);

	/* Enable access to IPUE program memories */
	for (pe = 0; pe < priv->config.pes; pe++)
		writel(EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN,
		       EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

	ipuesz = eip197_write_firmware(priv, fw[FW_IPUE]);

	if (eip197_start_firmware(priv, ipuesz, ifppsz, minifw)) {
		dev_dbg(priv->dev, "Firmware loaded successfully\n");
		return 0;
	}

	ret = -ENODEV;

release_fw:
	for (j = 0; j < i; j++)
		release_firmware(fw[j]);

	if (!minifw) {
		/* Retry with minifw path */
		dev_dbg(priv->dev, "Firmware set not (fully) present or init failed, falling back to BCLA mode\n");
		dir = "eip197_minifw";
		minifw = 1;
		goto retry_fw;
	}

	dev_dbg(priv->dev, "Firmware load failed.\n");

	return ret;
}

static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 cd_size_rnd, val;
	int i, cd_fetch_cnt;

	cd_size_rnd  = (priv->config.cd_size +
			(BIT(priv->hwconfig.hwdataw) - 1)) >>
		       priv->hwconfig.hwdataw;
	/* determine number of CD's we can fetch into the CD FIFO as 1 block */
	if (priv->flags & SAFEXCEL_HW_EIP197) {
		/* EIP197: try to fetch enough in 1 go to keep all pipes busy */
		cd_fetch_cnt = (1 << priv->hwconfig.hwcfsize) / cd_size_rnd;
		cd_fetch_cnt = min_t(uint, cd_fetch_cnt,
				     (priv->config.pes * EIP197_FETCH_DEPTH));
	} else {
		/* for the EIP97, just fetch all that fits minus 1 */
		cd_fetch_cnt = ((1 << priv->hwconfig.hwcfsize) /
				cd_size_rnd) - 1;
	}

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
		       priv->config.cd_size,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
		writel(((cd_fetch_cnt *
			 (cd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
		       (cd_fetch_cnt * priv->config.cd_offset),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(5, 0),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
	}

	return 0;
}

static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 rd_size_rnd, val;
	int i, rd_fetch_cnt;

	/* determine number of RD's we can fetch into the FIFO as one block */
	rd_size_rnd = (EIP197_RD64_FETCH_SIZE +
		      BIT(priv->hwconfig.hwdataw) - 1) >>
		      priv->hwconfig.hwdataw;
	if (priv->flags & SAFEXCEL_HW_EIP197) {
		/* EIP197: try to fetch enough in 1 go to keep all pipes busy */
		rd_fetch_cnt = (1 << priv->hwconfig.hwrfsize) / rd_size_rnd;
		rd_fetch_cnt = min_t(uint, rd_fetch_cnt,
				     (priv->config.pes * EIP197_FETCH_DEPTH));
	} else {
		/* for the EIP97, just fetch all that fits minus 1 */
		rd_fetch_cnt = ((1 << priv->hwconfig.hwrfsize) /
			       rd_size_rnd) - 1;
	}

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
		       priv->config.rd_size,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);

		writel(((rd_fetch_cnt *
			 (rd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
		       (rd_fetch_cnt * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
		writel(val,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(7, 0),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* enable ring interrupt */
		val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
		val |= EIP197_RDR_IRQ(i);
		writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
	}

	return 0;
}

static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
	u32 val;
	int i, ret, pe;

	dev_dbg(priv->dev, "HW init: using %d pipe(s) and %d ring(s)\n",
		priv->config.pes, priv->config.rings);

	/*
	 * For EIP197s only, set the maximum number of TX commands to 2^5 = 32.
	 * Skip this for the EIP97 as it does not have this field.
	 */
	if (priv->flags & SAFEXCEL_HW_EIP197) {
		val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
		val |= EIP197_MST_CTRL_TX_MAX_CMD(5);
		writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
	}

	/* Configure wr/rd cache values */
	writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
	       EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
	       EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);

	/* Interrupts reset */

	/* Disable all global interrupts */
	writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);

	/* Clear any pending interrupt */
	writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	/* Processing Engine configuration */
	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Data Fetch Engine configuration */

		/* Reset all DFE threads */
		writel(EIP197_DxE_THR_CTRL_RESET_PE,
		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		if (priv->flags & SAFEXCEL_HW_EIP197)
			/* Reset HIA input interface arbiter (EIP197 only) */
			writel(EIP197_HIA_RA_PE_CTRL_RESET,
			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));

		/* DMA transfer size to use */
		val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
		val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
		       EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
		val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe));

		/* Leave the DFE threads reset state */
		writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		/* Configure the processing engine thresholds */
		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
		       EIP197_PE_IN_xBUF_THRES_MAX(9),
		       EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe));
		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
		       EIP197_PE_IN_xBUF_THRES_MAX(7),
		       EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));

		if (priv->flags & SAFEXCEL_HW_EIP197)
			/* enable HIA input interface arbiter and rings */
			writel(EIP197_HIA_RA_PE_CTRL_EN |
			       GENMASK(priv->config.rings - 1, 0),
			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));

		/* Data Store Engine configuration */

		/* Reset all DSE threads */
		writel(EIP197_DxE_THR_CTRL_RESET_PE,
		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

		/* Wait for all DSE threads to complete */
		while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) &
			GENMASK(15, 12)) != GENMASK(15, 12))
			;

		/* DMA transfer size to use */
		val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
		val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
		/* FIXME: instability issues can occur for EIP97 but disabling
		 * it impacts performance.
		 */
		if (priv->flags & SAFEXCEL_HW_EIP197)
			val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
		writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));

		/* Leave the DSE threads reset state */
		writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

		/* Configure the processing engine thresholds */
		writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) |
		       EIP197_PE_OUT_DBUF_THRES_MAX(8),
		       EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));

		/* Processing Engine configuration */

		/* Token & context configuration */
		val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES |
		      EIP197_PE_EIP96_TOKEN_CTRL_NO_TOKEN_WAIT |
		      EIP197_PE_EIP96_TOKEN_CTRL_ENABLE_TIMEOUT;
		writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe));

		/* H/W capabilities selection: just enable everything */
		writel(EIP197_FUNCTION_ALL,
		       EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
		writel(EIP197_FUNCTION_ALL,
		       EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION2_EN(pe));
	}

	/* Command Descriptor Rings prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Clear interrupts for this ring */
		writel(GENMASK(31, 0),
		       EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));

		/* Disable external triggering */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	/* Result Descriptor Ring prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Disable external triggering*/
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		/* Ring size */
		writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Enable command descriptor rings */
		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		/* Enable result descriptor rings */
		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
	}

	/* Clear any HIA interrupt */
	writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	if (priv->flags & SAFEXCEL_HW_EIP197) {
		eip197_trc_cache_init(priv);
		priv->flags |= EIP197_TRC_CACHE;

		ret = eip197_load_firmwares(priv);
		if (ret)
			return ret;
	}

	safexcel_hw_setup_cdesc_rings(priv);
	safexcel_hw_setup_rdesc_rings(priv);

	return 0;
}

/* Called with ring's lock taken */
static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
				       int ring)
{
	int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);

	if (!coal)
		return;

	/* Configure when we want an interrupt */
	writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
	       EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
}

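/*
 * Pull requests from the ring's crypto queue, convert them into command and
 * result descriptors via the context's send() callback, and notify the CDR
 * and RDR of the newly prepared descriptors.
 */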
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
	struct crypto_async_request *req, *backlog;
	struct safexcel_context *ctx;
	int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;

	/* If a request wasn't properly dequeued because of a lack of resources,
	 * process it first.
	 */
	req = priv->ring[ring].req;
	backlog = priv->ring[ring].backlog;
	if (req)
		goto handle_req;

	while (true) {
		spin_lock_bh(&priv->ring[ring].queue_lock);
		backlog = crypto_get_backlog(&priv->ring[ring].queue);
		req = crypto_dequeue_request(&priv->ring[ring].queue);
		spin_unlock_bh(&priv->ring[ring].queue_lock);

		if (!req) {
			priv->ring[ring].req = NULL;
			priv->ring[ring].backlog = NULL;
			goto finalize;
		}

handle_req:
		ctx = crypto_tfm_ctx(req->tfm);
		ret = ctx->send(req, ring, &commands, &results);
		if (ret)
			goto request_failed;

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		/* In case the send() helper did not issue any command to push
		 * to the engine because the input data was cached, continue to
		 * dequeue other requests as this is valid and not an error.
		 */
		if (!commands && !results)
			continue;

		cdesc += commands;
		rdesc += results;
		nreq++;
	}

request_failed:
	/* Not enough resources to handle all the requests. Bail out and save
	 * the request and the backlog for the next dequeue call (per-ring).
	 */
	priv->ring[ring].req = req;
	priv->ring[ring].backlog = backlog;

finalize:
	if (!nreq)
		return;

	spin_lock_bh(&priv->ring[ring].lock);

	priv->ring[ring].requests += nreq;

	if (!priv->ring[ring].busy) {
		safexcel_try_push_requests(priv, ring);
		priv->ring[ring].busy = true;
	}

	spin_unlock_bh(&priv->ring[ring].lock);

	/* let the RDR know we have pending descriptors */
	writel((rdesc * priv->config.rd_offset) << 2,
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);

	/* let the CDR know we have pending descriptors */
	writel((cdesc * priv->config.cd_offset) << 2,
	       EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}

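/*
 * Map the error bits of a result descriptor onto an errno: fatal engine
 * errors return -EIO, input (blocksize/length/overflow) errors -EINVAL and
 * authentication failures -EBADMSG.
 */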
inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
				       struct safexcel_result_desc *rdesc)
{
	if (likely((!rdesc->descriptor_overflow) &&
		   (!rdesc->buffer_overflow) &&
		   (!rdesc->result_data.error_code)))
		return 0;

	if (rdesc->descriptor_overflow)
		dev_err(priv->dev, "Descriptor overflow detected");

	if (rdesc->buffer_overflow)
		dev_err(priv->dev, "Buffer overflow detected");

	if (rdesc->result_data.error_code & 0x4066) {
		/* Fatal error (bits 1,2,5,6 & 14) */
		dev_err(priv->dev,
			"result descriptor error (%x)",
			rdesc->result_data.error_code);
		return -EIO;
	} else if (rdesc->result_data.error_code &
		   (BIT(7) | BIT(4) | BIT(3) | BIT(0))) {
		/*
		 * Give priority over authentication fails:
		 * Blocksize, length & overflow errors,
		 * something wrong with the input!
		 */
		return -EINVAL;
	} else if (rdesc->result_data.error_code & BIT(9)) {
		/* Authentication failed */
		return -EBADMSG;
	}

	/* All other non-fatal errors */
	return -EINVAL;
}

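/*
 * Helpers to associate a result descriptor with the crypto request that
 * produced it, using the per-ring rdr_req array.
 */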
inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
				 int ring,
				 struct safexcel_result_desc *rdesc,
				 struct crypto_async_request *req)
{
	int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);

	priv->ring[ring].rdr_req[i] = req;
}

inline struct crypto_async_request *
safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
{
	int i = safexcel_ring_first_rdr_index(priv, ring);

	return priv->ring[ring].rdr_req[i];
}

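/*
 * Advance the CDR read pointer past all command descriptors of the completed
 * request, i.e. up to and including the descriptor marked last_seg.
 */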
void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
	struct safexcel_command_desc *cdesc;

	/* Acknowledge the command descriptors */
	do {
		cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
		if (IS_ERR(cdesc)) {
			dev_err(priv->dev,
				"Could not retrieve the command descriptor\n");
			return;
		}
	} while (!cdesc->last_seg);
}

void safexcel_inv_complete(struct crypto_async_request *req, int error)
{
	struct safexcel_inv_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

int safexcel_invalidate_cache(struct crypto_async_request *async,
			      struct safexcel_crypto_priv *priv,
			      dma_addr_t ctxr_dma, int ring)
{
	struct safexcel_command_desc *cdesc;
	struct safexcel_result_desc *rdesc;
	int ret = 0;

	/* Prepare command descriptor */
	cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	cdesc->control_data.type = EIP197_TYPE_EXTENDED;
	cdesc->control_data.options = 0;
	cdesc->control_data.refresh = 0;
	cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;

	/* Prepare result descriptor */
	rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);

	if (IS_ERR(rdesc)) {
		ret = PTR_ERR(rdesc);
		goto cdesc_rollback;
	}

	safexcel_rdr_req_set(priv, ring, rdesc, async);

	return ret;

cdesc_rollback:
	safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

	return ret;
}

static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
						     int ring)
{
	struct crypto_async_request *req;
	struct safexcel_context *ctx;
	int ret, i, nreq, ndesc, tot_descs, handled = 0;
	bool should_complete;

handle_results:
	tot_descs = 0;

	nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
	nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
	nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
	if (!nreq)
		goto requests_left;

	for (i = 0; i < nreq; i++) {
		req = safexcel_rdr_req_get(priv, ring);

		ctx = crypto_tfm_ctx(req->tfm);
		ndesc = ctx->handle_result(priv, ring, req,
					   &should_complete, &ret);
		if (ndesc < 0) {
			dev_err(priv->dev, "failed to handle result (%d)\n",
				ndesc);
			goto acknowledge;
		}

		if (should_complete) {
			local_bh_disable();
			req->complete(req, ret);
			local_bh_enable();
		}

		tot_descs += ndesc;
		handled++;
	}

acknowledge:
	if (i)
		writel(EIP197_xDR_PROC_xD_PKT(i) |
		       EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);

	/* If the number of requests overflowed the counter, try to process more
	 * requests.
	 */
	if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
		goto handle_results;

requests_left:
	spin_lock_bh(&priv->ring[ring].lock);

	priv->ring[ring].requests -= handled;
	safexcel_try_push_requests(priv, ring);

	if (!priv->ring[ring].requests)
		priv->ring[ring].busy = false;

	spin_unlock_bh(&priv->ring[ring].lock);
}

static void safexcel_dequeue_work(struct work_struct *work)
{
	struct safexcel_work_data *data =
			container_of(work, struct safexcel_work_data, work);

	safexcel_dequeue(data->priv, data->ring);
}

struct safexcel_ring_irq_data {
	struct safexcel_crypto_priv *priv;
	int ring;
};

static irqreturn_t safexcel_irq_ring(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring, rc = IRQ_NONE;
	u32 status, stat;

	status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
	if (!status)
		return rc;

	/* RDR interrupts */
	if (status & EIP197_RDR_IRQ(ring)) {
		stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);

		if (unlikely(stat & EIP197_xDR_ERR)) {
			/*
			 * Fatal error, the RDR is unusable and must be
			 * reinitialized. This should not happen under
			 * normal circumstances.
			 */
			dev_err(priv->dev, "RDR: fatal error.\n");
		} else if (likely(stat & EIP197_xDR_THRESH)) {
			rc = IRQ_WAKE_THREAD;
		}

		/* ACK the interrupts */
		writel(stat & 0xff,
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
	}

	/* ACK the interrupts */
	writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));

	return rc;
}

static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring;

	safexcel_handle_result_descriptor(priv, ring);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return IRQ_HANDLED;
}

static int safexcel_request_ring_irq(void *pdev, int irqid,
				     int is_pci_dev,
				     irq_handler_t handler,
				     irq_handler_t threaded_handler,
				     struct safexcel_ring_irq_data *ring_irq_priv)
{
	int ret, irq;
	struct device *dev;

	if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) {
		struct pci_dev *pci_pdev = pdev;

		dev = &pci_pdev->dev;
		irq = pci_irq_vector(pci_pdev, irqid);
		if (irq < 0) {
			dev_err(dev, "unable to get device MSI IRQ %d (err %d)\n",
				irqid, irq);
			return irq;
		}
	} else if (IS_ENABLED(CONFIG_OF)) {
		struct platform_device *plf_pdev = pdev;
		char irq_name[6] = {0}; /* "ringX\0" */

		snprintf(irq_name, 6, "ring%d", irqid);
		dev = &plf_pdev->dev;
		irq = platform_get_irq_byname(plf_pdev, irq_name);

		if (irq < 0) {
			dev_err(dev, "unable to get IRQ '%s' (err %d)\n",
				irq_name, irq);
			return irq;
		}
	}

	ret = devm_request_threaded_irq(dev, irq, handler,
					threaded_handler, IRQF_ONESHOT,
					dev_name(dev), ring_irq_priv);
	if (ret) {
		dev_err(dev, "unable to request IRQ %d\n", irq);
		return ret;
	}

	return irq;
}

static struct safexcel_alg_template *safexcel_algs[] = {
	&safexcel_alg_ecb_des,
	&safexcel_alg_cbc_des,
	&safexcel_alg_ecb_des3_ede,
	&safexcel_alg_cbc_des3_ede,
	&safexcel_alg_ecb_aes,
	&safexcel_alg_cbc_aes,
	&safexcel_alg_cfb_aes,
	&safexcel_alg_ofb_aes,
	&safexcel_alg_ctr_aes,
	&safexcel_alg_md5,
	&safexcel_alg_sha1,
	&safexcel_alg_sha224,
	&safexcel_alg_sha256,
	&safexcel_alg_sha384,
	&safexcel_alg_sha512,
	&safexcel_alg_hmac_md5,
	&safexcel_alg_hmac_sha1,
	&safexcel_alg_hmac_sha224,
	&safexcel_alg_hmac_sha256,
	&safexcel_alg_hmac_sha384,
	&safexcel_alg_hmac_sha512,
	&safexcel_alg_authenc_hmac_sha1_cbc_aes,
	&safexcel_alg_authenc_hmac_sha224_cbc_aes,
	&safexcel_alg_authenc_hmac_sha256_cbc_aes,
	&safexcel_alg_authenc_hmac_sha384_cbc_aes,
	&safexcel_alg_authenc_hmac_sha512_cbc_aes,
	&safexcel_alg_authenc_hmac_sha1_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha1_ctr_aes,
	&safexcel_alg_authenc_hmac_sha224_ctr_aes,
	&safexcel_alg_authenc_hmac_sha256_ctr_aes,
	&safexcel_alg_authenc_hmac_sha384_ctr_aes,
	&safexcel_alg_authenc_hmac_sha512_ctr_aes,
	&safexcel_alg_xts_aes,
	&safexcel_alg_gcm,
	&safexcel_alg_ccm,
};

static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
{
	int i, j, ret = 0;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		safexcel_algs[i]->priv = priv;

		/* Do we have all required base algorithms available? */
		if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
		    safexcel_algs[i]->algo_mask)
			/* No, so don't register this ciphersuite */
			continue;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
		else
			ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);

		if (ret)
			goto fail;
	}

	return 0;

fail:
	for (j = 0; j < i; j++) {
		/* Do we have all required base algorithms available? */
		if ((safexcel_algs[j]->algo_mask & priv->hwconfig.algo_flags) !=
		    safexcel_algs[j]->algo_mask)
			/* No, so don't unregister this ciphersuite */
			continue;

		if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
		else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[j]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
	}

	return ret;
}

static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		/* Do we have all required base algorithms available? */
		if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
		    safexcel_algs[i]->algo_mask)
			/* No, so don't unregister this ciphersuite */
			continue;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[i]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
	}
}

static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
	u32 val, mask = 0;

	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);

	/* Read number of PEs from the engine */
	if (priv->flags & SAFEXCEL_HW_EIP197)
		/* Wider field width for all EIP197 type engines */
		mask = EIP197_N_PES_MASK;
	else
		/* Narrow field width for EIP97 type engine */
		mask = EIP97_N_PES_MASK;

	priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask;

	priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);

	val = (val & GENMASK(27, 25)) >> 25;
	mask = BIT(val) - 1;

	priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
	priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;

	priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32));
	priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
}

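/*
 * The EIP97 and EIP197 use different register block layouts; select the
 * proper set of base offsets for the engine detected during probing.
 */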
static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
{
	struct safexcel_register_offsets *offsets = &priv->offsets;

	if (priv->flags & SAFEXCEL_HW_EIP197) {
		offsets->hia_aic	= EIP197_HIA_AIC_BASE;
		offsets->hia_aic_g	= EIP197_HIA_AIC_G_BASE;
		offsets->hia_aic_r	= EIP197_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr	= EIP197_HIA_AIC_xDR_BASE;
		offsets->hia_dfe	= EIP197_HIA_DFE_BASE;
		offsets->hia_dfe_thr	= EIP197_HIA_DFE_THR_BASE;
		offsets->hia_dse	= EIP197_HIA_DSE_BASE;
		offsets->hia_dse_thr	= EIP197_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg	= EIP197_HIA_GEN_CFG_BASE;
		offsets->pe		= EIP197_PE_BASE;
		offsets->global		= EIP197_GLOBAL_BASE;
	} else {
		offsets->hia_aic	= EIP97_HIA_AIC_BASE;
		offsets->hia_aic_g	= EIP97_HIA_AIC_G_BASE;
		offsets->hia_aic_r	= EIP97_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr	= EIP97_HIA_AIC_xDR_BASE;
		offsets->hia_dfe	= EIP97_HIA_DFE_BASE;
		offsets->hia_dfe_thr	= EIP97_HIA_DFE_THR_BASE;
		offsets->hia_dse	= EIP97_HIA_DSE_BASE;
		offsets->hia_dse_thr	= EIP97_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg	= EIP97_HIA_GEN_CFG_BASE;
		offsets->pe		= EIP97_PE_BASE;
		offsets->global		= EIP97_GLOBAL_BASE;
	}
}

/*
 * Generic part of probe routine, shared by platform and PCI driver
 *
 * Assumes IO resources have been mapped, private data mem has been allocated,
 * clocks have been enabled, device pointer has been assigned etc.
 *
 */
static int safexcel_probe_generic(void *pdev,
				  struct safexcel_crypto_priv *priv,
				  int is_pci_dev)
{
	struct device *dev = priv->dev;
	u32 peid, version, mask, val, hiaopt;
	int i, ret, hwctg;

	priv->context_pool = dmam_pool_create("safexcel-context", dev,
					      sizeof(struct safexcel_context_record),
					      1, 0);
	if (!priv->context_pool)
		return -ENOMEM;

	/*
	 * First try the EIP97 HIA version regs
	 * For the EIP197, this is guaranteed to NOT return any of the test
	 * values
	 */
	version = readl(priv->base + EIP97_HIA_AIC_BASE + EIP197_HIA_VERSION);

	mask = 0;  /* do not swap */
	if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) {
		priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
	} else if (EIP197_REG_HI16(version) == EIP197_HIA_VERSION_BE) {
		/* read back byte-swapped, so complement byte swap bits */
		mask = EIP197_MST_CTRL_BYTE_SWAP_BITS;
		priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
	} else {
		/* So it wasn't an EIP97 ... maybe it's an EIP197? */
		version = readl(priv->base + EIP197_HIA_AIC_BASE +
				EIP197_HIA_VERSION);
		if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) {
			priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
			priv->flags |= SAFEXCEL_HW_EIP197;
		} else if (EIP197_REG_HI16(version) ==
			   EIP197_HIA_VERSION_BE) {
			/* read back byte-swapped, so complement swap bits */
			mask = EIP197_MST_CTRL_BYTE_SWAP_BITS;
			priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
			priv->flags |= SAFEXCEL_HW_EIP197;
		} else {
			return -ENODEV;
		}
	}

	/* Now initialize the reg offsets based on the probing info so far */
	safexcel_init_register_offsets(priv);

	/*
	 * If the version was read byte-swapped, we need to flip the device
	 * swapping. Keep in mind here, though, that what we write will also be
	 * byte-swapped ...
	 */
	if (mask) {
		val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
		val = val ^ (mask >> 24); /* toggle byte swap bits */
		writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
	}

	/*
	 * We're not done probing yet! We may fall through to here if no HIA
	 * was found at all. So, with the endianness presumably correct now and
	 * the offsets setup, *really* probe for the EIP97/EIP197.
	 */
	version = readl(EIP197_GLOBAL(priv) + EIP197_VERSION);
	if (((priv->flags & SAFEXCEL_HW_EIP197) &&
	     (EIP197_REG_LO16(version) != EIP197_VERSION_LE)) ||
	    ((!(priv->flags & SAFEXCEL_HW_EIP197) &&
	     (EIP197_REG_LO16(version) != EIP97_VERSION_LE)))) {
		/*
		 * We did not find the device that matched our initial probing
		 * (or our initial probing failed). Report the appropriate error.
		 */
		return -ENODEV;
	}

	priv->hwconfig.hwver = EIP197_VERSION_MASK(version);
	hwctg = version >> 28;
	peid = version & 255;

	/* Detect EIP96 packet engine and version */
	version = readl(EIP197_PE(priv) + EIP197_PE_EIP96_VERSION(0));
	if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) {
		dev_err(dev, "EIP%d: EIP96 not detected.\n", peid);
		return -ENODEV;
	}
	priv->hwconfig.pever = EIP197_VERSION_MASK(version);

	hiaopt = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_OPTIONS);

	if (priv->flags & SAFEXCEL_HW_EIP197) {
		/* EIP197 */
		priv->hwconfig.hwdataw  = (hiaopt >> EIP197_HWDATAW_OFFSET) &
					  EIP197_HWDATAW_MASK;
		priv->hwconfig.hwcfsize = ((hiaopt >> EIP197_CFSIZE_OFFSET) &
					   EIP197_CFSIZE_MASK) +
					  EIP197_CFSIZE_ADJUST;
		priv->hwconfig.hwrfsize = ((hiaopt >> EIP197_RFSIZE_OFFSET) &
					   EIP197_RFSIZE_MASK) +
					  EIP197_RFSIZE_ADJUST;
	} else {
		/* EIP97 */
		priv->hwconfig.hwdataw  = (hiaopt >> EIP197_HWDATAW_OFFSET) &
					  EIP97_HWDATAW_MASK;
		priv->hwconfig.hwcfsize = (hiaopt >> EIP97_CFSIZE_OFFSET) &
					  EIP97_CFSIZE_MASK;
		priv->hwconfig.hwrfsize = (hiaopt >> EIP97_RFSIZE_OFFSET) &
					  EIP97_RFSIZE_MASK;
	}

	/* Get supported algorithms from EIP96 transform engine */
	priv->hwconfig.algo_flags = readl(EIP197_PE(priv) +
				    EIP197_PE_EIP96_OPTIONS(0));

	/* Print single info line describing what we just detected */
	dev_info(priv->dev, "EIP%d:%x(%d)-HIA:%x(%d,%d,%d),PE:%x,alg:%08x\n",
		 peid, priv->hwconfig.hwver, hwctg, priv->hwconfig.hiaver,
		 priv->hwconfig.hwdataw, priv->hwconfig.hwcfsize,
		 priv->hwconfig.hwrfsize, priv->hwconfig.pever,
		 priv->hwconfig.algo_flags);

	safexcel_configure(priv);

	if (IS_ENABLED(CONFIG_PCI) && priv->version == EIP197_DEVBRD) {
		/*
		 * Request MSI vectors for global + 1 per ring -
		 * or just 1 for older dev images
		 */
		struct pci_dev *pci_pdev = pdev;

		ret = pci_alloc_irq_vectors(pci_pdev,
					    priv->config.rings + 1,
					    priv->config.rings + 1,
					    PCI_IRQ_MSI | PCI_IRQ_MSIX);
		if (ret < 0) {
			dev_err(dev, "Failed to allocate PCI MSI interrupts\n");
			return ret;
		}
	}

	/* Register the ring IRQ handlers and configure the rings */
	priv->ring = devm_kcalloc(dev, priv->config.rings,
				  sizeof(*priv->ring),
				  GFP_KERNEL);
	if (!priv->ring)
		return -ENOMEM;

	for (i = 0; i < priv->config.rings; i++) {
		char wq_name[9] = {0};
		int irq;
		struct safexcel_ring_irq_data *ring_irq;

		ret = safexcel_init_ring_descriptors(priv,
						     &priv->ring[i].cdr,
						     &priv->ring[i].rdr);
		if (ret) {
			dev_err(dev, "Failed to initialize rings\n");
			return ret;
		}

		priv->ring[i].rdr_req = devm_kcalloc(dev,
			EIP197_DEFAULT_RING_SIZE,
			sizeof(priv->ring[i].rdr_req),
			GFP_KERNEL);
		if (!priv->ring[i].rdr_req)
			return -ENOMEM;

		ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
		if (!ring_irq)
			return -ENOMEM;

		ring_irq->priv = priv;
		ring_irq->ring = i;

		irq = safexcel_request_ring_irq(pdev,
						EIP197_IRQ_NUMBER(i, is_pci_dev),
						is_pci_dev,
						safexcel_irq_ring,
						safexcel_irq_ring_thread,
						ring_irq);
		if (irq < 0) {
			dev_err(dev, "Failed to get IRQ ID for ring %d\n", i);
			return irq;
		}

		priv->ring[i].work_data.priv = priv;
		priv->ring[i].work_data.ring = i;
		INIT_WORK(&priv->ring[i].work_data.work,
			  safexcel_dequeue_work);

		snprintf(wq_name, 9, "wq_ring%d", i);
		priv->ring[i].workqueue =
			create_singlethread_workqueue(wq_name);
		if (!priv->ring[i].workqueue)
			return -ENOMEM;

		priv->ring[i].requests = 0;
		priv->ring[i].busy = false;

		crypto_init_queue(&priv->ring[i].queue,
				  EIP197_DEFAULT_RING_SIZE);

		spin_lock_init(&priv->ring[i].lock);
		spin_lock_init(&priv->ring[i].queue_lock);
	}

	atomic_set(&priv->ring_used, 0);

	ret = safexcel_hw_init(priv);
	if (ret) {
		dev_err(dev, "HW init failed (%d)\n", ret);
		return ret;
	}

	ret = safexcel_register_algorithms(priv);
	if (ret) {
		dev_err(dev, "Failed to register algorithms (%d)\n", ret);
		return ret;
	}

	return 0;
}

static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < priv->config.rings; i++) {
		/* clear any pending interrupt */
		writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
		writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* Reset the CDR base address */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		/* Reset the RDR base address */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
	}
}

#if IS_ENABLED(CONFIG_OF)
/* for Device Tree platform driver */

static int safexcel_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct safexcel_crypto_priv *priv;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);

	platform_set_drvdata(pdev, priv);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		dev_err(dev, "failed to get resource\n");
		return PTR_ERR(priv->base);
	}

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	ret = PTR_ERR_OR_ZERO(priv->clk);
	/* The clock isn't mandatory */
	if  (ret != -ENOENT) {
		if (ret)
			return ret;

		ret = clk_prepare_enable(priv->clk);
		if (ret) {
			dev_err(dev, "unable to enable clk (%d)\n", ret);
			return ret;
		}
	}

	priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
	ret = PTR_ERR_OR_ZERO(priv->reg_clk);
	/* The clock isn't mandatory */
	if  (ret != -ENOENT) {
		if (ret)
			goto err_core_clk;

		ret = clk_prepare_enable(priv->reg_clk);
		if (ret) {
			dev_err(dev, "unable to enable reg clk (%d)\n", ret);
			goto err_core_clk;
		}
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_reg_clk;

	/* Generic EIP97/EIP197 device probing */
	ret = safexcel_probe_generic(pdev, priv, 0);
	if (ret)
		goto err_reg_clk;

	return 0;

err_reg_clk:
	clk_disable_unprepare(priv->reg_clk);
err_core_clk:
	clk_disable_unprepare(priv->clk);
	return ret;
}

static int safexcel_remove(struct platform_device *pdev)
{
	struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);
	safexcel_hw_reset_rings(priv);

	clk_disable_unprepare(priv->clk);

	for (i = 0; i < priv->config.rings; i++)
		destroy_workqueue(priv->ring[i].workqueue);

	return 0;
}

static const struct of_device_id safexcel_of_match_table[] = {
	{
		.compatible = "inside-secure,safexcel-eip97ies",
		.data = (void *)EIP97IES_MRVL,
	},
	{
		.compatible = "inside-secure,safexcel-eip197b",
		.data = (void *)EIP197B_MRVL,
	},
	{
		.compatible = "inside-secure,safexcel-eip197d",
		.data = (void *)EIP197D_MRVL,
	},
	/* For backward compatibility and intended for generic use */
	{
		.compatible = "inside-secure,safexcel-eip97",
		.data = (void *)EIP97IES_MRVL,
	},
	{
		.compatible = "inside-secure,safexcel-eip197",
		.data = (void *)EIP197B_MRVL,
	},
	{},
};

static struct platform_driver  crypto_safexcel = {
	.probe		= safexcel_probe,
	.remove		= safexcel_remove,
	.driver		= {
		.name	= "crypto-safexcel",
		.of_match_table = safexcel_of_match_table,
	},
};
#endif

#if IS_ENABLED(CONFIG_PCI)
/* PCIE devices - i.e. Inside Secure development boards */

static int safexcel_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct safexcel_crypto_priv *priv;
	void __iomem *pciebase;
	int rc;
	u32 val;

	dev_dbg(dev, "Probing PCIE device: vendor %04x, device %04x, subv %04x, subdev %04x, ctxt %lx\n",
		ent->vendor, ent->device, ent->subvendor,
		ent->subdevice, ent->driver_data);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->version = (enum safexcel_eip_version)ent->driver_data;

	pci_set_drvdata(pdev, priv);

	/* enable the device */
	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_err(dev, "Failed to enable PCI device\n");
		return rc;
	}

	/* take ownership of PCI BAR0 */
	rc = pcim_iomap_regions(pdev, 1, "crypto_safexcel");
	if (rc) {
		dev_err(dev, "Failed to map IO region for BAR0\n");
		return rc;
	}
	priv->base = pcim_iomap_table(pdev)[0];

	if (priv->version == EIP197_DEVBRD) {
		dev_dbg(dev, "Device identified as FPGA based development board - applying HW reset\n");

		rc = pcim_iomap_regions(pdev, 4, "crypto_safexcel");
		if (rc) {
			dev_err(dev, "Failed to map IO region for BAR4\n");
			return rc;
		}

		pciebase = pcim_iomap_table(pdev)[2];
		val = readl(pciebase + EIP197_XLX_IRQ_BLOCK_ID_ADDR);
		if ((val >> 16) == EIP197_XLX_IRQ_BLOCK_ID_VALUE) {
			dev_dbg(dev, "Detected Xilinx PCIE IRQ block version %d, multiple MSI support enabled\n",
				(val & 0xff));

			/* Setup MSI identity map mapping */
			writel(EIP197_XLX_USER_VECT_LUT0_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT0_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT1_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT1_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT2_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT2_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT3_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT3_ADDR);

			/* Enable all device interrupts */
			writel(GENMASK(31, 0),
			       pciebase + EIP197_XLX_USER_INT_ENB_MSK);
		} else {
			dev_err(dev, "Unrecognised IRQ block identifier %x\n",
				val);
			return -ENODEV;
		}

		/* HW reset FPGA dev board */
		/* assert reset */
		writel(1, priv->base + EIP197_XLX_GPIO_BASE);
		wmb(); /* maintain strict ordering for accesses here */
		/* deassert reset */
		writel(0, priv->base + EIP197_XLX_GPIO_BASE);
		wmb(); /* maintain strict ordering for accesses here */
	}

	/* enable bus mastering */
	pci_set_master(pdev);

	/* Generic EIP97/EIP197 device probing */
	rc = safexcel_probe_generic(pdev, priv, 1);
	return rc;
}

static void safexcel_pci_remove(struct pci_dev *pdev)
{
	struct safexcel_crypto_priv *priv = pci_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);

	for (i = 0; i < priv->config.rings; i++)
		destroy_workqueue(priv->ring[i].workqueue);

	safexcel_hw_reset_rings(priv);
}

static const struct pci_device_id safexcel_pci_ids[] = {
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_XILINX, 0x9038,
			       0x16ae, 0xc522),
		.driver_data = EIP197_DEVBRD,
	},
	{},
};

MODULE_DEVICE_TABLE(pci, safexcel_pci_ids);

static struct pci_driver safexcel_pci_driver = {
	.name          = "crypto-safexcel",
	.id_table      = safexcel_pci_ids,
	.probe         = safexcel_pci_probe,
	.remove        = safexcel_pci_remove,
};
#endif

static int __init safexcel_init(void)
{
	int rc = 0;

#if IS_ENABLED(CONFIG_OF)
	/* Register platform driver */
	rc = platform_driver_register(&crypto_safexcel);
	if (rc)
		return rc;
#endif

#if IS_ENABLED(CONFIG_PCI)
	/* Register PCI driver */
	rc = pci_register_driver(&safexcel_pci_driver);
#if IS_ENABLED(CONFIG_OF)
	/* Roll back the platform driver on PCI registration failure */
	if (rc)
		platform_driver_unregister(&crypto_safexcel);
#endif
#endif

	return rc;
}

static void __exit safexcel_exit(void)
{
#if IS_ENABLED(CONFIG_OF)
	/* Unregister platform driver */
	platform_driver_unregister(&crypto_safexcel);
#endif

#if IS_ENABLED(CONFIG_PCI)
	/* Unregister PCI driver if successfully registered before */
	pci_unregister_driver(&safexcel_pci_driver);
#endif
}

module_init(safexcel_init);
module_exit(safexcel_exit);

MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
MODULE_DESCRIPTION("Support for SafeXcel cryptographic engines: EIP97 & EIP197");
MODULE_LICENSE("GPL v2");