// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"

static u32 max_rings = EIP197_MAX_RINGS;
module_param(max_rings, uint, 0644);
MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");

static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
	u32 val, htable_offset;
	int i, cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;

	if (priv->version == EIP197D_MRVL) {
		cs_rc_max = EIP197D_CS_RC_MAX;
		cs_ht_wc = EIP197D_CS_HT_WC;
		cs_trc_rec_wc = EIP197D_CS_TRC_REC_WC;
		cs_trc_lg_rec_wc = EIP197D_CS_TRC_LG_REC_WC;
	} else {
		/* Default to minimum "safe" settings */
		cs_rc_max = EIP197B_CS_RC_MAX;
		cs_ht_wc = EIP197B_CS_HT_WC;
		cs_trc_rec_wc = EIP197B_CS_TRC_REC_WC;
		cs_trc_lg_rec_wc = EIP197B_CS_TRC_LG_REC_WC;
	}

	/* Enable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	val |= EIP197_TRC_ENABLE_0;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Clear all ECC errors */
	writel(0, priv->base + EIP197_TRC_ECCCTRL);

	/*
	 * Make sure the cache memory is accessible by taking record cache into
	 * reset.
	 */
	val = readl(priv->base + EIP197_TRC_PARAMS);
	val |= EIP197_TRC_PARAMS_SW_RESET;
	val &= ~EIP197_TRC_PARAMS_DATA_ACCESS;
	writel(val, priv->base + EIP197_TRC_PARAMS);

	/* Clear all records */
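	/*
	 * Each record gets two administration words linking all records into
	 * one free chain: record i points at records i - 1 and i + 1, with
	 * both ends of the chain terminated by EIP197_RC_NULL.
	 */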
	for (i = 0; i < cs_rc_max; i++) {
		u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;

		writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
		       EIP197_CS_RC_PREV(EIP197_RC_NULL),
		       priv->base + offset);

		val = EIP197_CS_RC_NEXT(i+1) | EIP197_CS_RC_PREV(i-1);
		if (i == 0)
			val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
		else if (i == cs_rc_max - 1)
			val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
		writel(val, priv->base + offset + sizeof(u32));
	}

	/* Clear the hash table entries */
	htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
	for (i = 0; i < cs_ht_wc; i++)
		writel(GENMASK(29, 0),
		       priv->base + EIP197_CLASSIFICATION_RAMS + htable_offset + i * sizeof(u32));

	/* Disable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Write head and tail pointers of the record free chain */
	val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
	      EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
	writel(val, priv->base + EIP197_TRC_FREECHAIN);

	/* Configure the record cache #1 */
	val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) |
	      EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max);
	writel(val, priv->base + EIP197_TRC_PARAMS2);

	/* Configure the record cache #2 */
	val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
	      EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
	      EIP197_TRC_PARAMS_HTABLE_SZ(2);
	writel(val, priv->base + EIP197_TRC_PARAMS);
}

static void eip197_init_firmware(struct safexcel_crypto_priv *priv)
{
	int pe, i;
	u32 val;

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Configure the token FIFOs */
		writel(3, EIP197_PE(priv) + EIP197_PE_ICE_PUTF_CTRL(pe));
		writel(0, EIP197_PE(priv) + EIP197_PE_ICE_PPTF_CTRL(pe));

		/* Clear the ICE scratchpad memory */
		val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
		val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
		       EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
		       EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
		       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));

		/* clear the scratchpad RAM using 32 bit writes only */
		for (i = 0; i < EIP197_NUM_OF_SCRATCH_BLOCKS; i++)
			writel(0, EIP197_PE(priv) +
				  EIP197_PE_ICE_SCRATCH_RAM(pe) + (i<<2));

		/* Reset the IFPP engine to make its program mem accessible */
		writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
		       EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));

		/* Reset the IPUE engine to make its program mem accessible */
		writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
		       EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));

		/* Enable access to all IFPP program memories */
		writel(EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN,
		       EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
	}

}

static int eip197_write_firmware(struct safexcel_crypto_priv *priv,
				  const struct firmware *fw)
{
	const u32 *data = (const u32 *)fw->data;
	int i;

	/* Write the firmware */
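	/* Firmware image words are big endian, hence the be32_to_cpu() */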
	for (i = 0; i < fw->size / sizeof(u32); i++)
		writel(be32_to_cpu(data[i]),
		       priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));

	/* Exclude final 2 NOPs from size */
	return i - EIP197_FW_TERMINAL_NOPS;
}

/*
 * If FW is actual production firmware, then poll for its initialization
 * to complete and check if it is good for the HW, otherwise just return OK.
 */
static bool poll_fw_ready(struct safexcel_crypto_priv *priv, int fpp)
{
	int pe, pollcnt;
	u32 base, pollofs;

	if (fpp)
		pollofs  = EIP197_FW_FPP_READY;
	else
		pollofs  = EIP197_FW_PUE_READY;

	for (pe = 0; pe < priv->config.pes; pe++) {
		base = EIP197_PE_ICE_SCRATCH_RAM(pe);
		pollcnt = EIP197_FW_START_POLLCNT;
		while (pollcnt &&
		       (readl_relaxed(EIP197_PE(priv) + base +
			      pollofs) != 1)) {
			pollcnt--;
		}
		if (!pollcnt) {
			dev_err(priv->dev, "FW(%d) for PE %d failed to start\n",
				fpp, pe);
			return false;
		}
	}
	return true;
}

static bool eip197_start_firmware(struct safexcel_crypto_priv *priv,
				  int ipuesz, int ifppsz, int minifw)
{
	int pe;
	u32 val;

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Disable access to all program memory */
		writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

		/* Start IFPP microengines */
		if (minifw)
			val = 0;
		else
			val = EIP197_PE_ICE_UENG_START_OFFSET((ifppsz - 1) &
					EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
				EIP197_PE_ICE_UENG_DEBUG_RESET;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));

		/* Start IPUE microengines */
		if (minifw)
			val = 0;
		else
			val = EIP197_PE_ICE_UENG_START_OFFSET((ipuesz - 1) &
					EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
				EIP197_PE_ICE_UENG_DEBUG_RESET;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));
	}

	/* For miniFW startup, there is no initialization, so always succeed */
	if (minifw)
		return true;

	/* Wait until all the firmwares have properly started up */
	if (!poll_fw_ready(priv, 1))
		return false;
	if (!poll_fw_ready(priv, 0))
		return false;

	return true;
}

static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
{
	const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
	const struct firmware *fw[FW_NB];
	char fw_path[37], *dir = NULL;
	int i, j, ret = 0, pe;
	int ipuesz, ifppsz, minifw = 0;

	if (priv->version == EIP197D_MRVL)
		dir = "eip197d";
	else if (priv->version == EIP197B_MRVL ||
		 priv->version == EIP197_DEVBRD)
		dir = "eip197b";
	else
		return -ENODEV;

retry_fw:
	for (i = 0; i < FW_NB; i++) {
		snprintf(fw_path, 37, "inside-secure/%s/%s", dir, fw_name[i]);
		ret = firmware_request_nowarn(&fw[i], fw_path, priv->dev);
		if (ret) {
			if (minifw || priv->version != EIP197B_MRVL)
				goto release_fw;

			/* Fall back to the old firmware location for the
			 * EIP197b.
			 */
			ret = firmware_request_nowarn(&fw[i], fw_name[i],
						      priv->dev);
			if (ret)
				goto release_fw;
		}
	}

	eip197_init_firmware(priv);

	ifppsz = eip197_write_firmware(priv, fw[FW_IFPP]);

	/* Enable access to IPUE program memories */
	for (pe = 0; pe < priv->config.pes; pe++)
		writel(EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN,
		       EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

	ipuesz = eip197_write_firmware(priv, fw[FW_IPUE]);

	if (eip197_start_firmware(priv, ipuesz, ifppsz, minifw)) {
		dev_dbg(priv->dev, "Firmware loaded successfully\n");
		return 0;
	}

	ret = -ENODEV;

release_fw:
	for (j = 0; j < i; j++)
		release_firmware(fw[j]);

	if (!minifw) {
		/* Retry with minifw path */
		dev_dbg(priv->dev, "Firmware set not (fully) present or init failed, falling back to BCLA mode\n");
		dir = "eip197_minifw";
		minifw = 1;
		goto retry_fw;
	}

	dev_dbg(priv->dev, "Firmware load failed.\n");

	return ret;
}

static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 cd_size_rnd, val;
	int i, cd_fetch_cnt;

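	/* Command descriptor size, rounded up to full HW data bus words */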
	cd_size_rnd  = (priv->config.cd_size +
			(BIT(priv->hwconfig.hwdataw) - 1)) >>
		       priv->hwconfig.hwdataw;
	/* determine number of CDs we can fetch into the CD FIFO as 1 block */
	if (priv->flags & SAFEXCEL_HW_EIP197) {
		/* EIP197: try to fetch enough in 1 go to keep all pipes busy */
		cd_fetch_cnt = (1 << priv->hwconfig.hwcfsize) / cd_size_rnd;
		cd_fetch_cnt = min_t(uint, cd_fetch_cnt,
				     (priv->config.pes * EIP197_FETCH_DEPTH));
	} else {
		/* for the EIP97, just fetch all that fits minus 1 */
		cd_fetch_cnt = ((1 << priv->hwconfig.hwcfsize) /
				cd_size_rnd) - 1;
	}

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
		       priv->config.cd_size,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
		writel(((cd_fetch_cnt *
			 (cd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
		       (cd_fetch_cnt * priv->config.cd_offset),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(5, 0),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
	}

	return 0;
}

static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 hdw, rd_size_rnd, val;
	int i;

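	/* Read the HW data width (log2, in 32-bit words) from the HIA options */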
	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	hdw &= GENMASK(27, 25);
	hdw >>= 25;

	rd_size_rnd = (priv->config.rd_size + (BIT(hdw) - 1)) >> hdw;

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
		       priv->config.rd_size,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);

		writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) |
		       (EIP197_FETCH_COUNT * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
		writel(val,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(7, 0),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* enable ring interrupt */
		val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
		val |= EIP197_RDR_IRQ(i);
		writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
	}

	return 0;
}

static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
	u32 val;
	int i, ret, pe;

	dev_dbg(priv->dev, "HW init: using %d pipe(s) and %d ring(s)\n",
		priv->config.pes, priv->config.rings);

	/*
	 * For EIP197s only, set the maximum number of TX commands to 2^5 = 32.
	 * Skip for the EIP97 as it does not have this field.
	 */
	if (priv->flags & SAFEXCEL_HW_EIP197) {
		val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
		val |= EIP197_MST_CTRL_TX_MAX_CMD(5);
		writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
	}

	/* Configure wr/rd cache values */
	writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
	       EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
	       EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);

	/* Interrupts reset */

	/* Disable all global interrupts */
	writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);

	/* Clear any pending interrupt */
	writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	/* Processing Engine configuration */
	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Data Fetch Engine configuration */

		/* Reset all DFE threads */
		writel(EIP197_DxE_THR_CTRL_RESET_PE,
		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		if (priv->flags & SAFEXCEL_HW_EIP197)
			/* Reset HIA input interface arbiter (EIP197 only) */
			writel(EIP197_HIA_RA_PE_CTRL_RESET,
			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));

		/* DMA transfer size to use */
		val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
		val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
		       EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
		val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe));

		/* Leave the DFE threads reset state */
		writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		/* Configure the processing engine thresholds */
		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
		       EIP197_PE_IN_xBUF_THRES_MAX(9),
		       EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe));
		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
		       EIP197_PE_IN_xBUF_THRES_MAX(7),
		       EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));

		if (priv->flags & SAFEXCEL_HW_EIP197)
			/* enable HIA input interface arbiter and rings */
			writel(EIP197_HIA_RA_PE_CTRL_EN |
			       GENMASK(priv->config.rings - 1, 0),
			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));

		/* Data Store Engine configuration */

		/* Reset all DSE threads */
		writel(EIP197_DxE_THR_CTRL_RESET_PE,
		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

		/* Wait for all DSE threads to complete */
		while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) &
			GENMASK(15, 12)) != GENMASK(15, 12))
			;

		/* DMA transfer size to use */
		val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
		val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
		/* FIXME: instability issues can occur for EIP97 but disabling
		 * it impacts performance.
		 */
		if (priv->flags & SAFEXCEL_HW_EIP197)
			val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
		writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));

		/* Leave the DSE threads reset state */
		writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

		/* Configure the processing engine thresholds */
		writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) |
		       EIP197_PE_OUT_DBUF_THRES_MAX(8),
		       EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));

		/* Processing Engine configuration */

		/* Token & context configuration */
		val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES |
		      EIP197_PE_EIP96_TOKEN_CTRL_NO_TOKEN_WAIT |
		      EIP197_PE_EIP96_TOKEN_CTRL_ENABLE_TIMEOUT;
		writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe));

		/* H/W capabilities selection: just enable everything */
		writel(EIP197_FUNCTION_ALL,
		       EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
		writel(EIP197_FUNCTION_ALL,
		       EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION2_EN(pe));
	}

	/* Command Descriptor Rings prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Clear interrupts for this ring */
		writel(GENMASK(31, 0),
		       EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));

		/* Disable external triggering */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	/* Result Descriptor Ring prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Disable external triggering */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		/* Ring size */
		writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Enable command descriptor rings */
		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		/* Enable result descriptor rings */
		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
	}

	/* Clear any HIA interrupt */
	writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	if (priv->flags & SAFEXCEL_HW_EIP197) {
		eip197_trc_cache_init(priv);
		priv->flags |= EIP197_TRC_CACHE;

		ret = eip197_load_firmwares(priv);
		if (ret)
			return ret;
	}

	safexcel_hw_setup_cdesc_rings(priv);
	safexcel_hw_setup_rdesc_rings(priv);

	return 0;
}

/* Called with ring's lock taken */
static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
				       int ring)
{
	int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);

	if (!coal)
		return;

	/* Configure when we want an interrupt */
	writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
	       EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
}

void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
	struct crypto_async_request *req, *backlog;
	struct safexcel_context *ctx;
	int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;

	/* If a request wasn't properly dequeued because of a lack of resources,
	 * process it first.
	 */
	req = priv->ring[ring].req;
	backlog = priv->ring[ring].backlog;
	if (req)
		goto handle_req;

	while (true) {
		spin_lock_bh(&priv->ring[ring].queue_lock);
		backlog = crypto_get_backlog(&priv->ring[ring].queue);
		req = crypto_dequeue_request(&priv->ring[ring].queue);
		spin_unlock_bh(&priv->ring[ring].queue_lock);

		if (!req) {
			priv->ring[ring].req = NULL;
			priv->ring[ring].backlog = NULL;
			goto finalize;
		}

handle_req:
		ctx = crypto_tfm_ctx(req->tfm);
		ret = ctx->send(req, ring, &commands, &results);
		if (ret)
			goto request_failed;

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		/* In case the send() helper did not issue any command to push
		 * to the engine because the input data was cached, continue to
		 * dequeue other requests as this is valid and not an error.
		 */
		if (!commands && !results)
			continue;

		cdesc += commands;
		rdesc += results;
		nreq++;
	}

request_failed:
	/* Not enough resources to handle all the requests. Bail out and save
	 * the request and the backlog for the next dequeue call (per-ring).
	 */
	priv->ring[ring].req = req;
	priv->ring[ring].backlog = backlog;

finalize:
	if (!nreq)
		return;

	spin_lock_bh(&priv->ring[ring].lock);

	priv->ring[ring].requests += nreq;

	if (!priv->ring[ring].busy) {
		safexcel_try_push_requests(priv, ring);
		priv->ring[ring].busy = true;
	}

	spin_unlock_bh(&priv->ring[ring].lock);

	/* let the RDR know we have pending descriptors */
	writel((rdesc * priv->config.rd_offset) << 2,
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);

	/* let the CDR know we have pending descriptors */
	writel((cdesc * priv->config.cd_offset) << 2,
	       EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}

inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
				       struct safexcel_result_desc *rdesc)
{
	if (likely((!rdesc->descriptor_overflow) &&
		   (!rdesc->buffer_overflow) &&
		   (!rdesc->result_data.error_code)))
		return 0;

	if (rdesc->descriptor_overflow)
		dev_err(priv->dev, "Descriptor overflow detected");

	if (rdesc->buffer_overflow)
		dev_err(priv->dev, "Buffer overflow detected");

	if (rdesc->result_data.error_code & 0x4066) {
		/* Fatal error (bits 1,2,5,6 & 14) */
		dev_err(priv->dev,
			"result descriptor error (%x)",
			rdesc->result_data.error_code);
		return -EIO;
	} else if (rdesc->result_data.error_code &
		   (BIT(7) | BIT(4) | BIT(3) | BIT(0))) {
		/*
		 * Give priority over authentication fails:
		 * Blocksize, length & overflow errors,
		 * something wrong with the input!
		 */
		return -EINVAL;
	} else if (rdesc->result_data.error_code & BIT(9)) {
		/* Authentication failed */
		return -EBADMSG;
	}

	/* All other non-fatal errors */
	return -EINVAL;
}

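/* Remember which crypto request owns this result descriptor slot */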
inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
				 int ring,
				 struct safexcel_result_desc *rdesc,
				 struct crypto_async_request *req)
{
	int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);

	priv->ring[ring].rdr_req[i] = req;
}

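/* Retrieve the request owning the ring's oldest outstanding result descriptor */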
inline struct crypto_async_request *
safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
{
	int i = safexcel_ring_first_rdr_index(priv, ring);

	return priv->ring[ring].rdr_req[i];
}

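/*
 * Acknowledging walks the CDR read pointer past all command descriptors of
 * the completed request, i.e. up to and including the one with last_seg set.
 */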
void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
	struct safexcel_command_desc *cdesc;

	/* Acknowledge the command descriptors */
	do {
		cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
		if (IS_ERR(cdesc)) {
			dev_err(priv->dev,
				"Could not retrieve the command descriptor\n");
			return;
		}
	} while (!cdesc->last_seg);
}

void safexcel_inv_complete(struct crypto_async_request *req, int error)
{
	struct safexcel_inv_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

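/*
 * Queue a special command asking the engine to invalidate its internal
 * (cached) copy of the context record at ctxr_dma.
 */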
int safexcel_invalidate_cache(struct crypto_async_request *async,
			      struct safexcel_crypto_priv *priv,
			      dma_addr_t ctxr_dma, int ring)
{
	struct safexcel_command_desc *cdesc;
	struct safexcel_result_desc *rdesc;
	int ret = 0;

	/* Prepare command descriptor */
	cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	cdesc->control_data.type = EIP197_TYPE_EXTENDED;
	cdesc->control_data.options = 0;
	cdesc->control_data.refresh = 0;
	cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;

	/* Prepare result descriptor */
	rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);

	if (IS_ERR(rdesc)) {
		ret = PTR_ERR(rdesc);
		goto cdesc_rollback;
	}

	safexcel_rdr_req_set(priv, ring, rdesc, async);

	return ret;

cdesc_rollback:
	safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

	return ret;
}

static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
						     int ring)
{
	struct crypto_async_request *req;
	struct safexcel_context *ctx;
	int ret, i, nreq, ndesc, tot_descs, handled = 0;
	bool should_complete;

handle_results:
	tot_descs = 0;

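	/* Number of packets the engine reports as fully processed */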
	nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
	nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
	nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
	if (!nreq)
		goto requests_left;

	for (i = 0; i < nreq; i++) {
		req = safexcel_rdr_req_get(priv, ring);

		ctx = crypto_tfm_ctx(req->tfm);
		ndesc = ctx->handle_result(priv, ring, req,
					   &should_complete, &ret);
		if (ndesc < 0) {
			dev_err(priv->dev, "failed to handle result (%d)\n",
				ndesc);
			goto acknowledge;
		}

		if (should_complete) {
			local_bh_disable();
			req->complete(req, ret);
			local_bh_enable();
		}

		tot_descs += ndesc;
		handled++;
	}

acknowledge:
	if (i)
		writel(EIP197_xDR_PROC_xD_PKT(i) |
		       EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);

	/* If the number of requests overflowed the counter, try to process more
	 * requests.
	 */
	if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
		goto handle_results;

requests_left:
	spin_lock_bh(&priv->ring[ring].lock);

	priv->ring[ring].requests -= handled;
	safexcel_try_push_requests(priv, ring);

	if (!priv->ring[ring].requests)
		priv->ring[ring].busy = false;

	spin_unlock_bh(&priv->ring[ring].lock);
}

static void safexcel_dequeue_work(struct work_struct *work)
{
	struct safexcel_work_data *data =
			container_of(work, struct safexcel_work_data, work);

	safexcel_dequeue(data->priv, data->ring);
}

struct safexcel_ring_irq_data {
	struct safexcel_crypto_priv *priv;
	int ring;
};

static irqreturn_t safexcel_irq_ring(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring, rc = IRQ_NONE;
	u32 status, stat;

	status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
	if (!status)
		return rc;

	/* RDR interrupts */
	if (status & EIP197_RDR_IRQ(ring)) {
		stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);

		if (unlikely(stat & EIP197_xDR_ERR)) {
			/*
			 * Fatal error, the RDR is unusable and must be
			 * reinitialized. This should not happen under
			 * normal circumstances.
			 */
			dev_err(priv->dev, "RDR: fatal error.\n");
		} else if (likely(stat & EIP197_xDR_THRESH)) {
			rc = IRQ_WAKE_THREAD;
		}

		/* ACK the interrupts */
		writel(stat & 0xff,
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
	}

	/* ACK the interrupts */
	writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));

	return rc;
}

static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring;

	safexcel_handle_result_descriptor(priv, ring);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return IRQ_HANDLED;
}

static int safexcel_request_ring_irq(void *pdev, int irqid,
				     int is_pci_dev,
				     irq_handler_t handler,
				     irq_handler_t threaded_handler,
				     struct safexcel_ring_irq_data *ring_irq_priv)
{
	int ret, irq;
	struct device *dev;

	if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) {
		struct pci_dev *pci_pdev = pdev;

		dev = &pci_pdev->dev;
		irq = pci_irq_vector(pci_pdev, irqid);
		if (irq < 0) {
			dev_err(dev, "unable to get device MSI IRQ %d (err %d)\n",
				irqid, irq);
			return irq;
		}
	} else if (IS_ENABLED(CONFIG_OF)) {
		struct platform_device *plf_pdev = pdev;
		char irq_name[6] = {0}; /* "ringX\0" */

		snprintf(irq_name, 6, "ring%d", irqid);
		dev = &plf_pdev->dev;
		irq = platform_get_irq_byname(plf_pdev, irq_name);

		if (irq < 0) {
			dev_err(dev, "unable to get IRQ '%s' (err %d)\n",
				irq_name, irq);
			return irq;
		}
	}

	ret = devm_request_threaded_irq(dev, irq, handler,
					threaded_handler, IRQF_ONESHOT,
					dev_name(dev), ring_irq_priv);
	if (ret) {
		dev_err(dev, "unable to request IRQ %d\n", irq);
		return ret;
	}

	return irq;
}

static struct safexcel_alg_template *safexcel_algs[] = {
	&safexcel_alg_ecb_des,
	&safexcel_alg_cbc_des,
	&safexcel_alg_ecb_des3_ede,
	&safexcel_alg_cbc_des3_ede,
	&safexcel_alg_ecb_aes,
	&safexcel_alg_cbc_aes,
	&safexcel_alg_cfb_aes,
	&safexcel_alg_ofb_aes,
	&safexcel_alg_ctr_aes,
	&safexcel_alg_md5,
	&safexcel_alg_sha1,
	&safexcel_alg_sha224,
	&safexcel_alg_sha256,
	&safexcel_alg_sha384,
	&safexcel_alg_sha512,
	&safexcel_alg_hmac_md5,
	&safexcel_alg_hmac_sha1,
	&safexcel_alg_hmac_sha224,
	&safexcel_alg_hmac_sha256,
	&safexcel_alg_hmac_sha384,
	&safexcel_alg_hmac_sha512,
	&safexcel_alg_authenc_hmac_sha1_cbc_aes,
	&safexcel_alg_authenc_hmac_sha224_cbc_aes,
	&safexcel_alg_authenc_hmac_sha256_cbc_aes,
	&safexcel_alg_authenc_hmac_sha384_cbc_aes,
	&safexcel_alg_authenc_hmac_sha512_cbc_aes,
	&safexcel_alg_authenc_hmac_sha1_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha1_ctr_aes,
	&safexcel_alg_authenc_hmac_sha224_ctr_aes,
	&safexcel_alg_authenc_hmac_sha256_ctr_aes,
	&safexcel_alg_authenc_hmac_sha384_ctr_aes,
	&safexcel_alg_authenc_hmac_sha512_ctr_aes,
	&safexcel_alg_xts_aes,
	&safexcel_alg_gcm,
	&safexcel_alg_ccm,
};

static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
{
	int i, j, ret = 0;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		safexcel_algs[i]->priv = priv;

		/* Do we have all required base algorithms available? */
		if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
		    safexcel_algs[i]->algo_mask)
			/* No, so don't register this ciphersuite */
			continue;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
		else
			ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);

		if (ret)
			goto fail;
	}

	return 0;

fail:
	for (j = 0; j < i; j++) {
		/* Do we have all required base algorithms available? */
		if ((safexcel_algs[j]->algo_mask & priv->hwconfig.algo_flags) !=
		    safexcel_algs[j]->algo_mask)
			/* No, so don't unregister this ciphersuite */
			continue;

		if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
		else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[j]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
	}

	return ret;
}

static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		/* Do we have all required base algorithms available? */
		if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
		    safexcel_algs[i]->algo_mask)
			/* No, so don't unregister this ciphersuite */
			continue;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[i]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
	}
}

static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
	u32 val, mask = 0;

	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);

	/* Read number of PEs from the engine */
	if (priv->flags & SAFEXCEL_HW_EIP197)
		/* Wider field width for all EIP197 type engines */
		mask = EIP197_N_PES_MASK;
	else
		/* Narrow field width for EIP97 type engine */
		mask = EIP97_N_PES_MASK;

	priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask;

	priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);

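	/*
	 * Round the descriptor sizes up to the HW data width (HIA_OPTIONS
	 * bits 27:25) to obtain the per-descriptor ring offsets.
	 */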
	val = (val & GENMASK(27, 25)) >> 25;
	mask = BIT(val) - 1;

	priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
	priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;

	priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32));
	priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
}

static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
{
	struct safexcel_register_offsets *offsets = &priv->offsets;

	if (priv->flags & SAFEXCEL_HW_EIP197) {
		offsets->hia_aic	= EIP197_HIA_AIC_BASE;
		offsets->hia_aic_g	= EIP197_HIA_AIC_G_BASE;
		offsets->hia_aic_r	= EIP197_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr	= EIP197_HIA_AIC_xDR_BASE;
		offsets->hia_dfe	= EIP197_HIA_DFE_BASE;
		offsets->hia_dfe_thr	= EIP197_HIA_DFE_THR_BASE;
		offsets->hia_dse	= EIP197_HIA_DSE_BASE;
		offsets->hia_dse_thr	= EIP197_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg	= EIP197_HIA_GEN_CFG_BASE;
		offsets->pe		= EIP197_PE_BASE;
		offsets->global		= EIP197_GLOBAL_BASE;
	} else {
		offsets->hia_aic	= EIP97_HIA_AIC_BASE;
		offsets->hia_aic_g	= EIP97_HIA_AIC_G_BASE;
		offsets->hia_aic_r	= EIP97_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr	= EIP97_HIA_AIC_xDR_BASE;
		offsets->hia_dfe	= EIP97_HIA_DFE_BASE;
		offsets->hia_dfe_thr	= EIP97_HIA_DFE_THR_BASE;
		offsets->hia_dse	= EIP97_HIA_DSE_BASE;
		offsets->hia_dse_thr	= EIP97_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg	= EIP97_HIA_GEN_CFG_BASE;
		offsets->pe		= EIP97_PE_BASE;
		offsets->global		= EIP97_GLOBAL_BASE;
	}
}

/*
 * Generic part of probe routine, shared by platform and PCI driver
 *
 * Assumes IO resources have been mapped, private data mem has been allocated,
 * clocks have been enabled, device pointer has been assigned etc.
 *
 */
static int safexcel_probe_generic(void *pdev,
				  struct safexcel_crypto_priv *priv,
				  int is_pci_dev)
{
	struct device *dev = priv->dev;
	u32 peid, version, mask, val, hiaopt;
	int i, ret, hwctg;

	priv->context_pool = dmam_pool_create("safexcel-context", dev,
					      sizeof(struct safexcel_context_record),
					      1, 0);
	if (!priv->context_pool)
		return -ENOMEM;

	/*
	 * First try the EIP97 HIA version regs
	 * For the EIP197, this is guaranteed to NOT return any of the test
	 * values
	 */
	version = readl(priv->base + EIP97_HIA_AIC_BASE + EIP197_HIA_VERSION);

	mask = 0;  /* do not swap */
	if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) {
		priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
	} else if (EIP197_REG_HI16(version) == EIP197_HIA_VERSION_BE) {
		/* read back byte-swapped, so complement byte swap bits */
		mask = EIP197_MST_CTRL_BYTE_SWAP_BITS;
		priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
	} else {
		/* So it wasn't an EIP97 ... maybe it's an EIP197? */
		version = readl(priv->base + EIP197_HIA_AIC_BASE +
				EIP197_HIA_VERSION);
		if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) {
			priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
			priv->flags |= SAFEXCEL_HW_EIP197;
		} else if (EIP197_REG_HI16(version) ==
			   EIP197_HIA_VERSION_BE) {
			/* read back byte-swapped, so complement swap bits */
			mask = EIP197_MST_CTRL_BYTE_SWAP_BITS;
			priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
			priv->flags |= SAFEXCEL_HW_EIP197;
		} else {
			return -ENODEV;
		}
	}

	/* Now initialize the reg offsets based on the probing info so far */
	safexcel_init_register_offsets(priv);

	/*
	 * If the version was read byte-swapped, we need to flip the device
	 * swapping. Keep in mind here, though, that what we write will also be
	 * byte-swapped ...
	 */
	if (mask) {
		val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
		val = val ^ (mask >> 24); /* toggle byte swap bits */
		writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
	}

	/*
	 * We're not done probing yet! We may fall through to here if no HIA
	 * was found at all. So, with the endianness presumably correct now and
	 * the offsets setup, *really* probe for the EIP97/EIP197.
	 */
	version = readl(EIP197_GLOBAL(priv) + EIP197_VERSION);
	if (((priv->flags & SAFEXCEL_HW_EIP197) &&
	     (EIP197_REG_LO16(version) != EIP197_VERSION_LE)) ||
	    ((!(priv->flags & SAFEXCEL_HW_EIP197) &&
	     (EIP197_REG_LO16(version) != EIP97_VERSION_LE)))) {
		/*
		 * We did not find the device that matched our initial probing
		 * (or our initial probing failed). Report appropriate error.
		 */
		return -ENODEV;
	}

	priv->hwconfig.hwver = EIP197_VERSION_MASK(version);
	hwctg = version >> 28;
	peid = version & 255;

	/* Detect EIP96 packet engine and version */
	version = readl(EIP197_PE(priv) + EIP197_PE_EIP96_VERSION(0));
	if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) {
		dev_err(dev, "EIP%d: EIP96 not detected.\n", peid);
		return -ENODEV;
	}
	priv->hwconfig.pever = EIP197_VERSION_MASK(version);

	hiaopt = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_OPTIONS);

	if (priv->flags & SAFEXCEL_HW_EIP197) {
		/* EIP197 */
		priv->hwconfig.hwdataw  = (hiaopt >> EIP197_HWDATAW_OFFSET) &
					  EIP197_HWDATAW_MASK;
		priv->hwconfig.hwcfsize = ((hiaopt >> EIP197_CFSIZE_OFFSET) &
					   EIP197_CFSIZE_MASK) +
					  EIP197_CFSIZE_ADJUST;
	} else {
		/* EIP97 */
		priv->hwconfig.hwdataw  = (hiaopt >> EIP197_HWDATAW_OFFSET) &
					  EIP97_HWDATAW_MASK;
		priv->hwconfig.hwcfsize = (hiaopt >> EIP97_CFSIZE_OFFSET) &
					  EIP97_CFSIZE_MASK;
	}

	/* Get supported algorithms from EIP96 transform engine */
	priv->hwconfig.algo_flags = readl(EIP197_PE(priv) +
				    EIP197_PE_EIP96_OPTIONS(0));

	/* Print single info line describing what we just detected */
	dev_info(priv->dev, "EIP%d:%x(%d)-HIA:%x(%d,%d),PE:%x,alg:%08x\n", peid,
		 priv->hwconfig.hwver, hwctg, priv->hwconfig.hiaver,
		 priv->hwconfig.hwdataw, priv->hwconfig.hwcfsize,
		 priv->hwconfig.pever, priv->hwconfig.algo_flags);

	safexcel_configure(priv);

	if (IS_ENABLED(CONFIG_PCI) && priv->version == EIP197_DEVBRD) {
		/*
		 * Request MSI vectors for global + 1 per ring -
		 * or just 1 for older dev images
		 */
		struct pci_dev *pci_pdev = pdev;

		ret = pci_alloc_irq_vectors(pci_pdev,
					    priv->config.rings + 1,
					    priv->config.rings + 1,
					    PCI_IRQ_MSI | PCI_IRQ_MSIX);
		if (ret < 0) {
			dev_err(dev, "Failed to allocate PCI MSI interrupts\n");
			return ret;
		}
	}

	/* Register the ring IRQ handlers and configure the rings */
	priv->ring = devm_kcalloc(dev, priv->config.rings,
				  sizeof(*priv->ring),
				  GFP_KERNEL);
	if (!priv->ring)
		return -ENOMEM;

	for (i = 0; i < priv->config.rings; i++) {
		char wq_name[9] = {0};
		int irq;
		struct safexcel_ring_irq_data *ring_irq;

		ret = safexcel_init_ring_descriptors(priv,
						     &priv->ring[i].cdr,
						     &priv->ring[i].rdr);
		if (ret) {
			dev_err(dev, "Failed to initialize rings\n");
			return ret;
		}

		priv->ring[i].rdr_req = devm_kcalloc(dev,
			EIP197_DEFAULT_RING_SIZE,
			sizeof(priv->ring[i].rdr_req),
			GFP_KERNEL);
		if (!priv->ring[i].rdr_req)
			return -ENOMEM;

		ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
		if (!ring_irq)
			return -ENOMEM;

		ring_irq->priv = priv;
		ring_irq->ring = i;

		irq = safexcel_request_ring_irq(pdev,
						EIP197_IRQ_NUMBER(i, is_pci_dev),
						is_pci_dev,
						safexcel_irq_ring,
						safexcel_irq_ring_thread,
						ring_irq);
		if (irq < 0) {
			dev_err(dev, "Failed to get IRQ ID for ring %d\n", i);
			return irq;
		}

		priv->ring[i].work_data.priv = priv;
		priv->ring[i].work_data.ring = i;
		INIT_WORK(&priv->ring[i].work_data.work,
			  safexcel_dequeue_work);

		snprintf(wq_name, 9, "wq_ring%d", i);
		priv->ring[i].workqueue =
			create_singlethread_workqueue(wq_name);
		if (!priv->ring[i].workqueue)
			return -ENOMEM;

		priv->ring[i].requests = 0;
		priv->ring[i].busy = false;

		crypto_init_queue(&priv->ring[i].queue,
				  EIP197_DEFAULT_RING_SIZE);

		spin_lock_init(&priv->ring[i].lock);
		spin_lock_init(&priv->ring[i].queue_lock);
	}

	atomic_set(&priv->ring_used, 0);

	ret = safexcel_hw_init(priv);
	if (ret) {
		dev_err(dev, "HW init failed (%d)\n", ret);
		return ret;
	}

	ret = safexcel_register_algorithms(priv);
	if (ret) {
		dev_err(dev, "Failed to register algorithms (%d)\n", ret);
		return ret;
	}

	return 0;
}

static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < priv->config.rings; i++) {
		/* clear any pending interrupt */
		writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
		writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* Reset the CDR base address */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		/* Reset the RDR base address */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
	}
}

#if IS_ENABLED(CONFIG_OF)
/* for Device Tree platform driver */

static int safexcel_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct safexcel_crypto_priv *priv;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);

	platform_set_drvdata(pdev, priv);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		dev_err(dev, "failed to get resource\n");
		return PTR_ERR(priv->base);
	}

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	ret = PTR_ERR_OR_ZERO(priv->clk);
	/* The clock isn't mandatory */
	if  (ret != -ENOENT) {
		if (ret)
			return ret;

		ret = clk_prepare_enable(priv->clk);
		if (ret) {
			dev_err(dev, "unable to enable clk (%d)\n", ret);
			return ret;
		}
	}

	priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
	ret = PTR_ERR_OR_ZERO(priv->reg_clk);
	/* The clock isn't mandatory */
	if  (ret != -ENOENT) {
		if (ret)
			goto err_core_clk;

		ret = clk_prepare_enable(priv->reg_clk);
		if (ret) {
			dev_err(dev, "unable to enable reg clk (%d)\n", ret);
			goto err_core_clk;
		}
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_reg_clk;

	/* Generic EIP97/EIP197 device probing */
	ret = safexcel_probe_generic(pdev, priv, 0);
	if (ret)
		goto err_reg_clk;

	return 0;

err_reg_clk:
	clk_disable_unprepare(priv->reg_clk);
err_core_clk:
	clk_disable_unprepare(priv->clk);
	return ret;
}

static int safexcel_remove(struct platform_device *pdev)
{
	struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);
	safexcel_hw_reset_rings(priv);

	clk_disable_unprepare(priv->clk);

	for (i = 0; i < priv->config.rings; i++)
		destroy_workqueue(priv->ring[i].workqueue);

	return 0;
}

static const struct of_device_id safexcel_of_match_table[] = {
	{
		.compatible = "inside-secure,safexcel-eip97ies",
		.data = (void *)EIP97IES_MRVL,
	},
	{
		.compatible = "inside-secure,safexcel-eip197b",
		.data = (void *)EIP197B_MRVL,
	},
	{
		.compatible = "inside-secure,safexcel-eip197d",
		.data = (void *)EIP197D_MRVL,
	},
	/* For backward compatibility and intended for generic use */
	{
		.compatible = "inside-secure,safexcel-eip97",
		.data = (void *)EIP97IES_MRVL,
	},
	{
		.compatible = "inside-secure,safexcel-eip197",
		.data = (void *)EIP197B_MRVL,
	},
	{},
};

static struct platform_driver  crypto_safexcel = {
	.probe		= safexcel_probe,
	.remove		= safexcel_remove,
	.driver		= {
		.name	= "crypto-safexcel",
		.of_match_table = safexcel_of_match_table,
	},
};
#endif

#if IS_ENABLED(CONFIG_PCI)
/* PCIE devices - i.e. Inside Secure development boards */

static int safexcel_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct safexcel_crypto_priv *priv;
	void __iomem *pciebase;
	int rc;
	u32 val;

	dev_dbg(dev, "Probing PCIE device: vendor %04x, device %04x, subv %04x, subdev %04x, ctxt %lx\n",
		ent->vendor, ent->device, ent->subvendor,
		ent->subdevice, ent->driver_data);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->version = (enum safexcel_eip_version)ent->driver_data;

	pci_set_drvdata(pdev, priv);

	/* enable the device */
	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_err(dev, "Failed to enable PCI device\n");
		return rc;
	}

	/* take ownership of PCI BAR0 */
	rc = pcim_iomap_regions(pdev, 1, "crypto_safexcel");
	if (rc) {
		dev_err(dev, "Failed to map IO region for BAR0\n");
		return rc;
	}
	priv->base = pcim_iomap_table(pdev)[0];

	if (priv->version == EIP197_DEVBRD) {
		dev_dbg(dev, "Device identified as FPGA based development board - applying HW reset\n");

		rc = pcim_iomap_regions(pdev, 4, "crypto_safexcel");
		if (rc) {
			dev_err(dev, "Failed to map IO region for BAR4\n");
			return rc;
		}

		pciebase = pcim_iomap_table(pdev)[2];
		val = readl(pciebase + EIP197_XLX_IRQ_BLOCK_ID_ADDR);
		if ((val >> 16) == EIP197_XLX_IRQ_BLOCK_ID_VALUE) {
			dev_dbg(dev, "Detected Xilinx PCIE IRQ block version %d, multiple MSI support enabled\n",
				(val & 0xff));

			/* Setup MSI identity map mapping */
			writel(EIP197_XLX_USER_VECT_LUT0_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT0_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT1_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT1_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT2_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT2_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT3_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT3_ADDR);

			/* Enable all device interrupts */
			writel(GENMASK(31, 0),
			       pciebase + EIP197_XLX_USER_INT_ENB_MSK);
		} else {
			dev_err(dev, "Unrecognised IRQ block identifier %x\n",
				val);
			return -ENODEV;
		}

		/* HW reset FPGA dev board */
		/* assert reset */
		writel(1, priv->base + EIP197_XLX_GPIO_BASE);
		wmb(); /* maintain strict ordering for accesses here */
		/* deassert reset */
		writel(0, priv->base + EIP197_XLX_GPIO_BASE);
		wmb(); /* maintain strict ordering for accesses here */
	}

	/* enable bus mastering */
	pci_set_master(pdev);

	/* Generic EIP97/EIP197 device probing */
	rc = safexcel_probe_generic(pdev, priv, 1);
	return rc;
}

static void safexcel_pci_remove(struct pci_dev *pdev)
{
	struct safexcel_crypto_priv *priv = pci_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);

	for (i = 0; i < priv->config.rings; i++)
		destroy_workqueue(priv->ring[i].workqueue);

	safexcel_hw_reset_rings(priv);
}

static const struct pci_device_id safexcel_pci_ids[] = {
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_XILINX, 0x9038,
			       0x16ae, 0xc522),
		.driver_data = EIP197_DEVBRD,
	},
	{},
};

MODULE_DEVICE_TABLE(pci, safexcel_pci_ids);

static struct pci_driver safexcel_pci_driver = {
	.name          = "crypto-safexcel",
	.id_table      = safexcel_pci_ids,
	.probe         = safexcel_pci_probe,
	.remove        = safexcel_pci_remove,
};
#endif

static int __init safexcel_init(void)
{
	int ret = 0;

#if IS_ENABLED(CONFIG_OF)
	/* Register platform driver */
	ret = platform_driver_register(&crypto_safexcel);
	if (ret)
		return ret;
#endif

#if IS_ENABLED(CONFIG_PCI)
	/* Register PCI driver */
	ret = pci_register_driver(&safexcel_pci_driver);
	if (ret) {
#if IS_ENABLED(CONFIG_OF)
		platform_driver_unregister(&crypto_safexcel);
#endif
		return ret;
	}
#endif

	return 0;
}

static void __exit safexcel_exit(void)
{
#if IS_ENABLED(CONFIG_OF)
	/* Unregister platform driver */
	platform_driver_unregister(&crypto_safexcel);
#endif

#if IS_ENABLED(CONFIG_PCI)
	/* Unregister PCI driver if successfully registered before */
	pci_unregister_driver(&safexcel_pci_driver);
#endif
}

module_init(safexcel_init);
module_exit(safexcel_exit);

MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
MODULE_DESCRIPTION("Support for SafeXcel cryptographic engines: EIP97 & EIP197");
MODULE_LICENSE("GPL v2");