// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"

static u32 max_rings = EIP197_MAX_RINGS;
module_param(max_rings, uint, 0644);
MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");

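/*
 * Initialize the EIP197 transform record cache (TRC): enable access to the
 * cache RAM, build the free record chain, clear the hash table and program
 * the cache geometry for the engine variant (EIP197B vs EIP197D).
 */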
static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
	u32 val, htable_offset;
	int i, cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;

	if (priv->version == EIP197B) {
		cs_rc_max = EIP197B_CS_RC_MAX;
		cs_ht_wc = EIP197B_CS_HT_WC;
		cs_trc_rec_wc = EIP197B_CS_TRC_REC_WC;
		cs_trc_lg_rec_wc = EIP197B_CS_TRC_LG_REC_WC;
	} else {
		cs_rc_max = EIP197D_CS_RC_MAX;
		cs_ht_wc = EIP197D_CS_HT_WC;
		cs_trc_rec_wc = EIP197D_CS_TRC_REC_WC;
		cs_trc_lg_rec_wc = EIP197D_CS_TRC_LG_REC_WC;
	}

	/* Enable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	val |= EIP197_TRC_ENABLE_0;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Clear all ECC errors */
	writel(0, priv->base + EIP197_TRC_ECCCTRL);

	/*
	 * Make sure the cache memory is accessible by taking record cache into
	 * reset.
	 */
	val = readl(priv->base + EIP197_TRC_PARAMS);
	val |= EIP197_TRC_PARAMS_SW_RESET;
	val &= ~EIP197_TRC_PARAMS_DATA_ACCESS;
	writel(val, priv->base + EIP197_TRC_PARAMS);

	/* Clear all records and link them into the free record chain */
	for (i = 0; i < cs_rc_max; i++) {
		u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;

		writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
		       EIP197_CS_RC_PREV(EIP197_RC_NULL),
		       priv->base + offset);

		val = EIP197_CS_RC_NEXT(i+1) | EIP197_CS_RC_PREV(i-1);
		if (i == 0)
			val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
		else if (i == cs_rc_max - 1)
			val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
		writel(val, priv->base + offset + sizeof(u32));
	}

	/* Clear the hash table entries */
	htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
	for (i = 0; i < cs_ht_wc; i++)
		writel(GENMASK(29, 0),
		       priv->base + EIP197_CLASSIFICATION_RAMS + htable_offset + i * sizeof(u32));

	/* Disable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Write head and tail pointers of the record free chain */
	val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
	      EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
	writel(val, priv->base + EIP197_TRC_FREECHAIN);

	/* Configure the record cache #1 */
	val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) |
	      EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max);
	writel(val, priv->base + EIP197_TRC_PARAMS2);

	/* Configure the record cache #2 */
	val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
	      EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
	      EIP197_TRC_PARAMS_HTABLE_SZ(2);
	writel(val, priv->base + EIP197_TRC_PARAMS);
}

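/*
 * Load one firmware image into a processing engine: hold the engine in reset,
 * enable access to its program RAM, copy the big-endian firmware words, then
 * disable RAM access again and release the reset.
 */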
static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
				  const struct firmware *fw, int pe, u32 ctrl,
				  u32 prog_en)
{
	const u32 *data = (const u32 *)fw->data;
	u32 val;
	int i;

	/* Reset the engine to make its program memory accessible */
	writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
	       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
	       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
	       EIP197_PE(priv) + ctrl);

	/* Enable access to the program memory */
	writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

	/* Write the firmware */
	for (i = 0; i < fw->size / sizeof(u32); i++)
		writel(be32_to_cpu(data[i]),
		       priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));

	/* Disable access to the program memory */
	writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

	/* Release engine from reset */
	val = readl(EIP197_PE(priv) + ctrl);
	val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET;
	writel(val, EIP197_PE(priv) + ctrl);
}

static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
{
	const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
	const struct firmware *fw[FW_NB];
	char fw_path[31], *dir = NULL;
	int i, j, ret = 0, pe;
	u32 val;

	switch (priv->version) {
	case EIP197B:
		dir = "eip197b";
		break;
	case EIP197D:
		dir = "eip197d";
		break;
	default:
		/* No firmware is required */
		return 0;
	}

	for (i = 0; i < FW_NB; i++) {
		snprintf(fw_path, 31, "inside-secure/%s/%s", dir, fw_name[i]);
		ret = request_firmware(&fw[i], fw_path, priv->dev);
		if (ret) {
			if (priv->version != EIP197B)
				goto release_fw;

			/* Fall back to the old firmware location for the
			 * EIP197b.
			 */
			ret = request_firmware(&fw[i], fw_name[i], priv->dev);
			if (ret) {
				dev_err(priv->dev,
					"Failed to request firmware %s (%d)\n",
					fw_name[i], ret);
				goto release_fw;
			}
		}
	}

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Clear the scratchpad memory */
		val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
		val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
		       EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
		       EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
		       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));

		memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM(pe), 0,
			  EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));

		eip197_write_firmware(priv, fw[FW_IFPP], pe,
				      EIP197_PE_ICE_FPP_CTRL(pe),
				      EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);

		eip197_write_firmware(priv, fw[FW_IPUE], pe,
				      EIP197_PE_ICE_PUE_CTRL(pe),
				      EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN);
	}

release_fw:
	for (j = 0; j < i; j++)
		release_firmware(fw[j]);

	return ret;
}

static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 hdw, cd_size_rnd, val;
	int i;

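	/*
	 * Bits 27:25 of HIA_OPTIONS encode the host interface data width
	 * (HDW); round the command descriptor size up to a full bus word.
	 */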
	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	hdw &= GENMASK(27, 25);
	hdw >>= 25;

	cd_size_rnd = (priv->config.cd_size + (BIT(hdw) - 1)) >> hdw;

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
		       priv->config.cd_size,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
		writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) |
		       (EIP197_FETCH_COUNT * priv->config.cd_offset),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(5, 0),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
	}

	return 0;
}

static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 hdw, rd_size_rnd, val;
	int i;

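	/* Same HDW-based rounding as for the command descriptor rings */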
	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	hdw &= GENMASK(27, 25);
	hdw >>= 25;

	rd_size_rnd = (priv->config.rd_size + (BIT(hdw) - 1)) >> hdw;

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
		       priv->config.rd_size,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);

		writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) |
		       (EIP197_FETCH_COUNT * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
		writel(val,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(7, 0),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* enable ring interrupt */
		val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
		val |= EIP197_RDR_IRQ(i);
		writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
	}

	return 0;
}

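/*
 * Bring the engine into a known state: configure the HIA master interface,
 * the DFE/DSE threads and the processing engines, prepare all command and
 * result descriptor rings, and (on EIP197 variants) initialize the TRC and
 * load the firmware.
 */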
static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
	u32 version, val;
	int i, ret, pe;

	/* Determine endianness and configure byte swap */
	version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION);
	val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);

	if ((version & 0xffff) == EIP197_HIA_VERSION_BE)
		val |= EIP197_MST_CTRL_BYTE_SWAP;
	else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
		val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);

	/* For EIP197 set maximum number of TX commands to 2^5 = 32 */
	if (priv->version == EIP197B || priv->version == EIP197D)
		val |= EIP197_MST_CTRL_TX_MAX_CMD(5);

	writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);

	/* Configure wr/rd cache values */
	writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
	       EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
	       EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);

	/* Interrupts reset */

	/* Disable all global interrupts */
	writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);

	/* Clear any pending interrupt */
	writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	/* Processing Engine configuration */
	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Data Fetch Engine configuration */

		/* Reset all DFE threads */
		writel(EIP197_DxE_THR_CTRL_RESET_PE,
		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		if (priv->version == EIP197B || priv->version == EIP197D) {
			/* Reset HIA input interface arbiter */
			writel(EIP197_HIA_RA_PE_CTRL_RESET,
			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
		}

		/* DMA transfer size to use */
		val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
		val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
		       EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
		val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe));

		/* Take the DFE threads out of reset */
		writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		/* Configure the processing engine thresholds */
		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
		       EIP197_PE_IN_xBUF_THRES_MAX(9),
		       EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe));
		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
		       EIP197_PE_IN_xBUF_THRES_MAX(7),
		       EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));

		if (priv->version == EIP197B || priv->version == EIP197D) {
			/* enable HIA input interface arbiter and rings */
			writel(EIP197_HIA_RA_PE_CTRL_EN |
			       GENMASK(priv->config.rings - 1, 0),
			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
		}

		/* Data Store Engine configuration */

		/* Reset all DSE threads */
		writel(EIP197_DxE_THR_CTRL_RESET_PE,
		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

		/* Wait for all DSE threads to complete */
		while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) &
			GENMASK(15, 12)) != GENMASK(15, 12))
			;

		/* DMA transfer size to use */
		val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
		val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
		/* FIXME: instability issues can occur for EIP97 but disabling
		 * it impacts performance.
		 */
		if (priv->version == EIP197B || priv->version == EIP197D)
			val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
		writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));

		/* Take the DSE threads out of reset */
		writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

		/* Configure the processing engine thresholds */
		writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) |
		       EIP197_PE_OUT_DBUF_THRES_MAX(8),
		       EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));

		/* Processing Engine configuration */

		/* H/W capabilities selection */
		val = EIP197_FUNCTION_RSVD;
		val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
		val |= EIP197_PROTOCOL_ENCRYPT_HASH | EIP197_PROTOCOL_HASH_DECRYPT;
		val |= EIP197_ALG_DES_ECB | EIP197_ALG_DES_CBC;
		val |= EIP197_ALG_3DES_ECB | EIP197_ALG_3DES_CBC;
		val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
		val |= EIP197_ALG_MD5 | EIP197_ALG_HMAC_MD5;
		val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
		val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2;
		writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
	}

	/* Prepare the command descriptor rings */
	for (i = 0; i < priv->config.rings; i++) {
		/* Clear interrupts for this ring */
		writel(GENMASK(31, 0),
		       EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));

		/* Disable external triggering */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	/* Prepare the result descriptor rings */
	for (i = 0; i < priv->config.rings; i++) {
		/* Disable external triggering */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		/* Ring size */
		writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Enable command descriptor rings */
		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		/* Enable result descriptor rings */
		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
	}

	/* Clear any HIA interrupt */
	writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	if (priv->version == EIP197B || priv->version == EIP197D) {
		eip197_trc_cache_init(priv);

		ret = eip197_load_firmwares(priv);
		if (ret)
			return ret;
	}

	safexcel_hw_setup_cdesc_rings(priv);
	safexcel_hw_setup_rdesc_rings(priv);

	return 0;
}

/* Called with ring's lock taken */
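/*
 * Program the RDR threshold so an interrupt is raised after at most
 * EIP197_MAX_BATCH_SZ processed packets (interrupt coalescing).
 */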
static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
				       int ring)
{
	int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);

	if (!coal)
		return;

	/* Configure when we want an interrupt */
	writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
	       EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
}

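/*
 * Pull requests off the ring's crypto queue, hand them to their context's
 * send() callback, then advertise the prepared command/result descriptors to
 * the CDR and RDR.
 */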
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
	struct crypto_async_request *req, *backlog;
	struct safexcel_context *ctx;
	int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;

	/* If a request wasn't properly dequeued because of a lack of resources,
	 * process it first.
	 */
	req = priv->ring[ring].req;
	backlog = priv->ring[ring].backlog;
	if (req)
		goto handle_req;

	while (true) {
		spin_lock_bh(&priv->ring[ring].queue_lock);
		backlog = crypto_get_backlog(&priv->ring[ring].queue);
		req = crypto_dequeue_request(&priv->ring[ring].queue);
		spin_unlock_bh(&priv->ring[ring].queue_lock);

		if (!req) {
			priv->ring[ring].req = NULL;
			priv->ring[ring].backlog = NULL;
			goto finalize;
		}

handle_req:
		ctx = crypto_tfm_ctx(req->tfm);
		ret = ctx->send(req, ring, &commands, &results);
		if (ret)
			goto request_failed;

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		/* In case the send() helper did not issue any command to push
		 * to the engine because the input data was cached, continue to
		 * dequeue other requests as this is valid and not an error.
		 */
		if (!commands && !results)
			continue;

		cdesc += commands;
		rdesc += results;
		nreq++;
	}

request_failed:
	/* Not enough resources to handle all the requests. Bail out and save
	 * the request and the backlog for the next dequeue call (per-ring).
	 */
	priv->ring[ring].req = req;
	priv->ring[ring].backlog = backlog;

finalize:
	if (!nreq)
		return;

	spin_lock_bh(&priv->ring[ring].lock);

	priv->ring[ring].requests += nreq;

	if (!priv->ring[ring].busy) {
		safexcel_try_push_requests(priv, ring);
		priv->ring[ring].busy = true;
	}

	spin_unlock_bh(&priv->ring[ring].lock);

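	/* The prepared-descriptor counts are written in bytes; cd/rd_offset
	 * are in 32-bit words, hence the shift by 2.
	 */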
	/* let the RDR know we have pending descriptors */
	writel((rdesc * priv->config.rd_offset) << 2,
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);

	/* let the CDR know we have pending descriptors */
	writel((cdesc * priv->config.cd_offset) << 2,
	       EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}

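/* Map the error_code field of a result descriptor onto an errno value */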
inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
				       struct safexcel_result_desc *rdesc)
{
	if (likely(!rdesc->result_data.error_code))
		return 0;

	if (rdesc->result_data.error_code & 0x407f) {
		/* Fatal error (bits 0-7, 14) */
		dev_err(priv->dev,
			"cipher: result: result descriptor error (%d)\n",
			rdesc->result_data.error_code);
		return -EIO;
	} else if (rdesc->result_data.error_code == BIT(9)) {
		/* Authentication failed */
		return -EBADMSG;
	}

	/* All other non-fatal errors */
	return -EINVAL;
}

inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
				 int ring,
				 struct safexcel_result_desc *rdesc,
				 struct crypto_async_request *req)
{
	int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);

	priv->ring[ring].rdr_req[i] = req;
}

inline struct crypto_async_request *
safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
{
	int i = safexcel_ring_first_rdr_index(priv, ring);

	return priv->ring[ring].rdr_req[i];
}

void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
	struct safexcel_command_desc *cdesc;

	/* Acknowledge the command descriptors */
	do {
		cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
		if (IS_ERR(cdesc)) {
			dev_err(priv->dev,
				"Could not retrieve the command descriptor\n");
			return;
		}
	} while (!cdesc->last_seg);
}

void safexcel_inv_complete(struct crypto_async_request *req, int error)
{
	struct safexcel_inv_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

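/*
 * Queue a dummy command/result descriptor pair asking the engine to
 * invalidate the context (transform) record located at ctxr_dma.
 */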
int safexcel_invalidate_cache(struct crypto_async_request *async,
			      struct safexcel_crypto_priv *priv,
			      dma_addr_t ctxr_dma, int ring)
{
	struct safexcel_command_desc *cdesc;
	struct safexcel_result_desc *rdesc;
	int ret = 0;

	/* Prepare command descriptor */
	cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	cdesc->control_data.type = EIP197_TYPE_EXTENDED;
	cdesc->control_data.options = 0;
	cdesc->control_data.refresh = 0;
	cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;

	/* Prepare result descriptor */
	rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);

	if (IS_ERR(rdesc)) {
		ret = PTR_ERR(rdesc);
		goto cdesc_rollback;
	}

	safexcel_rdr_req_set(priv, ring, rdesc, async);

	return ret;

cdesc_rollback:
	safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

	return ret;
}

static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
						     int ring)
{
	struct crypto_async_request *req;
	struct safexcel_context *ctx;
	int ret, i, nreq, ndesc, tot_descs, handled = 0;
	bool should_complete;

handle_results:
	tot_descs = 0;

	nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
	nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
	nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
	if (!nreq)
		goto requests_left;

	for (i = 0; i < nreq; i++) {
		req = safexcel_rdr_req_get(priv, ring);

		ctx = crypto_tfm_ctx(req->tfm);
		ndesc = ctx->handle_result(priv, ring, req,
					   &should_complete, &ret);
		if (ndesc < 0) {
			dev_err(priv->dev, "failed to handle result (%d)", ndesc);
			goto acknowledge;
		}

		if (should_complete) {
			local_bh_disable();
			req->complete(req, ret);
			local_bh_enable();
		}

		tot_descs += ndesc;
		handled++;
	}

acknowledge:
	if (i) {
		writel(EIP197_xDR_PROC_xD_PKT(i) |
		       EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
	}

	/* If the number of requests overflowed the counter, try to process more
	 * requests.
	 */
	if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
		goto handle_results;

requests_left:
	spin_lock_bh(&priv->ring[ring].lock);

	priv->ring[ring].requests -= handled;
	safexcel_try_push_requests(priv, ring);

	if (!priv->ring[ring].requests)
		priv->ring[ring].busy = false;

	spin_unlock_bh(&priv->ring[ring].lock);
}

static void safexcel_dequeue_work(struct work_struct *work)
{
	struct safexcel_work_data *data =
			container_of(work, struct safexcel_work_data, work);

	safexcel_dequeue(data->priv, data->ring);
}

struct safexcel_ring_irq_data {
	struct safexcel_crypto_priv *priv;
	int ring;
};

static irqreturn_t safexcel_irq_ring(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring, rc = IRQ_NONE;
	u32 status, stat;

	status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
	if (!status)
		return rc;

	/* RDR interrupts */
	if (status & EIP197_RDR_IRQ(ring)) {
		stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);

		if (unlikely(stat & EIP197_xDR_ERR)) {
			/*
			 * Fatal error, the RDR is unusable and must be
			 * reinitialized. This should not happen under
			 * normal circumstances.
			 */
			dev_err(priv->dev, "RDR: fatal error.");
		} else if (likely(stat & EIP197_xDR_THRESH)) {
			rc = IRQ_WAKE_THREAD;
		}

		/* ACK the interrupts */
		writel(stat & 0xff,
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
	}

	/* ACK the interrupts */
	writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));

	return rc;
}

static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring;

	safexcel_handle_result_descriptor(priv, ring);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return IRQ_HANDLED;
}

static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name,
				     irq_handler_t handler,
				     irq_handler_t threaded_handler,
				     struct safexcel_ring_irq_data *ring_irq_priv)
{
	int ret, irq = platform_get_irq_byname(pdev, name);

	if (irq < 0) {
		dev_err(&pdev->dev, "unable to get IRQ '%s'\n", name);
		return irq;
	}

	ret = devm_request_threaded_irq(&pdev->dev, irq, handler,
					threaded_handler, IRQF_ONESHOT,
					dev_name(&pdev->dev), ring_irq_priv);
	if (ret) {
		dev_err(&pdev->dev, "unable to request IRQ %d\n", irq);
		return ret;
	}

	return irq;
}

static struct safexcel_alg_template *safexcel_algs[] = {
	&safexcel_alg_ecb_des,
	&safexcel_alg_cbc_des,
	&safexcel_alg_ecb_des3_ede,
	&safexcel_alg_cbc_des3_ede,
	&safexcel_alg_ecb_aes,
	&safexcel_alg_cbc_aes,
	&safexcel_alg_md5,
	&safexcel_alg_sha1,
	&safexcel_alg_sha224,
	&safexcel_alg_sha256,
	&safexcel_alg_sha384,
	&safexcel_alg_sha512,
	&safexcel_alg_hmac_md5,
	&safexcel_alg_hmac_sha1,
	&safexcel_alg_hmac_sha224,
	&safexcel_alg_hmac_sha256,
	&safexcel_alg_hmac_sha384,
	&safexcel_alg_hmac_sha512,
	&safexcel_alg_authenc_hmac_sha1_cbc_aes,
	&safexcel_alg_authenc_hmac_sha224_cbc_aes,
	&safexcel_alg_authenc_hmac_sha256_cbc_aes,
	&safexcel_alg_authenc_hmac_sha384_cbc_aes,
	&safexcel_alg_authenc_hmac_sha512_cbc_aes,
};

static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
{
	int i, j, ret = 0;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		safexcel_algs[i]->priv = priv;

		if (!(safexcel_algs[i]->engines & priv->version))
			continue;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
		else
			ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);

		if (ret)
			goto fail;
	}

	return 0;

fail:
	for (j = 0; j < i; j++) {
		if (!(safexcel_algs[j]->engines & priv->version))
			continue;

		if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
		else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[j]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
	}

	return ret;
}

static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		if (!(safexcel_algs[i]->engines & priv->version))
			continue;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[i]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
	}
}

static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
	u32 val, mask = 0;

	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);

	/* Read number of PEs from the engine */
	switch (priv->version) {
	case EIP197B:
	case EIP197D:
		mask = EIP197_N_PES_MASK;
		break;
	default:
		mask = EIP97_N_PES_MASK;
	}
	priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask;

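	/*
	 * Descriptor offsets must be aligned to the host interface data width
	 * (HIA_OPTIONS bits 27:25, a power-of-two number of 32-bit words).
	 */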
	val = (val & GENMASK(27, 25)) >> 25;
	mask = BIT(val) - 1;

	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);

	priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
	priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;

	priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32));
	priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
}

static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
{
	struct safexcel_register_offsets *offsets = &priv->offsets;

	switch (priv->version) {
	case EIP197B:
	case EIP197D:
		offsets->hia_aic	= EIP197_HIA_AIC_BASE;
		offsets->hia_aic_g	= EIP197_HIA_AIC_G_BASE;
		offsets->hia_aic_r	= EIP197_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr	= EIP197_HIA_AIC_xDR_BASE;
		offsets->hia_dfe	= EIP197_HIA_DFE_BASE;
		offsets->hia_dfe_thr	= EIP197_HIA_DFE_THR_BASE;
		offsets->hia_dse	= EIP197_HIA_DSE_BASE;
		offsets->hia_dse_thr	= EIP197_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg	= EIP197_HIA_GEN_CFG_BASE;
		offsets->pe		= EIP197_PE_BASE;
		break;
	case EIP97IES:
		offsets->hia_aic	= EIP97_HIA_AIC_BASE;
		offsets->hia_aic_g	= EIP97_HIA_AIC_G_BASE;
		offsets->hia_aic_r	= EIP97_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr	= EIP97_HIA_AIC_xDR_BASE;
		offsets->hia_dfe	= EIP97_HIA_DFE_BASE;
		offsets->hia_dfe_thr	= EIP97_HIA_DFE_THR_BASE;
		offsets->hia_dse	= EIP97_HIA_DSE_BASE;
		offsets->hia_dse_thr	= EIP97_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg	= EIP97_HIA_GEN_CFG_BASE;
		offsets->pe		= EIP97_PE_BASE;
		break;
	}
}

static int safexcel_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct safexcel_crypto_priv *priv;
	int i, ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);

	if (priv->version == EIP197B || priv->version == EIP197D)
		priv->flags |= EIP197_TRC_CACHE;

	safexcel_init_register_offsets(priv);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->base)) {
		dev_err(dev, "failed to get resource\n");
		return PTR_ERR(priv->base);
	}

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	ret = PTR_ERR_OR_ZERO(priv->clk);
	/* The clock isn't mandatory */
	if  (ret != -ENOENT) {
		if (ret)
			return ret;

		ret = clk_prepare_enable(priv->clk);
		if (ret) {
			dev_err(dev, "unable to enable clk (%d)\n", ret);
			return ret;
		}
	}

	priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
	ret = PTR_ERR_OR_ZERO(priv->reg_clk);
	/* The clock isn't mandatory */
	if  (ret != -ENOENT) {
		if (ret)
			goto err_core_clk;

		ret = clk_prepare_enable(priv->reg_clk);
		if (ret) {
			dev_err(dev, "unable to enable reg clk (%d)\n", ret);
			goto err_core_clk;
		}
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_reg_clk;

	priv->context_pool = dmam_pool_create("safexcel-context", dev,
					      sizeof(struct safexcel_context_record),
					      1, 0);
	if (!priv->context_pool) {
		ret = -ENOMEM;
		goto err_reg_clk;
	}

	safexcel_configure(priv);

	priv->ring = devm_kzalloc(dev, priv->config.rings * sizeof(*priv->ring),
				  GFP_KERNEL);
	if (!priv->ring) {
		ret = -ENOMEM;
		goto err_reg_clk;
	}

	for (i = 0; i < priv->config.rings; i++) {
		char irq_name[6] = {0}; /* "ringX\0" */
		char wq_name[9] = {0}; /* "wq_ringX\0" */
		int irq;
		struct safexcel_ring_irq_data *ring_irq;

		ret = safexcel_init_ring_descriptors(priv,
						     &priv->ring[i].cdr,
						     &priv->ring[i].rdr);
		if (ret)
			goto err_reg_clk;

		priv->ring[i].rdr_req = devm_kzalloc(dev,
			sizeof(priv->ring[i].rdr_req) * EIP197_DEFAULT_RING_SIZE,
			GFP_KERNEL);
		if (!priv->ring[i].rdr_req) {
			ret = -ENOMEM;
			goto err_reg_clk;
		}

		ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
		if (!ring_irq) {
			ret = -ENOMEM;
			goto err_reg_clk;
		}

		ring_irq->priv = priv;
		ring_irq->ring = i;

		snprintf(irq_name, 6, "ring%d", i);
		irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring,
						safexcel_irq_ring_thread,
						ring_irq);
		if (irq < 0) {
			ret = irq;
			goto err_reg_clk;
		}

		priv->ring[i].work_data.priv = priv;
		priv->ring[i].work_data.ring = i;
		INIT_WORK(&priv->ring[i].work_data.work, safexcel_dequeue_work);

		snprintf(wq_name, 9, "wq_ring%d", i);
		priv->ring[i].workqueue = create_singlethread_workqueue(wq_name);
		if (!priv->ring[i].workqueue) {
			ret = -ENOMEM;
			goto err_reg_clk;
		}

		priv->ring[i].requests = 0;
		priv->ring[i].busy = false;

		crypto_init_queue(&priv->ring[i].queue,
				  EIP197_DEFAULT_RING_SIZE);

		spin_lock_init(&priv->ring[i].lock);
		spin_lock_init(&priv->ring[i].queue_lock);
	}

	platform_set_drvdata(pdev, priv);
	atomic_set(&priv->ring_used, 0);

	ret = safexcel_hw_init(priv);
	if (ret) {
		dev_err(dev, "EIP h/w init failed (%d)\n", ret);
		goto err_reg_clk;
	}

	ret = safexcel_register_algorithms(priv);
	if (ret) {
		dev_err(dev, "Failed to register algorithms (%d)\n", ret);
		goto err_reg_clk;
	}

	return 0;

err_reg_clk:
	clk_disable_unprepare(priv->reg_clk);
err_core_clk:
	clk_disable_unprepare(priv->clk);
	return ret;
}

static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < priv->config.rings; i++) {
		/* clear any pending interrupt */
		writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
		writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* Reset the CDR base address */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		/* Reset the RDR base address */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
	}
}

static int safexcel_remove(struct platform_device *pdev)
{
	struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);
	safexcel_hw_reset_rings(priv);

	clk_disable_unprepare(priv->clk);

	for (i = 0; i < priv->config.rings; i++)
		destroy_workqueue(priv->ring[i].workqueue);

	return 0;
}

static const struct of_device_id safexcel_of_match_table[] = {
	{
		.compatible = "inside-secure,safexcel-eip97ies",
		.data = (void *)EIP97IES,
	},
	{
		.compatible = "inside-secure,safexcel-eip197b",
		.data = (void *)EIP197B,
	},
	{
		.compatible = "inside-secure,safexcel-eip197d",
		.data = (void *)EIP197D,
	},
	{
		/* Deprecated. Kept for backward compatibility. */
		.compatible = "inside-secure,safexcel-eip97",
		.data = (void *)EIP97IES,
	},
	{
		/* Deprecated. Kept for backward compatibility. */
		.compatible = "inside-secure,safexcel-eip197",
		.data = (void *)EIP197B,
	},
	{},
};


static struct platform_driver crypto_safexcel = {
	.probe		= safexcel_probe,
	.remove		= safexcel_remove,
	.driver		= {
		.name	= "crypto-safexcel",
		.of_match_table = safexcel_of_match_table,
	},
};
module_platform_driver(crypto_safexcel);

MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
MODULE_DESCRIPTION("Support for SafeXcel cryptographic engine EIP197");
MODULE_LICENSE("GPL v2");