/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"

static u32 max_rings = EIP197_MAX_RINGS;
module_param(max_rings, uint, 0644);
MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");

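/*
 * Initialize the EIP197 transform record cache (TRC): enable access to the
 * cache RAM, clear all records and link them into a free chain, clear the
 * hash table and program the cache parameters for this engine version.
 */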
static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
	u32 val, htable_offset;
	int i, cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;

	if (priv->version == EIP197B) {
		cs_rc_max = EIP197B_CS_RC_MAX;
		cs_ht_wc = EIP197B_CS_HT_WC;
		cs_trc_rec_wc = EIP197B_CS_TRC_REC_WC;
		cs_trc_lg_rec_wc = EIP197B_CS_TRC_LG_REC_WC;
	} else {
		cs_rc_max = EIP197D_CS_RC_MAX;
		cs_ht_wc = EIP197D_CS_HT_WC;
		cs_trc_rec_wc = EIP197D_CS_TRC_REC_WC;
		cs_trc_lg_rec_wc = EIP197D_CS_TRC_LG_REC_WC;
	}

	/* Enable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	val |= EIP197_TRC_ENABLE_0;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Clear all ECC errors */
	writel(0, priv->base + EIP197_TRC_ECCCTRL);

	/*
	 * Make sure the cache memory is accessible by taking record cache into
	 * reset.
	 */
	val = readl(priv->base + EIP197_TRC_PARAMS);
	val |= EIP197_TRC_PARAMS_SW_RESET;
	val &= ~EIP197_TRC_PARAMS_DATA_ACCESS;
	writel(val, priv->base + EIP197_TRC_PARAMS);

	/* Clear all records */
	for (i = 0; i < cs_rc_max; i++) {
		u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;

		writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
		       EIP197_CS_RC_PREV(EIP197_RC_NULL),
		       priv->base + offset);

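		/*
		 * Link record i into the free chain through its next/prev
		 * neighbours, terminating the chain with RC_NULL at both ends.
		 */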
		val = EIP197_CS_RC_NEXT(i+1) | EIP197_CS_RC_PREV(i-1);
		if (i == 0)
			val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
		else if (i == cs_rc_max - 1)
			val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
		writel(val, priv->base + offset + sizeof(u32));
	}

	/* Clear the hash table entries */
	htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
	for (i = 0; i < cs_ht_wc; i++)
		writel(GENMASK(29, 0),
		       priv->base + EIP197_CLASSIFICATION_RAMS + htable_offset + i * sizeof(u32));

	/* Disable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Write head and tail pointers of the record free chain */
	val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
	      EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
	writel(val, priv->base + EIP197_TRC_FREECHAIN);

	/* Configure the record cache #1 */
	val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) |
	      EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max);
	writel(val, priv->base + EIP197_TRC_PARAMS2);

	/* Configure the record cache #2 */
	val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
	      EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
	      EIP197_TRC_PARAMS_HTABLE_SZ(2);
	writel(val, priv->base + EIP197_TRC_PARAMS);
}

static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
				  const struct firmware *fw, int pe, u32 ctrl,
				  u32 prog_en)
{
	const u32 *data = (const u32 *)fw->data;
	u32 val;
	int i;

	/* Reset the engine to make its program memory accessible */
	writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
	       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
	       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
	       EIP197_PE(priv) + ctrl);

	/* Enable access to the program memory */
	writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

	/* Write the firmware */
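	/* Each 32-bit word of the firmware image is stored big-endian */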
	for (i = 0; i < fw->size / sizeof(u32); i++)
		writel(be32_to_cpu(data[i]),
		       priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));

	/* Disable access to the program memory */
	writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

	/* Release engine from reset */
	val = readl(EIP197_PE(priv) + ctrl);
	val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET;
	writel(val, EIP197_PE(priv) + ctrl);
}

static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
{
	const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
	const struct firmware *fw[FW_NB];
	char fw_path[31], *dir = NULL;
	int i, j, ret = 0, pe;
	u32 val;

	switch (priv->version) {
	case EIP197B:
		dir = "eip197b";
		break;
	case EIP197D:
		dir = "eip197d";
		break;
	default:
		/* No firmware is required */
		return 0;
	}

	for (i = 0; i < FW_NB; i++) {
		snprintf(fw_path, 31, "inside-secure/%s/%s", dir, fw_name[i]);
		ret = request_firmware(&fw[i], fw_path, priv->dev);
		if (ret) {
			if (priv->version != EIP197B)
				goto release_fw;

			/* Fall back to the old firmware location for the
			 * EIP197b.
			 */
			ret = request_firmware(&fw[i], fw_name[i], priv->dev);
			if (ret) {
				dev_err(priv->dev,
					"Failed to request firmware %s (%d)\n",
					fw_name[i], ret);
				goto release_fw;
			}
		}
	}

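	/* Program the IFPP and IPUE firmware images into each processing
	 * engine, clearing its scratchpad RAM first.
	 */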
	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Clear the scratchpad memory */
		val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
		val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
		       EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
		       EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
		       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));

		memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM(pe), 0,
			  EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));

		eip197_write_firmware(priv, fw[FW_IFPP], pe,
				      EIP197_PE_ICE_FPP_CTRL(pe),
				      EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);

		eip197_write_firmware(priv, fw[FW_IPUE], pe,
				      EIP197_PE_ICE_PUE_CTRL(pe),
				      EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN);
	}

release_fw:
	for (j = 0; j < i; j++)
		release_firmware(fw[j]);

	return ret;
}

static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 hdw, cd_size_rnd, val;
	int i;

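	/*
	 * Bits 27:25 of HIA_OPTIONS encode the host data width (HDW) as a
	 * power of two of 32-bit words; round the command descriptor size up
	 * to a whole host word.
	 */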
	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	hdw &= GENMASK(27, 25);
	hdw >>= 25;

	cd_size_rnd = (priv->config.cd_size + (BIT(hdw) - 1)) >> hdw;

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
		       priv->config.cd_size,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
		writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) |
		       (EIP197_FETCH_COUNT * priv->config.cd_offset),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(5, 0),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
	}

	return 0;
}

static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 hdw, rd_size_rnd, val;
	int i;

	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	hdw &= GENMASK(27, 25);
	hdw >>= 25;

	rd_size_rnd = (priv->config.rd_size + (BIT(hdw) - 1)) >> hdw;

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
		       priv->config.rd_size,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);

		writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) |
		       (EIP197_FETCH_COUNT * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
		writel(val,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(7, 0),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* enable ring interrupt */
		val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
		val |= EIP197_RDR_IRQ(i);
		writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
	}

	return 0;
}

static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
	u32 version, val;
	int i, ret, pe;

	/* Determine endianness and configure byte swap */
	version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION);
	val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);

	if ((version & 0xffff) == EIP197_HIA_VERSION_BE)
		val |= EIP197_MST_CTRL_BYTE_SWAP;
	else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
		val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);

	/* For EIP197 set maximum number of TX commands to 2^5 = 32 */
	if (priv->version == EIP197B || priv->version == EIP197D)
		val |= EIP197_MST_CTRL_TX_MAX_CMD(5);

	writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);

	/* Configure wr/rd cache values */
	writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
	       EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
	       EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);

	/* Interrupts reset */

	/* Disable all global interrupts */
	writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);

	/* Clear any pending interrupt */
	writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	/* Processing Engine configuration */
	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Data Fetch Engine configuration */

		/* Reset all DFE threads */
		writel(EIP197_DxE_THR_CTRL_RESET_PE,
		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		if (priv->version == EIP197B || priv->version == EIP197D) {
			/* Reset HIA input interface arbiter */
			writel(EIP197_HIA_RA_PE_CTRL_RESET,
			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
		}

		/* DMA transfer size to use */
		val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
		val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
		       EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
		val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe));

		/* Leave the DFE threads reset state */
		writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		/* Configure the processing engine thresholds */
		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
		       EIP197_PE_IN_xBUF_THRES_MAX(9),
		       EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe));
		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
		       EIP197_PE_IN_xBUF_THRES_MAX(7),
		       EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));

		if (priv->version == EIP197B || priv->version == EIP197D) {
			/* enable HIA input interface arbiter and rings */
			writel(EIP197_HIA_RA_PE_CTRL_EN |
			       GENMASK(priv->config.rings - 1, 0),
			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
		}
		/* Data Store Engine configuration */

		/* Reset all DSE threads */
		writel(EIP197_DxE_THR_CTRL_RESET_PE,
		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

		/* Wait for all DSE threads to complete */
		while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) &
			GENMASK(15, 12)) != GENMASK(15, 12))
			;

		/* DMA transfer size to use */
		val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
		val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
		/* FIXME: instability issues can occur for EIP97 but disabling
		 * it impacts performance.
		 */
		if (priv->version == EIP197B || priv->version == EIP197D)
			val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
		writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));

		/* Leave the DSE threads reset state */
		writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

		/* Configure the processing engine thresholds */
		writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) |
		       EIP197_PE_OUT_DBUF_THRES_MAX(8),
		       EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));

		/* Processing Engine configuration */

		/* H/W capabilities selection */
		val = EIP197_FUNCTION_RSVD;
		val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
		val |= EIP197_PROTOCOL_ENCRYPT_HASH | EIP197_PROTOCOL_HASH_DECRYPT;
		val |= EIP197_ALG_DES_ECB | EIP197_ALG_DES_CBC;
		val |= EIP197_ALG_3DES_ECB | EIP197_ALG_3DES_CBC;
		val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
		val |= EIP197_ALG_MD5 | EIP197_ALG_HMAC_MD5;
		val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
		val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2;
		writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
	}

	/* Command Descriptor Rings prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Clear interrupts for this ring */
		writel(GENMASK(31, 0),
		       EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));

		/* Disable external triggering */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	/* Result Descriptor Ring prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Disable external triggering */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		/* Ring size */
		writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Enable command descriptor rings */
		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		/* Enable result descriptor rings */
		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
	}

	/* Clear any HIA interrupt */
	writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	if (priv->version == EIP197B || priv->version == EIP197D) {
		eip197_trc_cache_init(priv);

		ret = eip197_load_firmwares(priv);
		if (ret)
			return ret;
	}

	safexcel_hw_setup_cdesc_rings(priv);
	safexcel_hw_setup_rdesc_rings(priv);

	return 0;
}

/* Called with ring's lock taken */
static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
				       int ring)
{
	int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);

	if (!coal)
		return;

	/* Configure when we want an interrupt */
	writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
	       EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
}

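/*
 * Pop requests from the ring's crypto queue and turn each one into command
 * and result descriptors through the context ->send() callback, then
 * advertise the freshly prepared descriptors to the CDR and RDR.
 */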
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
	struct crypto_async_request *req, *backlog;
	struct safexcel_context *ctx;
	int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;

	/* If a request wasn't properly dequeued because of a lack of resources,
	 * process it first.
	 */
	req = priv->ring[ring].req;
	backlog = priv->ring[ring].backlog;
	if (req)
		goto handle_req;

	while (true) {
		spin_lock_bh(&priv->ring[ring].queue_lock);
		backlog = crypto_get_backlog(&priv->ring[ring].queue);
		req = crypto_dequeue_request(&priv->ring[ring].queue);
		spin_unlock_bh(&priv->ring[ring].queue_lock);

		if (!req) {
			priv->ring[ring].req = NULL;
			priv->ring[ring].backlog = NULL;
			goto finalize;
		}

handle_req:
		ctx = crypto_tfm_ctx(req->tfm);
		ret = ctx->send(req, ring, &commands, &results);
		if (ret)
			goto request_failed;

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		/* In case the send() helper did not issue any command to push
		 * to the engine because the input data was cached, continue to
		 * dequeue other requests as this is valid and not an error.
		 */
		if (!commands && !results)
			continue;

		cdesc += commands;
		rdesc += results;
		nreq++;
	}

request_failed:
	/* Not enough resources to handle all the requests. Bail out and save
	 * the request and the backlog for the next dequeue call (per-ring).
	 */
	priv->ring[ring].req = req;
	priv->ring[ring].backlog = backlog;

finalize:
	if (!nreq)
		return;

	spin_lock_bh(&priv->ring[ring].lock);

	priv->ring[ring].requests += nreq;

	if (!priv->ring[ring].busy) {
		safexcel_try_push_requests(priv, ring);
		priv->ring[ring].busy = true;
	}

	spin_unlock_bh(&priv->ring[ring].lock);

	/* let the RDR know we have pending descriptors */
	writel((rdesc * priv->config.rd_offset) << 2,
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);

	/* let the CDR know we have pending descriptors */
	writel((cdesc * priv->config.cd_offset) << 2,
	       EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}

inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
				       struct safexcel_result_desc *rdesc)
{
	if (likely(!rdesc->result_data.error_code))
		return 0;

	if (rdesc->result_data.error_code & 0x407f) {
		/* Fatal error (bits 0-7, 14) */
		dev_err(priv->dev,
			"cipher: result: result descriptor error (%d)\n",
			rdesc->result_data.error_code);
		return -EIO;
	} else if (rdesc->result_data.error_code == BIT(9)) {
		/* Authentication failed */
		return -EBADMSG;
	}

	/* All other non-fatal errors */
	return -EINVAL;
}

inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
				 int ring,
				 struct safexcel_result_desc *rdesc,
				 struct crypto_async_request *req)
{
	int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);

	priv->ring[ring].rdr_req[i] = req;
}

inline struct crypto_async_request *
safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
{
	int i = safexcel_ring_first_rdr_index(priv, ring);

	return priv->ring[ring].rdr_req[i];
}

void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
	struct safexcel_command_desc *cdesc;

	/* Acknowledge the command descriptors */
	do {
		cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
		if (IS_ERR(cdesc)) {
			dev_err(priv->dev,
				"Could not retrieve the command descriptor\n");
			return;
		}
	} while (!cdesc->last_seg);
}

void safexcel_inv_complete(struct crypto_async_request *req, int error)
{
	struct safexcel_inv_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

int safexcel_invalidate_cache(struct crypto_async_request *async,
			      struct safexcel_crypto_priv *priv,
			      dma_addr_t ctxr_dma, int ring)
{
	struct safexcel_command_desc *cdesc;
	struct safexcel_result_desc *rdesc;
	int ret = 0;

	/* Prepare command descriptor */
	cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	cdesc->control_data.type = EIP197_TYPE_EXTENDED;
	cdesc->control_data.options = 0;
	cdesc->control_data.refresh = 0;
	cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;

	/* Prepare result descriptor */
	rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);

	if (IS_ERR(rdesc)) {
		ret = PTR_ERR(rdesc);
		goto cdesc_rollback;
	}

	safexcel_rdr_req_set(priv, ring, rdesc, async);

	return ret;

cdesc_rollback:
	safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

	return ret;
}

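/*
 * Called from the threaded ring IRQ handler: read the number of processed
 * packets from the RDR, hand each result to its context ->handle_result()
 * callback and acknowledge the handled descriptors back to the ring.
 */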
static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
						     int ring)
{
	struct crypto_async_request *req;
	struct safexcel_context *ctx;
	int ret, i, nreq, ndesc, tot_descs, handled = 0;
	bool should_complete;

handle_results:
	tot_descs = 0;

	nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
	nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
	nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
	if (!nreq)
		goto requests_left;

	for (i = 0; i < nreq; i++) {
		req = safexcel_rdr_req_get(priv, ring);

		ctx = crypto_tfm_ctx(req->tfm);
		ndesc = ctx->handle_result(priv, ring, req,
					   &should_complete, &ret);
		if (ndesc < 0) {
			dev_err(priv->dev, "failed to handle result (%d)", ndesc);
			goto acknowledge;
		}

		if (should_complete) {
			local_bh_disable();
			req->complete(req, ret);
			local_bh_enable();
		}

		tot_descs += ndesc;
		handled++;
	}

acknowledge:
	if (i) {
		writel(EIP197_xDR_PROC_xD_PKT(i) |
		       EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
	}

	/* If the number of requests overflowed the counter, try to process more
	 * requests.
	 */
	if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
		goto handle_results;

requests_left:
	spin_lock_bh(&priv->ring[ring].lock);

	priv->ring[ring].requests -= handled;
	safexcel_try_push_requests(priv, ring);

	if (!priv->ring[ring].requests)
		priv->ring[ring].busy = false;

	spin_unlock_bh(&priv->ring[ring].lock);
}

static void safexcel_dequeue_work(struct work_struct *work)
{
	struct safexcel_work_data *data =
			container_of(work, struct safexcel_work_data, work);

	safexcel_dequeue(data->priv, data->ring);
}

struct safexcel_ring_irq_data {
	struct safexcel_crypto_priv *priv;
	int ring;
};

static irqreturn_t safexcel_irq_ring(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring, rc = IRQ_NONE;
	u32 status, stat;

	status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
	if (!status)
		return rc;

	/* RDR interrupts */
	if (status & EIP197_RDR_IRQ(ring)) {
		stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);

		if (unlikely(stat & EIP197_xDR_ERR)) {
			/*
			 * Fatal error, the RDR is unusable and must be
			 * reinitialized. This should not happen under
			 * normal circumstances.
			 */
			dev_err(priv->dev, "RDR: fatal error.");
		} else if (likely(stat & EIP197_xDR_THRESH)) {
			rc = IRQ_WAKE_THREAD;
		}

		/* ACK the interrupts */
		writel(stat & 0xff,
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
	}

	/* ACK the interrupts */
	writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));

	return rc;
}

static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring;

	safexcel_handle_result_descriptor(priv, ring);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return IRQ_HANDLED;
}

static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name,
				     irq_handler_t handler,
				     irq_handler_t threaded_handler,
				     struct safexcel_ring_irq_data *ring_irq_priv)
{
	int ret, irq = platform_get_irq_byname(pdev, name);

	if (irq < 0) {
		dev_err(&pdev->dev, "unable to get IRQ '%s'\n", name);
		return irq;
	}

	ret = devm_request_threaded_irq(&pdev->dev, irq, handler,
					threaded_handler, IRQF_ONESHOT,
					dev_name(&pdev->dev), ring_irq_priv);
	if (ret) {
		dev_err(&pdev->dev, "unable to request IRQ %d\n", irq);
		return ret;
	}

	return irq;
}

static struct safexcel_alg_template *safexcel_algs[] = {
	&safexcel_alg_ecb_des,
	&safexcel_alg_cbc_des,
	&safexcel_alg_ecb_des3_ede,
	&safexcel_alg_cbc_des3_ede,
	&safexcel_alg_ecb_aes,
	&safexcel_alg_cbc_aes,
	&safexcel_alg_md5,
	&safexcel_alg_sha1,
	&safexcel_alg_sha224,
	&safexcel_alg_sha256,
	&safexcel_alg_sha384,
	&safexcel_alg_sha512,
	&safexcel_alg_hmac_md5,
	&safexcel_alg_hmac_sha1,
	&safexcel_alg_hmac_sha224,
	&safexcel_alg_hmac_sha256,
	&safexcel_alg_hmac_sha384,
	&safexcel_alg_hmac_sha512,
	&safexcel_alg_authenc_hmac_sha1_cbc_aes,
	&safexcel_alg_authenc_hmac_sha224_cbc_aes,
	&safexcel_alg_authenc_hmac_sha256_cbc_aes,
	&safexcel_alg_authenc_hmac_sha384_cbc_aes,
	&safexcel_alg_authenc_hmac_sha512_cbc_aes,
};

static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
{
	int i, j, ret = 0;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		safexcel_algs[i]->priv = priv;

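		/* Skip algorithms the current engine version cannot handle */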
		if (!(safexcel_algs[i]->engines & priv->version))
			continue;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
		else
			ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);

		if (ret)
			goto fail;
	}

	return 0;

fail:
	for (j = 0; j < i; j++) {
		if (!(safexcel_algs[j]->engines & priv->version))
			continue;

		if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
		else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[j]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
	}

	return ret;
}

static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		if (!(safexcel_algs[i]->engines & priv->version))
			continue;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[i]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
	}
}

static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
	u32 val, mask = 0;

	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);

	/* Read number of PEs from the engine */
	switch (priv->version) {
	case EIP197B:
	case EIP197D:
		mask = EIP197_N_PES_MASK;
		break;
	default:
		mask = EIP97_N_PES_MASK;
	}
	priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask;

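	/* Derive the descriptor offset alignment from the host data width
	 * (HIA_OPTIONS bits 27:25) so each descriptor starts on a full host
	 * word boundary.
	 */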
	val = (val & GENMASK(27, 25)) >> 25;
	mask = BIT(val) - 1;

	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);

	priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
	priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;

	priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32));
	priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
}

static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
{
	struct safexcel_register_offsets *offsets = &priv->offsets;

	switch (priv->version) {
	case EIP197B:
	case EIP197D:
		offsets->hia_aic	= EIP197_HIA_AIC_BASE;
		offsets->hia_aic_g	= EIP197_HIA_AIC_G_BASE;
		offsets->hia_aic_r	= EIP197_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr	= EIP197_HIA_AIC_xDR_BASE;
		offsets->hia_dfe	= EIP197_HIA_DFE_BASE;
		offsets->hia_dfe_thr	= EIP197_HIA_DFE_THR_BASE;
		offsets->hia_dse	= EIP197_HIA_DSE_BASE;
		offsets->hia_dse_thr	= EIP197_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg	= EIP197_HIA_GEN_CFG_BASE;
		offsets->pe		= EIP197_PE_BASE;
		break;
	case EIP97IES:
		offsets->hia_aic	= EIP97_HIA_AIC_BASE;
		offsets->hia_aic_g	= EIP97_HIA_AIC_G_BASE;
		offsets->hia_aic_r	= EIP97_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr	= EIP97_HIA_AIC_xDR_BASE;
		offsets->hia_dfe	= EIP97_HIA_DFE_BASE;
		offsets->hia_dfe_thr	= EIP97_HIA_DFE_THR_BASE;
		offsets->hia_dse	= EIP97_HIA_DSE_BASE;
		offsets->hia_dse_thr	= EIP97_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg	= EIP97_HIA_GEN_CFG_BASE;
		offsets->pe		= EIP97_PE_BASE;
		break;
	}
}

static int safexcel_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct safexcel_crypto_priv *priv;
	int i, ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);

	if (priv->version == EIP197B || priv->version == EIP197D)
		priv->flags |= EIP197_TRC_CACHE;

	safexcel_init_register_offsets(priv);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->base)) {
		dev_err(dev, "failed to get resource\n");
		return PTR_ERR(priv->base);
	}

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	ret = PTR_ERR_OR_ZERO(priv->clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			return ret;

		ret = clk_prepare_enable(priv->clk);
		if (ret) {
			dev_err(dev, "unable to enable clk (%d)\n", ret);
			return ret;
		}
	}

	priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
	ret = PTR_ERR_OR_ZERO(priv->reg_clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			goto err_core_clk;

		ret = clk_prepare_enable(priv->reg_clk);
		if (ret) {
			dev_err(dev, "unable to enable reg clk (%d)\n", ret);
			goto err_core_clk;
		}
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_reg_clk;

	priv->context_pool = dmam_pool_create("safexcel-context", dev,
					      sizeof(struct safexcel_context_record),
					      1, 0);
	if (!priv->context_pool) {
		ret = -ENOMEM;
		goto err_reg_clk;
	}

	safexcel_configure(priv);

	priv->ring = devm_kzalloc(dev, priv->config.rings * sizeof(*priv->ring),
				  GFP_KERNEL);
	if (!priv->ring) {
		ret = -ENOMEM;
		goto err_reg_clk;
	}

	for (i = 0; i < priv->config.rings; i++) {
		char irq_name[6] = {0}; /* "ringX\0" */
		char wq_name[9] = {0}; /* "wq_ringX\0" */
		int irq;
		struct safexcel_ring_irq_data *ring_irq;

		ret = safexcel_init_ring_descriptors(priv,
						     &priv->ring[i].cdr,
						     &priv->ring[i].rdr);
		if (ret)
			goto err_reg_clk;

		priv->ring[i].rdr_req = devm_kzalloc(dev,
			sizeof(priv->ring[i].rdr_req) * EIP197_DEFAULT_RING_SIZE,
			GFP_KERNEL);
		if (!priv->ring[i].rdr_req) {
			ret = -ENOMEM;
			goto err_reg_clk;
		}

		ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
		if (!ring_irq) {
			ret = -ENOMEM;
			goto err_reg_clk;
		}

		ring_irq->priv = priv;
		ring_irq->ring = i;

		snprintf(irq_name, 6, "ring%d", i);
		irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring,
						safexcel_irq_ring_thread,
						ring_irq);
		if (irq < 0) {
			ret = irq;
			goto err_reg_clk;
		}

		priv->ring[i].work_data.priv = priv;
		priv->ring[i].work_data.ring = i;
		INIT_WORK(&priv->ring[i].work_data.work, safexcel_dequeue_work);

		snprintf(wq_name, 9, "wq_ring%d", i);
		priv->ring[i].workqueue = create_singlethread_workqueue(wq_name);
		if (!priv->ring[i].workqueue) {
			ret = -ENOMEM;
			goto err_reg_clk;
		}

		priv->ring[i].requests = 0;
		priv->ring[i].busy = false;

		crypto_init_queue(&priv->ring[i].queue,
				  EIP197_DEFAULT_RING_SIZE);

		spin_lock_init(&priv->ring[i].lock);
		spin_lock_init(&priv->ring[i].queue_lock);
	}

	platform_set_drvdata(pdev, priv);
	atomic_set(&priv->ring_used, 0);

	ret = safexcel_hw_init(priv);
	if (ret) {
		dev_err(dev, "EIP h/w init failed (%d)\n", ret);
		goto err_reg_clk;
	}

	ret = safexcel_register_algorithms(priv);
	if (ret) {
		dev_err(dev, "Failed to register algorithms (%d)\n", ret);
		goto err_reg_clk;
	}

	return 0;

err_reg_clk:
	clk_disable_unprepare(priv->reg_clk);
err_core_clk:
	clk_disable_unprepare(priv->clk);
	return ret;
}

static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < priv->config.rings; i++) {
		/* clear any pending interrupt */
		writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
		writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* Reset the CDR base address */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		/* Reset the RDR base address */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
	}
}

static int safexcel_remove(struct platform_device *pdev)
{
	struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);
	safexcel_hw_reset_rings(priv);

	clk_disable_unprepare(priv->clk);

	for (i = 0; i < priv->config.rings; i++)
		destroy_workqueue(priv->ring[i].workqueue);

	return 0;
}

static const struct of_device_id safexcel_of_match_table[] = {
	{
		.compatible = "inside-secure,safexcel-eip97ies",
		.data = (void *)EIP97IES,
	},
	{
		.compatible = "inside-secure,safexcel-eip197b",
		.data = (void *)EIP197B,
	},
	{
		.compatible = "inside-secure,safexcel-eip197d",
		.data = (void *)EIP197D,
	},
	{
		/* Deprecated. Kept for backward compatibility. */
		.compatible = "inside-secure,safexcel-eip97",
		.data = (void *)EIP97IES,
	},
	{
		/* Deprecated. Kept for backward compatibility. */
		.compatible = "inside-secure,safexcel-eip197",
		.data = (void *)EIP197B,
	},
	{},
};


static struct platform_driver  crypto_safexcel = {
	.probe		= safexcel_probe,
	.remove		= safexcel_remove,
	.driver		= {
		.name	= "crypto-safexcel",
		.of_match_table = safexcel_of_match_table,
	},
};
module_platform_driver(crypto_safexcel);

MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
MODULE_DESCRIPTION("Support for SafeXcel cryptographic engine EIP197");
MODULE_LICENSE("GPL v2");