/*
 * Cryptographic API.
 *
 * Support for Samsung S5PV210 HW acceleration.
 *
 * Copyright (C) 2011 NetUP Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 */

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/ctr.h>
#include <crypto/scatterwalk.h>

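/*
 * Set Bit Field: shift value @v left by @s bits, so e.g. _SBF(4, 0x02)
 * yields 0x20 (this is how SSS_AES_KEY_SIZE_256 below gets its encoding).
 */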
#define _SBF(s, v)			((v) << (s))

/* Feed control registers */
#define SSS_REG_FCINTSTAT		0x0000
#define SSS_FCINTSTAT_BRDMAINT		BIT(3)
#define SSS_FCINTSTAT_BTDMAINT		BIT(2)
#define SSS_FCINTSTAT_HRDMAINT		BIT(1)
#define SSS_FCINTSTAT_PKDMAINT		BIT(0)

#define SSS_REG_FCINTENSET		0x0004
#define SSS_FCINTENSET_BRDMAINTENSET	BIT(3)
#define SSS_FCINTENSET_BTDMAINTENSET	BIT(2)
#define SSS_FCINTENSET_HRDMAINTENSET	BIT(1)
#define SSS_FCINTENSET_PKDMAINTENSET	BIT(0)

#define SSS_REG_FCINTENCLR		0x0008
#define SSS_FCINTENCLR_BRDMAINTENCLR	BIT(3)
#define SSS_FCINTENCLR_BTDMAINTENCLR	BIT(2)
#define SSS_FCINTENCLR_HRDMAINTENCLR	BIT(1)
#define SSS_FCINTENCLR_PKDMAINTENCLR	BIT(0)

#define SSS_REG_FCINTPEND		0x000C
#define SSS_FCINTPEND_BRDMAINTP		BIT(3)
#define SSS_FCINTPEND_BTDMAINTP		BIT(2)
#define SSS_FCINTPEND_HRDMAINTP		BIT(1)
#define SSS_FCINTPEND_PKDMAINTP		BIT(0)

#define SSS_REG_FCFIFOSTAT		0x0010
#define SSS_FCFIFOSTAT_BRFIFOFUL	BIT(7)
#define SSS_FCFIFOSTAT_BRFIFOEMP	BIT(6)
#define SSS_FCFIFOSTAT_BTFIFOFUL	BIT(5)
#define SSS_FCFIFOSTAT_BTFIFOEMP	BIT(4)
#define SSS_FCFIFOSTAT_HRFIFOFUL	BIT(3)
#define SSS_FCFIFOSTAT_HRFIFOEMP	BIT(2)
#define SSS_FCFIFOSTAT_PKFIFOFUL	BIT(1)
#define SSS_FCFIFOSTAT_PKFIFOEMP	BIT(0)

#define SSS_REG_FCFIFOCTRL		0x0014
#define SSS_FCFIFOCTRL_DESSEL		BIT(2)
#define SSS_HASHIN_INDEPENDENT		_SBF(0, 0x00)
#define SSS_HASHIN_CIPHER_INPUT		_SBF(0, 0x01)
#define SSS_HASHIN_CIPHER_OUTPUT	_SBF(0, 0x02)

#define SSS_REG_FCBRDMAS		0x0020
#define SSS_REG_FCBRDMAL		0x0024
#define SSS_REG_FCBRDMAC		0x0028
#define SSS_FCBRDMAC_BYTESWAP		BIT(1)
#define SSS_FCBRDMAC_FLUSH		BIT(0)

#define SSS_REG_FCBTDMAS		0x0030
#define SSS_REG_FCBTDMAL		0x0034
#define SSS_REG_FCBTDMAC		0x0038
#define SSS_FCBTDMAC_BYTESWAP		BIT(1)
#define SSS_FCBTDMAC_FLUSH		BIT(0)

#define SSS_REG_FCHRDMAS		0x0040
#define SSS_REG_FCHRDMAL		0x0044
#define SSS_REG_FCHRDMAC		0x0048
#define SSS_FCHRDMAC_BYTESWAP		BIT(1)
#define SSS_FCHRDMAC_FLUSH		BIT(0)

#define SSS_REG_FCPKDMAS		0x0050
#define SSS_REG_FCPKDMAL		0x0054
#define SSS_REG_FCPKDMAC		0x0058
#define SSS_FCPKDMAC_BYTESWAP		BIT(3)
#define SSS_FCPKDMAC_DESCEND		BIT(2)
#define SSS_FCPKDMAC_TRANSMIT		BIT(1)
#define SSS_FCPKDMAC_FLUSH		BIT(0)

#define SSS_REG_FCPKDMAO		0x005C

/* AES registers */
#define SSS_REG_AES_CONTROL		0x00
#define SSS_AES_BYTESWAP_DI		BIT(11)
#define SSS_AES_BYTESWAP_DO		BIT(10)
#define SSS_AES_BYTESWAP_IV		BIT(9)
#define SSS_AES_BYTESWAP_CNT		BIT(8)
#define SSS_AES_BYTESWAP_KEY		BIT(7)
#define SSS_AES_KEY_CHANGE_MODE		BIT(6)
#define SSS_AES_KEY_SIZE_128		_SBF(4, 0x00)
#define SSS_AES_KEY_SIZE_192		_SBF(4, 0x01)
#define SSS_AES_KEY_SIZE_256		_SBF(4, 0x02)
#define SSS_AES_FIFO_MODE		BIT(3)
#define SSS_AES_CHAIN_MODE_ECB		_SBF(1, 0x00)
#define SSS_AES_CHAIN_MODE_CBC		_SBF(1, 0x01)
#define SSS_AES_CHAIN_MODE_CTR		_SBF(1, 0x02)
#define SSS_AES_MODE_DECRYPT		BIT(0)

#define SSS_REG_AES_STATUS		0x04
#define SSS_AES_BUSY			BIT(2)
#define SSS_AES_INPUT_READY		BIT(1)
#define SSS_AES_OUTPUT_READY		BIT(0)

#define SSS_REG_AES_IN_DATA(s)		(0x10 + ((s) << 2))
#define SSS_REG_AES_OUT_DATA(s)		(0x20 + ((s) << 2))
#define SSS_REG_AES_IV_DATA(s)		(0x30 + ((s) << 2))
#define SSS_REG_AES_CNT_DATA(s)		(0x40 + ((s) << 2))
#define SSS_REG_AES_KEY_DATA(s)		(0x80 + ((s) << 2))

#define SSS_REG(dev, reg)		((dev)->ioaddr + (SSS_REG_##reg))
#define SSS_READ(dev, reg)		__raw_readl(SSS_REG(dev, reg))
#define SSS_WRITE(dev, reg, val)	__raw_writel((val), SSS_REG(dev, reg))
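/*
 * e.g. SSS_WRITE(dev, FCINTPEND, status) expands to
 * __raw_writel(status, dev->ioaddr + SSS_REG_FCINTPEND).
 */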

#define SSS_AES_REG(dev, reg)		((dev)->aes_ioaddr + SSS_REG_##reg)
#define SSS_AES_WRITE(dev, reg, val)	__raw_writel((val), \
						SSS_AES_REG(dev, reg))

/* HW engine modes */
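/*
 * The flag encoding mirrors the SSS_AES_CONTROL register layout: bit 0
 * selects decryption and bits 1-2 select the chaining mode, which keeps
 * the translation in s5p_aes_crypt_start() straightforward.
 */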
#define FLAGS_AES_DECRYPT		BIT(0)
#define FLAGS_AES_MODE_MASK		_SBF(1, 0x03)
#define FLAGS_AES_CBC			_SBF(1, 0x01)
#define FLAGS_AES_CTR			_SBF(1, 0x02)

#define AES_KEY_LEN			16
#define CRYPTO_QUEUE_LEN		1

/**
 * struct samsung_aes_variant - platform specific SSS driver data
 * @aes_offset: AES register offset from SSS module's base.
 *
 * Specifies the platform-specific configuration of the SSS module.
 * Note: a structure is used for the driver-specific platform data (rather
 * than a bare offset) to allow for future expansion.
 */
struct samsung_aes_variant {
	unsigned int			aes_offset;
};

struct s5p_aes_reqctx {
	unsigned long			mode;
};

struct s5p_aes_ctx {
	struct s5p_aes_dev		*dev;

	uint8_t				aes_key[AES_MAX_KEY_SIZE];
	uint8_t				nonce[CTR_RFC3686_NONCE_SIZE];
	int				keylen;
};

/**
 * struct s5p_aes_dev - Crypto device state container
 * @dev:	Associated device
 * @clk:	Clock for accessing hardware
 * @ioaddr:	Mapped IO memory region
 * @aes_ioaddr:	Per-variant offset for AES block IO memory
 * @irq_fc:	Feed control interrupt line
 * @req:	Crypto request currently handled by the device
 * @ctx:	Configuration for currently handled crypto request
 * @sg_src:	Scatterlist with source data for the block currently handled
 *		by the device, DMA-mapped for the device.
 * @sg_dst:	Scatterlist with destination data for the block currently
 *		handled by the device, DMA-mapped for the device.
 * @sg_src_cpy:	In case of unaligned access, copied scatter list
 *		with source data.
 * @sg_dst_cpy:	In case of unaligned access, copied scatter list
 *		with destination data.
 * @tasklet:	New request scheduling job
 * @queue:	Crypto queue
 * @busy:	Indicates whether the device is currently handling a request,
 *		and thus is using some of the fields of this state (req, ctx,
 *		sg_src/dst and their copies).  This essentially protects
 *		against concurrent access to those fields.
 * @lock:	Lock for protecting both access to device hardware registers
 *		and fields related to current request (including the busy field).
 */
struct s5p_aes_dev {
	struct device			*dev;
	struct clk			*clk;
	void __iomem			*ioaddr;
	void __iomem			*aes_ioaddr;
	int				irq_fc;

	struct ablkcipher_request	*req;
	struct s5p_aes_ctx		*ctx;
	struct scatterlist		*sg_src;
	struct scatterlist		*sg_dst;

	struct scatterlist		*sg_src_cpy;
	struct scatterlist		*sg_dst_cpy;

	struct tasklet_struct		tasklet;
	struct crypto_queue		queue;
	bool				busy;
	spinlock_t			lock;
};

static struct s5p_aes_dev *s5p_dev;

static const struct samsung_aes_variant s5p_aes_data = {
	.aes_offset	= 0x4000,
};

static const struct samsung_aes_variant exynos_aes_data = {
	.aes_offset	= 0x200,
};

static const struct of_device_id s5p_sss_dt_match[] = {
	{
		.compatible = "samsung,s5pv210-secss",
		.data = &s5p_aes_data,
	},
	{
		.compatible = "samsung,exynos4210-secss",
		.data = &exynos_aes_data,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);

static inline const struct samsung_aes_variant *find_s5p_sss_version
				   (const struct platform_device *pdev)
{
	if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) {
		const struct of_device_id *match;

		match = of_match_node(s5p_sss_dt_match,
					pdev->dev.of_node);
		return (const struct samsung_aes_variant *)match->data;
	}
	return (const struct samsung_aes_variant *)
			platform_get_device_id(pdev)->driver_data;
}

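/*
 * Note: writing the DMA length register (FCBRDMAL/FCBTDMAL) starts the
 * transfer immediately, so these helpers must be called only after the
 * engine is otherwise fully configured (see s5p_aes_interrupt()).
 */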
static void s5p_set_dma_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
	SSS_WRITE(dev, FCBRDMAL, sg_dma_len(sg));
}

static void s5p_set_dma_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	SSS_WRITE(dev, FCBTDMAS, sg_dma_address(sg));
	SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
}

static void s5p_free_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist **sg)
{
	int len;

	if (!*sg)
		return;

	len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
	free_pages((unsigned long)sg_virt(*sg), get_order(len));

	kfree(*sg);
	*sg = NULL;
}

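/*
 * Copy @nbytes between a linear buffer and a scatterlist, following
 * scatterwalk_copychunks() semantics: @out == 1 copies @buf into @sg,
 * @out == 0 copies @sg into @buf.
 */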
static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
			    unsigned int nbytes, int out)
{
	struct scatter_walk walk;

	if (!nbytes)
		return;

	scatterwalk_start(&walk, sg);
	scatterwalk_copychunks(buf, &walk, nbytes, out);
	scatterwalk_done(&walk, out, 0);
}

static void s5p_sg_done(struct s5p_aes_dev *dev)
{
	if (dev->sg_dst_cpy) {
		dev_dbg(dev->dev,
			"Copying %d bytes of output data back to original place\n",
			dev->req->nbytes);
		s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst,
				dev->req->nbytes, 1);
	}
	s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
	s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
}

/* Calls the completion.  Must not be called with dev->lock held. */
static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
{
	dev->req->base.complete(&dev->req->base, err);
}

static void s5p_unset_outdata(struct s5p_aes_dev *dev)
{
	dma_unmap_sg(dev->dev, dev->sg_dst, 1, DMA_FROM_DEVICE);
}

static void s5p_unset_indata(struct s5p_aes_dev *dev)
{
	dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE);
}

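/*
 * Allocate a physically contiguous bounce buffer, rounded up to a whole
 * number of AES blocks, and copy the source scatterlist into it.  The
 * allocations use GFP_ATOMIC because this path runs from the tasklet,
 * i.e. in atomic context.
 */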
static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
			    struct scatterlist **dst)
{
	void *pages;
	int len;

	*dst = kmalloc(sizeof(**dst), GFP_ATOMIC);
	if (!*dst)
		return -ENOMEM;

	len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
	pages = (void *)__get_free_pages(GFP_ATOMIC, get_order(len));
	if (!pages) {
		kfree(*dst);
		*dst = NULL;
		return -ENOMEM;
	}

	s5p_sg_copy_buf(pages, src, dev->req->nbytes, 0);

	sg_init_table(*dst, 1);
	sg_set_buf(*dst, pages, len);

	return 0;
}

static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	int err;

	if (!sg->length) {
		err = -EINVAL;
		goto exit;
	}

	err = dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE);
	if (!err) {
		err = -ENOMEM;
		goto exit;
	}

	dev->sg_dst = sg;
	err = 0;

exit:
	return err;
}

static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	int err;

	if (!sg->length) {
		err = -EINVAL;
		goto exit;
	}

	err = dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE);
	if (!err) {
		err = -ENOMEM;
		goto exit;
	}

	dev->sg_src = sg;
	err = 0;

exit:
	return err;
}

/*
 * Returns -ERRNO on error (mapping of new data failed).
 * On success returns:
 *  - 0 if there is no more data,
 *  - 1 if new transmitting (output) data is ready and its address+length
 *     have to be written to device (by calling s5p_set_dma_outdata()).
 */
static int s5p_aes_tx(struct s5p_aes_dev *dev)
{
	int ret = 0;

	s5p_unset_outdata(dev);

	if (!sg_is_last(dev->sg_dst)) {
		ret = s5p_set_outdata(dev, sg_next(dev->sg_dst));
		if (!ret)
			ret = 1;
	}

	return ret;
}

/*
 * Returns -ERRNO on error (mapping of new data failed).
 * On success returns:
 *  - 0 if there is no more data,
 *  - 1 if new receiving (input) data is ready and its address+length
 *     have to be written to device (by calling s5p_set_dma_indata()).
 */
static int s5p_aes_rx(struct s5p_aes_dev *dev)
{
	int ret = 0;

	s5p_unset_indata(dev);

	if (!sg_is_last(dev->sg_src)) {
		ret = s5p_set_indata(dev, sg_next(dev->sg_src));
		if (!ret)
			ret = 1;
	}

	return ret;
}

static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
	int err_dma_tx = 0;
	int err_dma_rx = 0;
	bool tx_end = false;
	unsigned long flags;
	uint32_t status;
	int err;

	spin_lock_irqsave(&dev->lock, flags);

	/*
	 * Handle rx or tx interrupt.  If there is still data (the scatterlist
	 * has not reached its end), map the next scatterlist entry.  If that
	 * mapping fails, s5p_aes_complete() is called with the error.
	 *
	 * If there is no more data in the tx scatterlist, call
	 * s5p_aes_complete() and schedule a new tasklet.
	 */
	status = SSS_READ(dev, FCINTSTAT);
	if (status & SSS_FCINTSTAT_BRDMAINT)
		err_dma_rx = s5p_aes_rx(dev);

	if (status & SSS_FCINTSTAT_BTDMAINT) {
		if (sg_is_last(dev->sg_dst))
			tx_end = true;
		err_dma_tx = s5p_aes_tx(dev);
	}

	SSS_WRITE(dev, FCINTPEND, status);

	if (err_dma_rx < 0) {
		err = err_dma_rx;
		goto error;
	}
	if (err_dma_tx < 0) {
		err = err_dma_tx;
		goto error;
	}

	if (tx_end) {
		s5p_sg_done(dev);

		spin_unlock_irqrestore(&dev->lock, flags);

		s5p_aes_complete(dev, 0);
		/* Device is still busy */
		tasklet_schedule(&dev->tasklet);
	} else {
		/*
		 * Writing length of DMA block (either receiving or
		 * transmitting) will start the operation immediately, so this
		 * should be done at the end (even after clearing pending
		 * interrupts to not miss the interrupt).
		 */
		if (err_dma_tx == 1)
			s5p_set_dma_outdata(dev, dev->sg_dst);
		if (err_dma_rx == 1)
			s5p_set_dma_indata(dev, dev->sg_src);

		spin_unlock_irqrestore(&dev->lock, flags);
	}

	return IRQ_HANDLED;

error:
	s5p_sg_done(dev);
	dev->busy = false;
	spin_unlock_irqrestore(&dev->lock, flags);
	s5p_aes_complete(dev, err);

	return IRQ_HANDLED;
}

static void s5p_set_aes(struct s5p_aes_dev *dev,
			uint8_t *key, uint8_t *iv, unsigned int keylen)
{
	void __iomem *keystart;

	if (iv)
		memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);

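	/*
	 * Shorter keys are written into the upper words of the 8-word key
	 * register bank so that the key always ends at word 7: a 256-bit
	 * key fills words 0-7, a 192-bit key words 2-7, a 128-bit key
	 * words 4-7.
	 */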
	if (keylen == AES_KEYSIZE_256)
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
	else if (keylen == AES_KEYSIZE_192)
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(2);
	else
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(4);

	memcpy_toio(keystart, key, keylen);
}

static bool s5p_is_sg_aligned(struct scatterlist *sg)
{
	while (sg) {
		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return false;
		sg = sg_next(sg);
	}

	return true;
}

static int s5p_set_indata_start(struct s5p_aes_dev *dev,
				struct ablkcipher_request *req)
{
	struct scatterlist *sg;
	int err;

	dev->sg_src_cpy = NULL;
	sg = req->src;
	if (!s5p_is_sg_aligned(sg)) {
		dev_dbg(dev->dev,
			"At least one unaligned source scatter list, making a copy\n");
		err = s5p_make_sg_cpy(dev, sg, &dev->sg_src_cpy);
		if (err)
			return err;

		sg = dev->sg_src_cpy;
	}

	err = s5p_set_indata(dev, sg);
	if (err) {
		s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
		return err;
	}

	return 0;
}

static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
				struct ablkcipher_request *req)
{
	struct scatterlist *sg;
	int err;

	dev->sg_dst_cpy = NULL;
	sg = req->dst;
	if (!s5p_is_sg_aligned(sg)) {
		dev_dbg(dev->dev,
			"At least one unaligned dest scatter list, making a copy\n");
		err = s5p_make_sg_cpy(dev, sg, &dev->sg_dst_cpy);
		if (err)
			return err;

		sg = dev->sg_dst_cpy;
	}

	err = s5p_set_outdata(dev, sg);
	if (err) {
		s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
		return err;
	}

	return 0;
}

static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
{
	struct ablkcipher_request *req = dev->req;
	uint32_t aes_control;
	unsigned long flags;
	int err;

	aes_control = SSS_AES_KEY_CHANGE_MODE;
	if (mode & FLAGS_AES_DECRYPT)
		aes_control |= SSS_AES_MODE_DECRYPT;

	if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC)
		aes_control |= SSS_AES_CHAIN_MODE_CBC;
	else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR)
		aes_control |= SSS_AES_CHAIN_MODE_CTR;

	if (dev->ctx->keylen == AES_KEYSIZE_192)
		aes_control |= SSS_AES_KEY_SIZE_192;
	else if (dev->ctx->keylen == AES_KEYSIZE_256)
		aes_control |= SSS_AES_KEY_SIZE_256;

	aes_control |= SSS_AES_FIFO_MODE;

	/* alternatively, byte swapping could be done on the DMA side instead */
	aes_control |= SSS_AES_BYTESWAP_DI
		    |  SSS_AES_BYTESWAP_DO
		    |  SSS_AES_BYTESWAP_IV
		    |  SSS_AES_BYTESWAP_KEY
		    |  SSS_AES_BYTESWAP_CNT;

	spin_lock_irqsave(&dev->lock, flags);

	SSS_WRITE(dev, FCINTENCLR,
		  SSS_FCINTENCLR_BTDMAINTENCLR | SSS_FCINTENCLR_BRDMAINTENCLR);
	SSS_WRITE(dev, FCFIFOCTRL, 0x00);

	err = s5p_set_indata_start(dev, req);
	if (err)
		goto indata_error;

	err = s5p_set_outdata_start(dev, req);
	if (err)
		goto outdata_error;

	SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
	s5p_set_aes(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen);

	s5p_set_dma_indata(dev,  dev->sg_src);
	s5p_set_dma_outdata(dev, dev->sg_dst);

	SSS_WRITE(dev, FCINTENSET,
		  SSS_FCINTENSET_BTDMAINTENSET | SSS_FCINTENSET_BRDMAINTENSET);

	spin_unlock_irqrestore(&dev->lock, flags);

	return;

outdata_error:
	s5p_unset_indata(dev);

indata_error:
	s5p_sg_done(dev);
	dev->busy = false;
	spin_unlock_irqrestore(&dev->lock, flags);
	s5p_aes_complete(dev, err);
}

static void s5p_tasklet_cb(unsigned long data)
{
	struct s5p_aes_dev *dev = (struct s5p_aes_dev *)data;
	struct crypto_async_request *async_req, *backlog;
	struct s5p_aes_reqctx *reqctx;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	backlog   = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);

	if (!async_req) {
		dev->busy = false;
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	dev->req = ablkcipher_request_cast(async_req);
	dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
	reqctx   = ablkcipher_request_ctx(dev->req);

	s5p_aes_crypt_start(dev, reqctx->mode);
}

static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
			      struct ablkcipher_request *req)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&dev->lock, flags);
	err = ablkcipher_enqueue_request(&dev->queue, req);
	if (dev->busy) {
		spin_unlock_irqrestore(&dev->lock, flags);
		goto exit;
	}
	dev->busy = true;

	spin_unlock_irqrestore(&dev->lock, flags);

	tasklet_schedule(&dev->tasklet);

exit:
	return err;
}

static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req);
	struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct s5p_aes_dev *dev = ctx->dev;

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		dev_err(dev->dev, "request size is not a whole number of AES blocks\n");
		return -EINVAL;
	}

	reqctx->mode = mode;

	return s5p_aes_handle_req(dev, req);
}

static int s5p_aes_setkey(struct crypto_ablkcipher *cipher,
			  const uint8_t *key, unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 &&
	    keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->aes_key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int s5p_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, 0);
}

static int s5p_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT);
}

static int s5p_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_CBC);
}

static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
}

static int s5p_aes_cra_init(struct crypto_tfm *tfm)
{
	struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->dev = s5p_dev;
	tfm->crt_ablkcipher.reqsize = sizeof(struct s5p_aes_reqctx);

	return 0;
}

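/*
 * For reference, a minimal (hypothetical) sketch of how a kernel consumer
 * of this era's ablkcipher API would reach these algorithms; key, sg_src,
 * sg_dst, nbytes, iv and my_complete are illustrative, not part of this
 * driver:
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	struct ablkcipher_request *req =
 *		ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *					my_complete, NULL);
 *	ablkcipher_request_set_crypt(req, sg_src, sg_dst, nbytes, iv);
 *	crypto_ablkcipher_encrypt(req);  (typically returns -EINPROGRESS)
 */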
static struct crypto_alg algs[] = {
	{
		.cra_name		= "ecb(aes)",
		.cra_driver_name	= "ecb-aes-s5p",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct s5p_aes_ctx),
		.cra_alignmask		= 0x0f,
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_init		= s5p_aes_cra_init,
		.cra_u.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= s5p_aes_setkey,
			.encrypt	= s5p_aes_ecb_encrypt,
			.decrypt	= s5p_aes_ecb_decrypt,
		}
	},
	{
		.cra_name		= "cbc(aes)",
		.cra_driver_name	= "cbc-aes-s5p",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct s5p_aes_ctx),
		.cra_alignmask		= 0x0f,
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_init		= s5p_aes_cra_init,
		.cra_u.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= s5p_aes_setkey,
			.encrypt	= s5p_aes_cbc_encrypt,
			.decrypt	= s5p_aes_cbc_decrypt,
		}
	},
};

static int s5p_aes_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int i, j, err = -ENODEV;
	const struct samsung_aes_variant *variant;
	struct s5p_aes_dev *pdata;
	struct resource *res;

	if (s5p_dev)
		return -EEXIST;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pdata->ioaddr))
		return PTR_ERR(pdata->ioaddr);

	variant = find_s5p_sss_version(pdev);

	pdata->clk = devm_clk_get(dev, "secss");
	if (IS_ERR(pdata->clk)) {
		dev_err(dev, "failed to find secss clock source\n");
		return -ENOENT;
	}

	err = clk_prepare_enable(pdata->clk);
	if (err < 0) {
		dev_err(dev, "Enabling SSS clk failed, err %d\n", err);
		return err;
	}

	spin_lock_init(&pdata->lock);

	pdata->aes_ioaddr = pdata->ioaddr + variant->aes_offset;

	pdata->irq_fc = platform_get_irq(pdev, 0);
	if (pdata->irq_fc < 0) {
		err = pdata->irq_fc;
		dev_warn(dev, "feed control interrupt is not available.\n");
		goto err_irq;
	}
	err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
					s5p_aes_interrupt, IRQF_ONESHOT,
					pdev->name, pdev);
	if (err < 0) {
		dev_warn(dev, "feed control interrupt could not be requested.\n");
		goto err_irq;
	}

	pdata->busy = false;
	pdata->dev = dev;
	platform_set_drvdata(pdev, pdata);
	s5p_dev = pdata;

	tasklet_init(&pdata->tasklet, s5p_tasklet_cb, (unsigned long)pdata);
	crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);

	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		err = crypto_register_alg(&algs[i]);
		if (err)
			goto err_algs;
	}

	dev_info(dev, "s5p-sss driver registered\n");

	return 0;

err_algs:
	dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name, err);

	for (j = 0; j < i; j++)
		crypto_unregister_alg(&algs[j]);

	tasklet_kill(&pdata->tasklet);

err_irq:
	clk_disable_unprepare(pdata->clk);

	s5p_dev = NULL;

	return err;
}

static int s5p_aes_remove(struct platform_device *pdev)
{
	struct s5p_aes_dev *pdata = platform_get_drvdata(pdev);
	int i;

	if (!pdata)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_alg(&algs[i]);

	tasklet_kill(&pdata->tasklet);

	clk_disable_unprepare(pdata->clk);

	s5p_dev = NULL;

	return 0;
}

static struct platform_driver s5p_aes_crypto = {
	.probe	= s5p_aes_probe,
	.remove	= s5p_aes_remove,
	.driver	= {
		.name	= "s5p-secss",
		.of_match_table = s5p_sss_dt_match,
	},
};

module_platform_driver(s5p_aes_crypto);

MODULE_DESCRIPTION("S5PV210 AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Vladimir Zapolskiy <vzapolskiy@gmail.com>");