/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"

static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
			   bool is_sec1)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	if (!is_sec1)
		ptr->eptr = upper_32_bits(dma_addr);
}

static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
			       bool is_sec1)
{
	if (is_sec1) {
		ptr->res = 0;
		ptr->len1 = cpu_to_be16(len);
	} else {
		ptr->len = cpu_to_be16(len);
	}
}

static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
					   bool is_sec1)
{
	if (is_sec1)
		return be16_to_cpu(ptr->len1);
	else
		return be16_to_cpu(ptr->len);
}

static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent = 0;
}

/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned int len, void *data,
				   enum dma_data_direction dir)
{
	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);
	to_talitos_ptr(ptr, dma_addr, is_sec1);
	to_talitos_ptr_extent_clear(ptr, is_sec1);
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}

static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
		          TALITOS_CCCR_LO_IWSE);

	return 0;
}

static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}

/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
		          TALITOS_MDEUICR_LO_ICE);

	return 0;
}

/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:	the SEC device to be used
 * @ch:		the SEC device channel to be used
 * @desc:	the descriptor to be processed by the device
 * @callback:	whom to call when processing is complete
 * @context:	a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		desc->hdr1 = desc->hdr;
		desc->next_desc = 0;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
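
	/*
	 * request->desc is written last, after the barrier below, so that
	 * flush_channel() never observes a partially filled fifo entry.
	 */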

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
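
/*
 * Illustrative use only (names below are hypothetical): a client builds a
 * dma-mapped descriptor and then calls
 *
 *	err = talitos_submit(dev, ch, desc, my_done_callback, my_req);
 *	if (err != -EINPROGRESS)
 *		... handle submission failure ...
 *
 * my_done_callback() runs from the done tasklet and must check the status
 * reported in the descriptor header before completing my_req.
 */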

/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}

/*
 * process completed requests for channels that have done status
 */
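/*
 * SEC1 and SEC2+ place the per-channel done bits at different positions in
 * ISR/IMR, and SEC1's IMR polarity is inverted (a set bit masks the
 * interrupt), hence the two macro variants below.
 */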
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);			\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);			\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);			\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);			\
									\
out:									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)

#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)

/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}

/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}

/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
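		/*
		 * SEC1 reports channel errors at ISR bits 29/31/17/19 for
		 * channels 0-3; SEC2+ uses the odd bits 1/3/5/7.
		 */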
		if (is_sec1) {
			/* bits 29, 31, 17, 19 */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointeur not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (!is_sec1) {
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			       TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or "
				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}

#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)

#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)

/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}

static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;

	priv->rng.name		= dev_driver_string(dev),
	priv->rng.init		= talitos_rng_init,
	priv->rng.data_present	= talitos_rng_data_present,
	priv->rng.data_read	= talitos_rng_data_read,
	priv->rng.priv		= (unsigned long)dev;

	err = hwrng_register(&priv->rng);
	if (!err)
		priv->rng_registered = true;

	return err;
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	if (!priv->rng_registered)
		return;

	hwrng_unregister(&priv->rng);
	priv->rng_registered = false;
}

/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
#define TALITOS_MAX_KEY_SIZE		96
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
};

#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[HASH_MAX_BLOCK_SIZE];
	u8 bufnext[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};

static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;
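
	/*
	 * ctx->key holds the auth key followed by the enc key; ipsec_esp()
	 * points ptr[0] at the auth part and ptr[3] at the enc part.
	 */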

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;

	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * talitos_edesc - s/w-extended descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @src_chained: whether src is chained or not
 * @dst_chained: whether dst is chained or not
 * @icv_ool: whether ICV is out-of-line
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl/buf
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
 * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int src_nents;
	int dst_nents;
	bool src_chained;
	bool dst_chained;
	bool icv_ool;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	union {
		struct talitos_ptr link_tbl[0];
		u8 buf[0];
	};
};

static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
			  unsigned int nents, enum dma_data_direction dir,
			  bool chained)
{
	if (unlikely(chained))
		while (sg) {
			dma_map_sg(dev, sg, 1, dir);
			sg = sg_next(sg);
		}
	else
		dma_map_sg(dev, sg, nents, dir);
	return nents;
}

static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
				   enum dma_data_direction dir)
{
	while (sg) {
		dma_unmap_sg(dev, sg, 1, dir);
		sg = sg_next(sg);
	}
}

static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst)
{
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (src != dst) {
		if (edesc->src_chained)
			talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
		else
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst) {
			if (edesc->dst_chained)
				talitos_unmap_sg_chain(dev, dst,
						       DMA_FROM_DEVICE);
			else
				dma_unmap_sg(dev, dst, dst_nents,
					     DMA_FROM_DEVICE);
		}
	} else
		if (edesc->src_chained)
			talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
		else
			dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}

static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->icv_ool) {
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - authsize,
		       icvdata, authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}

static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	char *oicv, *icv;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		icv = (char *)sg_virt(sg) + sg->length - authsize;

		if (edesc->dma_len) {
			oicv = (char *)&edesc->link_tbl[edesc->src_nents +
							edesc->dst_nents + 2];
			if (edesc->icv_ool)
				icv = oicv + authsize;
		} else
			oicv = (char *)&edesc->link_tbl[0];

		err = memcmp(oicv, icv, authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
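/*
 * Each link table entry holds a bus address and length; entries are skipped
 * until @offset bytes of the scatterlist have been consumed, and the last
 * entry is tagged with DESC_PTR_LNKTBL_RETURN to mark the end of the chain.
 */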
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
				 unsigned int offset, int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;
	int count = 0;

	while (cryptlen && sg && n_sg--) {
		unsigned int len = sg_dma_len(sg);

		if (offset >= len) {
			offset -= len;
			goto next;
		}

		len -= offset;

		if (len > cryptlen)
			len = cryptlen;

		to_talitos_ptr(link_tbl_ptr + count,
			       sg_dma_address(sg) + offset, 0);
		link_tbl_ptr[count].len = cpu_to_be16(len);
		link_tbl_ptr[count].j_extent = 0;
		count++;
		cryptlen -= len;
		offset = 0;

next:
		sg = sg_next(sg);
	}

	/* tag end of link table */
	if (count > 0)
		link_tbl_ptr[count - 1].j_extent = DESC_PTR_LNKTBL_RETURN;

	return count;
}

static inline int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
				 int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	return sg_to_link_tbl_offset(sg, sg_count, 0, cryptlen,
				     link_tbl_ptr);
}

/*
 * fill in and submit ipsec_esp descriptor
 */
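/*
 * Pointer usage in the ipsec_esp descriptor (as built below):
 *	ptr[0] HMAC key, ptr[1] associated data, ptr[2] IV in,
 *	ptr[3] cipher key, ptr[4] cipher in (ICV as extent),
 *	ptr[5] cipher out (+ ICV), ptr[6] IV out.
 */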
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     void (*callback)(struct device *dev,
				      struct talitos_desc *desc,
				      void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int tbl_off = 0;
	int sg_count, ret;
	int sg_link_tbl_len;

	/* hmac key */
	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
			       DMA_TO_DEVICE);

	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ?: 1,
				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
							   : DMA_TO_DEVICE,
				  edesc->src_chained);

	/* hmac data */
	desc->ptr[1].len = cpu_to_be16(areq->assoclen);
	if (sg_count > 1 &&
	    (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
					 areq->assoclen,
					 &edesc->link_tbl[tbl_off])) > 1) {
		tbl_off += ret;

		to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
			       sizeof(struct talitos_ptr), 0);
		desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;

		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	} else {
		to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
		desc->ptr[1].j_extent = 0;
	}

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0);
	desc->ptr[2].len = cpu_to_be16(ivsize);
	desc->ptr[2].j_extent = 0;

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
			       (char *)&ctx->key + ctx->authkeylen,
			       DMA_TO_DEVICE);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	desc->ptr[4].len = cpu_to_be16(cryptlen);
	desc->ptr[4].j_extent = authsize;

	sg_link_tbl_len = cryptlen;
	if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
		sg_link_tbl_len += authsize;

	if (sg_count > 1 &&
	    (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen,
					 sg_link_tbl_len,
					 &edesc->link_tbl[tbl_off])) > 1) {
		tbl_off += ret;
		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
		to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
					      tbl_off *
					      sizeof(struct talitos_ptr), 0);
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len,
					   DMA_BIDIRECTIONAL);
	} else
		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);

	/* cipher out */
	desc->ptr[5].len = cpu_to_be16(cryptlen);
	desc->ptr[5].j_extent = authsize;

	if (areq->src != areq->dst)
		sg_count = talitos_map_sg(dev, areq->dst,
					  edesc->dst_nents ? : 1,
					  DMA_FROM_DEVICE, edesc->dst_chained);

	edesc->icv_ool = false;

	if (sg_count > 1 &&
	    (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count,
					      areq->assoclen, cryptlen,
					      &edesc->link_tbl[tbl_off])) >
	    1) {
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
			       tbl_off * sizeof(struct talitos_ptr), 0);

		/* Add an entry to the link table for ICV data */
		tbl_ptr += sg_count - 1;
		tbl_ptr->j_extent = 0;
		tbl_ptr++;
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
		tbl_ptr->len = cpu_to_be16(authsize);

		/* icv data follows link tables */
		to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
					(edesc->src_nents + edesc->dst_nents +
					 2) * sizeof(struct talitos_ptr) +
					authsize, 0);
		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

		edesc->icv_ool = true;
	} else
		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

/*
 * derive number of elements in scatterlist
 */
static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained)
{
	struct scatterlist *sg = sg_list;
	int sg_nents = 0;

	*chained = false;
	while (nbytes > 0 && sg) {
		sg_nents++;
		nbytes -= sg->length;
		if (!sg_is_last(sg) && (sg + 1)->length == 0)
			*chained = true;
		sg = sg_next(sg);
	}

	return sg_nents;
}

/*
 * allocate and map the extended descriptor
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags,
						 bool encrypt)
{
	struct talitos_edesc *edesc;
	int src_nents, dst_nents, alloc_len, dma_len;
	bool src_chained = false, dst_chained = false;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;

	if (cryptlen + authsize > max_len) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (ivsize)
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);

	if (!dst || dst == src) {
		src_nents = sg_count(src, assoclen + cryptlen + authsize,
				     &src_chained);
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = dst ? src_nents : 0;
	} else { /* dst && dst != src*/
		src_nents = sg_count(src, assoclen + cryptlen +
					  (encrypt ? 0 : authsize),
				     &src_chained);
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = sg_count(dst, assoclen + cryptlen +
					  (encrypt ? authsize : 0),
				     &dst_chained);
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for AD and generated ICV (+ 2),
	 * and space for two sets of ICVs (stashed and generated)
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (src_nents || dst_nents) {
		if (is_sec1)
			dma_len = (src_nents ? cryptlen : 0) +
				  (dst_nents ? cryptlen : 0);
		else
			dma_len = (src_nents + dst_nents + 2) *
				  sizeof(struct talitos_ptr) + authsize * 2;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
		alloc_len += icv_stashing ? authsize : 0;
	}

	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc) {
		if (iv_dma)
			dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);

		dev_err(dev, "could not allocate edescriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->src_chained = src_chained;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len)
		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);

	return edesc;
}

static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
					      int icv_stashing, bool encrypt)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   iv, areq->assoclen, areq->cryptlen,
				   authsize, ivsize, icv_stashing,
				   areq->base.flags, encrypt);
}

static int aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 0, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
}

static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	req->cryptlen -= authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */
		edesc->desc.hdr_lo = 0;

		return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	if (edesc->dma_len)
		icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
	else
		icvdata = &edesc->link_tbl[0];

	sg = sg_last(req->src, edesc->src_nents ? : 1);

	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);

	return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
}

static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
			     const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);

	memcpy(&ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
				 struct scatterlist *dst, unsigned int len,
				 struct talitos_edesc *edesc)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		if (!edesc->src_nents) {
			dma_unmap_sg(dev, src, 1,
				     dst != src ? DMA_TO_DEVICE
						: DMA_BIDIRECTIONAL);
		}
		if (dst && edesc->dst_nents) {
			dma_sync_single_for_device(dev,
						   edesc->dma_link_tbl + len,
						   len, DMA_FROM_DEVICE);
			sg_copy_from_buffer(dst, edesc->dst_nents ? : 1,
					    edesc->buf + len, len);
		} else if (dst && dst != src) {
			dma_unmap_sg(dev, dst, 1, DMA_FROM_DEVICE);
		}
	} else {
		talitos_sg_unmap(dev, edesc, src, dst);
	}
}

static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

static void ablkcipher_done(struct device *dev,
			    struct talitos_desc *desc, void *context,
			    int err)
{
	struct ablkcipher_request *areq = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	common_nonsnoop_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}

int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
			  unsigned int len, struct talitos_edesc *edesc,
			  enum dma_data_direction dir, struct talitos_ptr *ptr)
{
	int sg_count;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);

	if (is_sec1) {
		sg_count = edesc->src_nents ? : 1;

		if (sg_count == 1) {
			dma_map_sg(dev, src, 1, dir);
			to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
		} else {
			sg_copy_to_buffer(src, sg_count, edesc->buf, len);
			to_talitos_ptr(ptr, edesc->dma_link_tbl, is_sec1);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   len, DMA_TO_DEVICE);
		}
	} else {
		to_talitos_ptr_extent_clear(ptr, is_sec1);

		sg_count = talitos_map_sg(dev, src, edesc->src_nents ? : 1, dir,
					  edesc->src_chained);

		if (sg_count == 1) {
			to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
		} else {
			sg_count = sg_to_link_tbl(src, sg_count, len,
						  &edesc->link_tbl[0]);
			if (sg_count > 1) {
				to_talitos_ptr(ptr, edesc->dma_link_tbl, 0);
				ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
				dma_sync_single_for_device(dev,
							   edesc->dma_link_tbl,
							   edesc->dma_len,
							   DMA_BIDIRECTIONAL);
			} else {
				/* Only one segment now, so no link tbl needed*/
				to_talitos_ptr(ptr, sg_dma_address(src),
					       is_sec1);
			}
		}
	}
	return sg_count;
}

void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
			    unsigned int len, struct talitos_edesc *edesc,
			    enum dma_data_direction dir,
			    struct talitos_ptr *ptr, int sg_count)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (dir != DMA_NONE)
		sg_count = talitos_map_sg(dev, dst, edesc->dst_nents ? : 1,
					  dir, edesc->dst_chained);

	to_talitos_ptr_len(ptr, len, is_sec1);

	if (is_sec1) {
		if (sg_count == 1) {
			if (dir != DMA_NONE)
				dma_map_sg(dev, dst, 1, dir);
			to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
		} else {
			to_talitos_ptr(ptr, edesc->dma_link_tbl + len, is_sec1);
			dma_sync_single_for_device(dev,
						   edesc->dma_link_tbl + len,
						   len, DMA_FROM_DEVICE);
		}
	} else {
		to_talitos_ptr_extent_clear(ptr, is_sec1);

		if (sg_count == 1) {
			to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
		} else {
			struct talitos_ptr *link_tbl_ptr =
				&edesc->link_tbl[edesc->src_nents + 1];

			to_talitos_ptr(ptr, edesc->dma_link_tbl +
					    (edesc->src_nents + 1) *
					     sizeof(struct talitos_ptr), 0);
			ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
			sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   edesc->dma_len,
						   DMA_BIDIRECTIONAL);
		}
	}
}

static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct ablkcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->nbytes;
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	int sg_count, ret;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
	to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
	to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
			       (char *)&ctx->key, DMA_TO_DEVICE);

	/*
	 * cipher in
	 */
	sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
					 (areq->src == areq->dst) ?
					  DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
					  &desc->ptr[3]);

	/* cipher out */
	map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
			       (areq->src == areq->dst) ? DMA_NONE
							: DMA_FROM_DEVICE,
			       &desc->ptr[4], sg_count);

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
						    areq, bool encrypt)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
				   areq->base.flags, encrypt);
}

static int ablkcipher_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static int ablkcipher_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc);

	/* When using hashctx-in, must unmap it. */
	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
					 DMA_TO_DEVICE);

	if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
					 DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

}

static void ahash_done(struct device *dev,
		       struct talitos_desc *desc, void *context,
		       int err)
{
	struct ahash_request *areq = context;
	struct talitos_edesc *edesc =
		 container_of(desc, struct talitos_edesc, desc);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	if (!req_ctx->last && req_ctx->to_hash_later) {
		/* Position any partial block for next update/final/finup */
		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
		req_ctx->nbuf = req_ctx->to_hash_later;
	}
	common_nonsnoop_hash_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}

/*
 * SEC1 cannot hash a zero-length message, so we do the padding
 * ourselves and submit a pre-padded block.
 */
void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
			       struct talitos_edesc *edesc,
			       struct talitos_ptr *ptr)
{
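	/* MD5/SHA padding of an empty message: 0x80 then zeroes */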
	static u8 padded_hash[64] = {
		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	};

	pr_err_once("Bug in SEC1, padding ourself\n");
	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
			       (char *)padded_hash, DMA_TO_DEVICE);
}

static int common_nonsnoop_hash(struct talitos_edesc *edesc,
				struct ahash_request *areq, unsigned int length,
				void (*callback) (struct device *dev,
						  struct talitos_desc *desc,
						  void *context, int error))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	int ret;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* hash context in */
	if (!req_ctx->first || req_ctx->swinit) {
		map_single_talitos_ptr(dev, &desc->ptr[1],
				       req_ctx->hw_context_size,
				       (char *)req_ctx->hw_context,
				       DMA_TO_DEVICE);
		req_ctx->swinit = 0;
	} else {
		desc->ptr[1] = zero_entry;
		/* Indicate next op is not the first. */
		req_ctx->first = 0;
	}

	/* HMAC key */
	if (ctx->keylen)
		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
				       (char *)&ctx->key, DMA_TO_DEVICE);
	else
		desc->ptr[2] = zero_entry;

	/*
	 * data in
	 */
	map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc,
			      DMA_TO_DEVICE, &desc->ptr[3]);

	/* fifth DWORD empty */
	desc->ptr[4] = zero_entry;

	/* hash/HMAC out -or- hash context out */
	if (req_ctx->last)
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       crypto_ahash_digestsize(tfm),
				       areq->result, DMA_FROM_DEVICE);
	else
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       req_ctx->hw_context_size,
				       req_ctx->hw_context, DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
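		/* SEC1 cannot hash an empty block; use a pre-padded one */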
		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_hash_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
					       unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
				   nbytes, 0, 0, 0, areq->base.flags, false);
}

static int ahash_init(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	/* Initialize the context */
	req_ctx->nbuf = 0;
	req_ctx->first = 1; /* first indicates h/w must init its context */
	req_ctx->swinit = 0; /* assume h/w init of context */
	req_ctx->hw_context_size =
		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;

	return 0;
}

/*
 * on h/w without explicit sha224 support, we initialize h/w context
 * manually with sha224 constants, and tell it to run sha256.
 */
static int ahash_init_sha224_swinit(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	ahash_init(areq);
	req_ctx->swinit = 1; /* prevent h/w init of context with sha256 values */

	req_ctx->hw_context[0] = SHA224_H0;
	req_ctx->hw_context[1] = SHA224_H1;
	req_ctx->hw_context[2] = SHA224_H2;
	req_ctx->hw_context[3] = SHA224_H3;
	req_ctx->hw_context[4] = SHA224_H4;
	req_ctx->hw_context[5] = SHA224_H5;
	req_ctx->hw_context[6] = SHA224_H6;
	req_ctx->hw_context[7] = SHA224_H7;

	/* init 64-bit count */
	req_ctx->hw_context[8] = 0;
	req_ctx->hw_context[9] = 0;

	return 0;
}

static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int nbytes_to_hash;
	unsigned int to_hash_later;
	unsigned int nsg;
	bool chained;

	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
		/* Buffer up to one whole block */
		sg_copy_to_buffer(areq->src,
				  sg_count(areq->src, nbytes, &chained),
				  req_ctx->buf + req_ctx->nbuf, nbytes);
		req_ctx->nbuf += nbytes;
		return 0;
	}

	/* At least (blocksize + 1) bytes are available to hash */
	nbytes_to_hash = nbytes + req_ctx->nbuf;
	to_hash_later = nbytes_to_hash & (blocksize - 1);

	if (req_ctx->last)
		to_hash_later = 0;
	else if (to_hash_later)
		/* There is a partial block. Hash the full block(s) now */
		nbytes_to_hash -= to_hash_later;
	else {
		/* Keep one block buffered */
		nbytes_to_hash -= blocksize;
		to_hash_later = blocksize;
	}

	/* Chain in any previously buffered data */
	if (req_ctx->nbuf) {
		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
		sg_init_table(req_ctx->bufsl, nsg);
		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
		if (nsg > 1)
			scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
		req_ctx->psrc = req_ctx->bufsl;
	} else
		req_ctx->psrc = areq->src;

	if (to_hash_later) {
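		/* save the trailing partial block for the next update/final */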
		int nents = sg_count(areq->src, nbytes, &chained);
		sg_pcopy_to_buffer(areq->src, nents,
				      req_ctx->bufnext,
				      to_hash_later,
				      nbytes - to_hash_later);
	}
	req_ctx->to_hash_later = to_hash_later;

	/* Allocate extended descriptor */
	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template;

	/* On last one, request SEC to pad; otherwise continue */
	if (req_ctx->last)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
	else
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;

	/* request SEC to INIT hash. */
	if (req_ctx->first && !req_ctx->swinit)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;

	/* When the tfm context has a keylen, it's an HMAC.
	 * A first or last (i.e. not middle) descriptor must request HMAC.
	 */
	if (ctx->keylen && (req_ctx->first || req_ctx->last))
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;

	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
				    ahash_done);
}

static int ahash_update(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 0;

	return ahash_process_req(areq, areq->nbytes);
}

static int ahash_final(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 1;

	return ahash_process_req(areq, 0);
}

static int ahash_finup(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 1;

	return ahash_process_req(areq, areq->nbytes);
}

static int ahash_digest(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

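	/* digest: re-init the context and hash the request in one pass */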
	ahash->init(areq);
	req_ctx->last = 1;

	return ahash_process_req(areq, areq->nbytes);
}

struct keyhash_result {
	struct completion completion;
	int err;
};

static void keyhash_complete(struct crypto_async_request *req, int err)
{
	struct keyhash_result *res = req->data;

	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->completion);
}

static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
		   u8 *hash)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));

	struct scatterlist sg[1];
	struct ahash_request *req;
	struct keyhash_result hresult;
	int ret;

	init_completion(&hresult.completion);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* Keep tfm keylen == 0 during hash of the long key */
	ctx->keylen = 0;
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   keyhash_complete, &hresult);

	sg_init_one(&sg[0], key, keylen);

	ahash_request_set_crypt(req, sg, hash, keylen);
	ret = crypto_ahash_digest(req);
	switch (ret) {
	case 0:
		break;
	case -EINPROGRESS:
	case -EBUSY:
		ret = wait_for_completion_interruptible(
			&hresult.completion);
		if (!ret)
			ret = hresult.err;
		break;
	default:
		break;
	}
	ahash_request_free(req);

	return ret;
}

static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int keysize = keylen;
	u8 hash[SHA512_DIGEST_SIZE];
	int ret;

	if (keylen <= blocksize)
		memcpy(ctx->key, key, keysize);
	else {
		/* key longer than block size: digest it first (RFC 2104) */
		ret = keyhash(tfm, key, keylen, hash);

		if (ret) {
			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
			return -EINVAL;
		}

		keysize = digestsize;
		memcpy(ctx->key, hash, digestsize);
	}

	ctx->keylen = keysize;

	return 0;
}


struct talitos_alg_template {
	u32 type;
	union {
		struct crypto_alg crypto;
		struct ahash_alg hash;
		struct aead_alg aead;
	} alg;
	__be32 desc_hdr_template;
};

static struct talitos_alg_template driver_algs[] = {
	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_AESU |
		                     DESC_HDR_MODE0_AESU_CBC |
		                     DESC_HDR_SEL1_MDEUA |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_DEU |
		                     DESC_HDR_MODE0_DEU_CBC |
		                     DESC_HDR_MODE0_DEU_3DES |
		                     DESC_HDR_SEL1_MDEUA |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{       .type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_DEU |
		                     DESC_HDR_MODE0_DEU_CBC |
		                     DESC_HDR_MODE0_DEU_3DES |
		                     DESC_HDR_SEL1_MDEUA |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_AESU |
		                     DESC_HDR_MODE0_AESU_CBC |
		                     DESC_HDR_SEL1_MDEUA |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_DEU |
		                     DESC_HDR_MODE0_DEU_CBC |
		                     DESC_HDR_MODE0_DEU_3DES |
		                     DESC_HDR_SEL1_MDEUA |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_AESU |
		                     DESC_HDR_MODE0_AESU_CBC |
		                     DESC_HDR_SEL1_MDEUB |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_DEU |
		                     DESC_HDR_MODE0_DEU_CBC |
		                     DESC_HDR_MODE0_DEU_3DES |
		                     DESC_HDR_SEL1_MDEUB |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_AESU |
		                     DESC_HDR_MODE0_AESU_CBC |
		                     DESC_HDR_SEL1_MDEUB |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_DEU |
		                     DESC_HDR_MODE0_DEU_CBC |
		                     DESC_HDR_MODE0_DEU_3DES |
		                     DESC_HDR_SEL1_MDEUB |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_AESU |
		                     DESC_HDR_MODE0_AESU_CBC |
		                     DESC_HDR_SEL1_MDEUA |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_DEU |
		                     DESC_HDR_MODE0_DEU_CBC |
		                     DESC_HDR_MODE0_DEU_3DES |
		                     DESC_HDR_SEL1_MDEUA |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	/* ABLKCIPHER algorithms. */
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			             DESC_HDR_SEL0_DEU |
		                     DESC_HDR_MODE0_DEU_CBC |
		                     DESC_HDR_MODE0_DEU_3DES,
	},
	/* AHASH algorithms. */
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "md5",
				.cra_driver_name = "md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "hmac-md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	}
};

struct talitos_crypto_alg {
	struct list_head entry;
	struct device *dev;
	struct talitos_alg_template algt;
};

static int talitos_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
	struct talitos_private *priv;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
		talitos_alg = container_of(__crypto_ahash_alg(alg),
					   struct talitos_crypto_alg,
					   algt.alg.hash);
	else
		talitos_alg = container_of(alg, struct talitos_crypto_alg,
					   algt.alg.crypto);

	/* update context with ptr to dev */
	ctx->dev = talitos_alg->dev;

	/* assign SEC channel to tfm in round-robin fashion */
	priv = dev_get_drvdata(ctx->dev);
	ctx->ch = atomic_inc_return(&priv->last_chan) &
		  (priv->num_channels - 1);

	/* copy descriptor header template value */
	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;

	/* select done notification */
	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;

	return 0;
}

static int talitos_cra_init_aead(struct crypto_aead *tfm)
{
	talitos_cra_init(crypto_aead_tfm(tfm));
	return 0;
}

static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	ctx->keylen = 0;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct talitos_ahash_req_ctx));

	return 0;
}

/*
 * given the alg's descriptor header template, determine whether descriptor
 * type and primary/secondary execution units required match the hw
 * capabilities description provided in the device tree node.
 */
static int hw_supports(struct device *dev, __be32 desc_hdr_template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ret;

	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);

	if (SECONDARY_EU(desc_hdr_template))
		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
		              & priv->exec_units);

	return ret;
}

static int talitos_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg, *n;
	int i;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		switch (t_alg->algt.type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			crypto_unregister_alg(&t_alg->algt.alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&t_alg->algt.alg.aead);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&t_alg->algt.alg.hash);
			break;
		}
		list_del(&t_alg->entry);
		kfree(t_alg);
	}

	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
		talitos_unregister_rng(dev);

	for (i = 0; priv->chan && i < priv->num_channels; i++)
		kfree(priv->chan[i].fifo);

	kfree(priv->chan);

	for (i = 0; i < 2; i++)
		if (priv->irq[i]) {
			free_irq(priv->irq[i], dev);
			irq_dispose_mapping(priv->irq[i]);
		}

	tasklet_kill(&priv->done_task[0]);
	if (priv->irq[1])
		tasklet_kill(&priv->done_task[1]);

	iounmap(priv->reg);

	kfree(priv);

	return 0;
}

static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
						           *template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->algt = *template;

	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
		alg->cra_ablkcipher.geniv = "eseqiv";
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg = &t_alg->algt.alg.aead.base;
		alg->cra_flags |= CRYPTO_ALG_AEAD_NEW;
		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
		t_alg->algt.alg.aead.setkey = aead_setkey;
		t_alg->algt.alg.aead.encrypt = aead_encrypt;
		t_alg->algt.alg.aead.decrypt = aead_decrypt;
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_type = &crypto_ahash_type;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		t_alg->algt.alg.hash.setkey = ahash_setkey;

		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			kfree(t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
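		/* no h/w sha224: s/w-init the context, run it as sha256 */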
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		kfree(t_alg);
		return ERR_PTR(-EINVAL);
	}

	alg->cra_module = THIS_MODULE;
	alg->cra_priority = TALITOS_CRA_PRIORITY;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_alg->dev = dev;

	return t_alg;
}

static int talitos_probe_irq(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;
	bool is_sec1 = has_ftr_sec1(priv);

	priv->irq[0] = irq_of_parse_and_map(np, 0);
	if (!priv->irq[0]) {
		dev_err(dev, "failed to map irq\n");
		return -EINVAL;
	}
	if (is_sec1) {
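		/* SEC1 uses a single interrupt line for all four channels */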
		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	priv->irq[1] = irq_of_parse_and_map(np, 1);

	/* get the primary irq line */
	if (!priv->irq[1]) {
		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
			  dev_driver_string(dev), dev);
	if (err)
		goto primary_out;

	/* get the secondary irq line */
	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request secondary irq\n");
		irq_dispose_mapping(priv->irq[1]);
		priv->irq[1] = 0;
	}

	return err;

primary_out:
	if (err) {
		dev_err(dev, "failed to request primary irq\n");
		irq_dispose_mapping(priv->irq[0]);
		priv->irq[0] = 0;
	}

	return err;
}

static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	const unsigned int *prop;
	int i, err;
	int stride;

	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->alg_list);

	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	spin_lock_init(&priv->reg_lock);

	priv->reg = of_iomap(np, 0);
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	prop = of_get_property(np, "fsl,num-channels", NULL);
	if (prop)
		priv->num_channels = *prop;

	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
	if (prop)
		priv->chfifo_len = *prop;

	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
	if (prop)
		priv->exec_units = *prop;

	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
	if (prop)
		priv->desc_types = *prop;

	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}

	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	if (of_device_is_compatible(np, "fsl,sec1.0"))
		priv->features |= TALITOS_FTR_SEC1;

	if (of_device_is_compatible(np, "fsl,sec1.2")) {
		priv->reg_deu = priv->reg + TALITOS12_DEU;
		priv->reg_aesu = priv->reg + TALITOS12_AESU;
		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
		stride = TALITOS1_CH_STRIDE;
	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
		priv->reg_deu = priv->reg + TALITOS10_DEU;
		priv->reg_aesu = priv->reg + TALITOS10_AESU;
		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
		stride = TALITOS1_CH_STRIDE;
	} else {
		priv->reg_deu = priv->reg + TALITOS2_DEU;
		priv->reg_aesu = priv->reg + TALITOS2_AESU;
		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
		priv->reg_keu = priv->reg + TALITOS2_KEU;
		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
		stride = TALITOS2_CH_STRIDE;
	}

	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

	if (of_device_is_compatible(np, "fsl,sec1.0")) {
		tasklet_init(&priv->done_task[0], talitos1_done_4ch,
			     (unsigned long)dev);
	} else {
		if (!priv->irq[1]) {
			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
				     (unsigned long)dev);
		} else {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
				     (unsigned long)dev);
			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
				     (unsigned long)dev);
		}
	}

	priv->chan = kzalloc(sizeof(struct talitos_channel) *
			     priv->num_channels, GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

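	/* power-of-2 depth lets ring indices wrap with a mask */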
	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + stride * (i + 1);
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;

		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);

		priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
					     priv->fifo_len, GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}

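		/* bias: the channel accepts chfifo_len - 1 in-flight requests */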
		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));
	}

	dma_set_mask(dev, DMA_BIT_MASK(36));

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			struct crypto_alg *alg = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				alg = &t_alg->algt.alg.crypto;
				break;

			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_aead(
					&t_alg->algt.alg.aead);
				alg = &t_alg->algt.alg.aead.base;
				break;

			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				alg = &t_alg->algt.alg.hash.halg.base;
				break;
			}
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					alg->cra_driver_name);
				kfree(t_alg);
			} else
				list_add_tail(&t_alg->entry, &priv->alg_list);
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}

static const struct of_device_id talitos_match[] = {
#ifdef CONFIG_CRYPTO_DEV_TALITOS1
	{
		.compatible = "fsl,sec1.0",
	},
#endif
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
	{
		.compatible = "fsl,sec2.0",
	},
#endif
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);

static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

module_platform_driver(talitos_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");