/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"

static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	ptr->eptr = upper_32_bits(dma_addr);
}
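
/*
 * Example (illustrative): a 36-bit bus address such as 0x9_2345_6780
 * splits into eptr = 0x9 and ptr = cpu_to_be32(0x23456780).
 */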

static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr)
{
	ptr->j_extent = 0;
}

/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned short len, void *data,
				   enum dma_data_direction dir)
{
	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);

	ptr->len = cpu_to_be16(len);
	to_talitos_ptr(ptr, dma_addr);
	to_talitos_ptr_extent_clear(ptr);
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 be16_to_cpu(ptr->len), dir);
}

static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->chan[ch].reg + TALITOS_CCCR, TALITOS_CCCR_RESET);

	while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & TALITOS_CCCR_RESET)
	       && --timeout)
		cpu_relax();

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}

static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	u32 mcr = TALITOS_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}

/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg + TALITOS_MDEUICR_LO,
		          TALITOS_MDEUICR_LO_ICE);

	return 0;
}

/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:	the SEC device to be used
 * @ch:		the SEC device channel to be used
 * @desc:	the descriptor to be processed by the device
 * @callback:	whom to call when processing is complete
 * @context:	a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
					   DMA_BIDIRECTIONAL);
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
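
/*
 * Usage example (illustrative sketch; my_done, my_req and the helpers are
 * hypothetical names, not part of this driver):
 *
 *	static void my_done(struct device *dev, struct talitos_desc *desc,
 *			    void *context, int error)
 *	{
 *		struct my_req *r = context;
 *
 *		// error is 0 on success; the callback must also inspect
 *		// the done/feedback bits in desc->hdr
 *		complete_my_request(r, error);
 *	}
 *
 *	err = talitos_submit(dev, ch, desc, my_done, r);
 *	if (err != -EINPROGRESS)
 *		handle_full_fifo(r);	// -EAGAIN: channel fifo was full
 */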

/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 sizeof(struct talitos_desc),
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}

/*
 * process completed requests for channels that have done status
 */
#define DEF_TALITOS_DONE(name, ch_done_mask)				\
static void talitos_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}
DEF_TALITOS_DONE(4ch, TALITOS_ISR_4CHDONE)
DEF_TALITOS_DONE(ch0_2, TALITOS_ISR_CH_0_2_DONE)
DEF_TALITOS_DONE(ch1_3, TALITOS_ISR_CH_1_3_DONE)
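
/*
 * Note (illustrative): each channel owns an adjacent done/error bit pair
 * in the ISR, done in the even bit (0, 2, 4, 6 for channels 0-3) and
 * error in the odd bit, which is why talitos_error() below tests
 * isr & (1 << (ch * 2 + 1)).
 */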

/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}

/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_AFEUISR),
			in_be32(priv->reg + TALITOS_AFEUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_DEUISR),
			in_be32(priv->reg + TALITOS_DEUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_MDEUISR),
			in_be32(priv->reg + TALITOS_MDEUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_RNGUISR),
			in_be32(priv->reg + TALITOS_RNGUISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_PKEUISR),
			in_be32(priv->reg + TALITOS_PKEUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_AESUISR),
			in_be32(priv->reg + TALITOS_AESUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_CRCUISR),
			in_be32(priv->reg + TALITOS_CRCUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_KEUISR),
			in_be32(priv->reg + TALITOS_KEUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_MDEUISR),
			in_be32(priv->reg + TALITOS_MDEUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_CRCUISR),
			in_be32(priv->reg + TALITOS_CRCUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}

/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0, reset_ch = 0;
	u32 v, v_lo;

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (!(isr & (1 << (ch * 2 + 1))))
			continue;

		error = -EINVAL;

		v = in_be32(priv->chan[ch].reg + TALITOS_CCPSR);
		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, "invalid execution unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (v_lo & TALITOS_CCPSR_LO_GB)
			dev_err(dev, "gather boundary error\n");
		if (v_lo & TALITOS_CCPSR_LO_GRL)
			dev_err(dev, "gather return/length error\n");
		if (v_lo & TALITOS_CCPSR_LO_SB)
			dev_err(dev, "scatter boundary error\n");
		if (v_lo & TALITOS_CCPSR_LO_SRL)
			dev_err(dev, "scatter return/length error\n");

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			       TALITOS_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || isr & ~TALITOS_ISR_4CHERR || isr_lo) {
		dev_err(dev, "done overflow, internal time out, or rngu error: "
		        "ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}

#define DEF_TALITOS_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}
DEF_TALITOS_INTERRUPT(4ch, TALITOS_ISR_4CHDONE, TALITOS_ISR_4CHERR, 0)
DEF_TALITOS_INTERRUPT(ch0_2, TALITOS_ISR_CH_0_2_DONE, TALITOS_ISR_CH_0_2_ERR, 0)
DEF_TALITOS_INTERRUPT(ch1_3, TALITOS_ISR_CH_1_3_DONE, TALITOS_ISR_CH_1_3_ERR, 1)

/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg + TALITOS_RNGUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses; read both halves, keep the low word */
	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO);
	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO_LO);

	return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg + TALITOS_RNGURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg + TALITOS_RNGUSR_LO) & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg + TALITOS_RNGUDSR_LO, 0);

	return 0;
}

static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	priv->rng.name		= dev_driver_string(dev),
	priv->rng.init		= talitos_rng_init,
	priv->rng.data_present	= talitos_rng_data_present,
	priv->rng.data_read	= talitos_rng_data_read,
	priv->rng.priv		= (unsigned long)dev;

	return hwrng_register(&priv->rng);
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	hwrng_unregister(&priv->rng);
}

/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
#define TALITOS_MAX_KEY_SIZE		96
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
	unsigned int authsize;
};

#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[HASH_MAX_BLOCK_SIZE];
	u8 bufnext[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	u64 nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;

	return 0;
}

static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;

	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
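
/*
 * Example (illustrative): for authenc(hmac(sha1),cbc(aes)) with a 20-byte
 * auth key and a 16-byte enc key, ctx->key holds the 20 auth-key bytes
 * followed immediately by the 16 enc-key bytes, so ctx->keylen = 36.
 */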

/*
 * talitos_edesc - s/w-extended descriptor
 * @assoc_nents: number of segments in associated data scatterlist
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @assoc_chained: whether assoc is chained or not
 * @src_chained: whether src is chained or not
 * @dst_chained: whether dst is chained or not
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int assoc_nents;
	int src_nents;
	int dst_nents;
	bool assoc_chained;
	bool src_chained;
	bool dst_chained;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	struct talitos_ptr link_tbl[0];
};
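
/*
 * A rough sketch (illustrative) of how the dma-mapped area is laid out in
 * the aead path, matching the index arithmetic used below:
 *
 *	link_tbl[0]                              cipher-in table
 *	link_tbl[src_nents + 1]                  cipher-out table
 *	link_tbl[src_nents + dst_nents + 2]      assoc + IV table
 *	link_tbl[src_nents + dst_nents + 2
 *		 + assoc_nents]                  ICV data (authsize bytes)
 */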

static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
			  unsigned int nents, enum dma_data_direction dir,
			  bool chained)
{
	if (unlikely(chained))
		while (sg) {
			dma_map_sg(dev, sg, 1, dir);
			sg = sg_next(sg);
		}
	else
		dma_map_sg(dev, sg, nents, dir);
	return nents;
}

static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
				   enum dma_data_direction dir)
{
	while (sg) {
		dma_unmap_sg(dev, sg, 1, dir);
		sg = sg_next(sg);
	}
}

static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst)
{
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (src != dst) {
		if (edesc->src_chained)
			talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
		else
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst) {
			if (edesc->dst_chained)
				talitos_unmap_sg_chain(dev, dst,
						       DMA_FROM_DEVICE);
			else
				dma_unmap_sg(dev, dst, dst_nents,
					     DMA_FROM_DEVICE);
		}
	} else
		if (edesc->src_chained)
			talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
		else
			dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}

static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	if (edesc->assoc_chained)
		talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE);
	else if (areq->assoclen)
		/* assoc_nents counts also for IV in non-contiguous cases */
		dma_unmap_sg(dev, areq->assoc,
			     edesc->assoc_nents ? edesc->assoc_nents - 1 : 1,
			     DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->dst_nents) {
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2 +
					   edesc->assoc_nents];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
		       icvdata, ctx->authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}

static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		if (edesc->dma_len)
			icvdata = &edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2 +
						   edesc->assoc_nents];
		else
			icvdata = &edesc->link_tbl[0];

		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
			     ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
			   int cryptlen, struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;

	while (n_sg--) {
		to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg));
		link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
		link_tbl_ptr->j_extent = 0;
		link_tbl_ptr++;
		cryptlen -= sg_dma_len(sg);
		sg = sg_next(sg);
	}

	/* adjust (decrease) last one (or two) entry's len to cryptlen */
	link_tbl_ptr--;
	while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
		/* Empty this entry, and move to previous one */
		cryptlen += be16_to_cpu(link_tbl_ptr->len);
		link_tbl_ptr->len = 0;
		sg_count--;
		link_tbl_ptr--;
	}
	be16_add_cpu(&link_tbl_ptr->len, cryptlen);

	/* tag end of link table */
	link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;

	return sg_count;
}
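
/*
 * Worked example (illustrative): three 64-byte segments with
 * cryptlen = 160 produce entries of len 64, 64, 64 and leave cryptlen
 * at -32; the adjustment above then trims the last entry to 32 bytes
 * and tags it with DESC_PTR_LNKTBL_RETURN.
 */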

/*
 * fill in and submit ipsec_esp descriptor
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     u64 seq, void (*callback) (struct device *dev,
						struct talitos_desc *desc,
						void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int authsize = ctx->authsize;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int sg_count, ret;
	int sg_link_tbl_len;

	/* hmac key */
	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
			       DMA_TO_DEVICE);

	/* hmac data */
	desc->ptr[1].len = cpu_to_be16(areq->assoclen + ivsize);
	if (edesc->assoc_nents) {
		int tbl_off = edesc->src_nents + edesc->dst_nents + 2;
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
			       sizeof(struct talitos_ptr));
		desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;

		/* assoc_nents - 1 entries for assoc, 1 for IV */
		sg_count = sg_to_link_tbl(areq->assoc, edesc->assoc_nents - 1,
					  areq->assoclen, tbl_ptr);

		/* add IV to link table */
		tbl_ptr += sg_count - 1;
		tbl_ptr->j_extent = 0;
		tbl_ptr++;
		to_talitos_ptr(tbl_ptr, edesc->iv_dma);
		tbl_ptr->len = cpu_to_be16(ivsize);
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;

		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	} else {
		if (areq->assoclen)
			to_talitos_ptr(&desc->ptr[1],
				       sg_dma_address(areq->assoc));
		else
			to_talitos_ptr(&desc->ptr[1], edesc->iv_dma);
		desc->ptr[1].j_extent = 0;
	}

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[2], edesc->iv_dma);
	desc->ptr[2].len = cpu_to_be16(ivsize);
	desc->ptr[2].j_extent = 0;
	/* Sync needed for the aead_givencrypt case */
	dma_sync_single_for_device(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
			       (char *)&ctx->key + ctx->authkeylen,
			       DMA_TO_DEVICE);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	desc->ptr[4].len = cpu_to_be16(cryptlen);
	desc->ptr[4].j_extent = authsize;

	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
							   : DMA_TO_DEVICE,
				  edesc->src_chained);

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src));
	} else {
		sg_link_tbl_len = cryptlen;

		if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
			sg_link_tbl_len = cryptlen + authsize;

		sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
					  &edesc->link_tbl[0]);
		if (sg_count > 1) {
			desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
			to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   edesc->dma_len,
						   DMA_BIDIRECTIONAL);
		} else {
			/* Only one segment now, so no link tbl needed */
			to_talitos_ptr(&desc->ptr[4],
				       sg_dma_address(areq->src));
		}
	}

	/* cipher out */
	desc->ptr[5].len = cpu_to_be16(cryptlen);
	desc->ptr[5].j_extent = authsize;

	if (areq->src != areq->dst)
		sg_count = talitos_map_sg(dev, areq->dst,
					  edesc->dst_nents ? : 1,
					  DMA_FROM_DEVICE, edesc->dst_chained);

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst));
	} else {
		int tbl_off = edesc->src_nents + 1;
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
			       tbl_off * sizeof(struct talitos_ptr));
		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
					  tbl_ptr);

		/* Add an entry to the link table for ICV data */
		tbl_ptr += sg_count - 1;
		tbl_ptr->j_extent = 0;
		tbl_ptr++;
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
		tbl_ptr->len = cpu_to_be16(authsize);

		/* icv data follows link tables */
		to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
			       (tbl_off + edesc->dst_nents + 1 +
				edesc->assoc_nents) *
			       sizeof(struct talitos_ptr));
		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	}

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

/*
 * derive number of elements in scatterlist
 */
static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained)
{
	struct scatterlist *sg = sg_list;
	int sg_nents = 0;

	*chained = false;
	while (nbytes > 0) {
		sg_nents++;
		nbytes -= sg->length;
		if (!sg_is_last(sg) && (sg + 1)->length == 0)
			*chained = true;
		sg = sg_next(sg);
	}

	return sg_nents;
}
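
/*
 * Example (illustrative): nbytes = 100 over two 64-byte segments counts
 * both segments and returns 2, even though only 36 bytes of the second
 * segment are consumed.
 */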

/*
 * allocate and map the extended descriptor
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *assoc,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags,
						 bool encrypt)
{
	struct talitos_edesc *edesc;
	int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len;
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;

	if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (ivsize)
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);

	if (assoclen) {
		/*
		 * Currently it is assumed that iv is provided whenever assoc
		 * is.
		 */
		BUG_ON(!iv);

		assoc_nents = sg_count(assoc, assoclen, &assoc_chained);
		talitos_map_sg(dev, assoc, assoc_nents, DMA_TO_DEVICE,
			       assoc_chained);
		assoc_nents = (assoc_nents == 1) ? 0 : assoc_nents;

		if (assoc_nents || sg_dma_address(assoc) + assoclen != iv_dma)
			assoc_nents = assoc_nents ? assoc_nents + 1 : 2;
	}
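
	/*
	 * Worked example (illustrative): a single-segment assoc whose end
	 * is not contiguous with the IV ends up with assoc_nents = 2 (one
	 * link-table entry for assoc, one for the IV), while a contiguous
	 * single segment keeps assoc_nents = 0 and assoc + IV are read as
	 * one run.
	 */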

	if (!dst || dst == src) {
		src_nents = sg_count(src, cryptlen + authsize, &src_chained);
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = dst ? src_nents : 0;
	} else { /* dst && dst != src*/
		src_nents = sg_count(src, cryptlen + (encrypt ? 0 : authsize),
				     &src_chained);
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = sg_count(dst, cryptlen + (encrypt ? authsize : 0),
				     &dst_chained);
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for ICV and generated ICV (+ 2),
	 * and the ICV data itself
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (assoc_nents || src_nents || dst_nents) {
		dma_len = (src_nents + dst_nents + 2 + assoc_nents) *
			  sizeof(struct talitos_ptr) + authsize;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
		alloc_len += icv_stashing ? authsize : 0;
	}

	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc) {
		if (assoc_chained)
			talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
		else if (assoclen)
			dma_unmap_sg(dev, assoc,
				     assoc_nents ? assoc_nents - 1 : 1,
				     DMA_TO_DEVICE);

		if (iv_dma)
			dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);

		dev_err(dev, "could not allocate edescriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_chained = src_chained;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len)
		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);

	return edesc;
}

static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
					      int icv_stashing, bool encrypt)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);

	return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst,
				   iv, areq->assoclen, areq->cryptlen,
				   ctx->authsize, ivsize, icv_stashing,
				   areq->base.flags, encrypt);
}

static int aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 0, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return ipsec_esp(edesc, req, 0, ipsec_esp_encrypt_done);
}

static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int authsize = ctx->authsize;
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	req->cryptlen -= authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */
		edesc->desc.hdr_lo = 0;
		return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	if (edesc->dma_len)
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2 +
					   edesc->assoc_nents];
	else
		icvdata = &edesc->link_tbl[0];
	sg = sg_last(req->src, edesc->src_nents ? : 1);
	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
	       ctx->authsize);
	return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_swauth_done);
}

static int aead_givencrypt(struct aead_givcrypt_request *req)
{
	struct aead_request *areq = &req->areq;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(areq, req->giv, 0, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
	/* avoid consecutive packets going out with same IV */
	*(__be64 *)req->giv ^= cpu_to_be64(req->seq);
	return ipsec_esp(edesc, areq, req->seq, ipsec_esp_encrypt_done);
}

static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
			     const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);

	memcpy(&ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
				 struct scatterlist *dst, unsigned int len,
				 struct talitos_edesc *edesc)
{
	talitos_sg_unmap(dev, edesc, src, dst);
}

static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

static void ablkcipher_done(struct device *dev,
			    struct talitos_desc *desc, void *context,
			    int err)
{
	struct ablkcipher_request *areq = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	common_nonsnoop_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}

int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
			  unsigned int len, struct talitos_edesc *edesc,
			  enum dma_data_direction dir, struct talitos_ptr *ptr)
{
	int sg_count;

	ptr->len = cpu_to_be16(len);
	to_talitos_ptr_extent_clear(ptr);

	sg_count = talitos_map_sg(dev, src, edesc->src_nents ? : 1, dir,
				  edesc->src_chained);

	if (sg_count == 1) {
		to_talitos_ptr(ptr, sg_dma_address(src));
	} else {
		sg_count = sg_to_link_tbl(src, sg_count, len,
					  &edesc->link_tbl[0]);
		if (sg_count > 1) {
			to_talitos_ptr(ptr, edesc->dma_link_tbl);
			ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   edesc->dma_len,
						   DMA_BIDIRECTIONAL);
		} else {
			/* Only one segment now, so no link tbl needed */
			to_talitos_ptr(ptr, sg_dma_address(src));
		}
	}
	return sg_count;
}

void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
			    unsigned int len, struct talitos_edesc *edesc,
			    enum dma_data_direction dir,
			    struct talitos_ptr *ptr, int sg_count)
{
	ptr->len = cpu_to_be16(len);
	to_talitos_ptr_extent_clear(ptr);

	if (dir != DMA_NONE)
		sg_count = talitos_map_sg(dev, dst, edesc->dst_nents ? : 1,
					  dir, edesc->dst_chained);

	if (sg_count == 1) {
		to_talitos_ptr(ptr, sg_dma_address(dst));
	} else {
		struct talitos_ptr *link_tbl_ptr =
			&edesc->link_tbl[edesc->src_nents + 1];

		to_talitos_ptr(ptr, edesc->dma_link_tbl +
					      (edesc->src_nents + 1) *
					      sizeof(struct talitos_ptr));
		ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
		sg_count = sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr);
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	}
}

static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct ablkcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->nbytes;
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	int sg_count, ret;

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma);
	desc->ptr[1].len = cpu_to_be16(ivsize);
	to_talitos_ptr_extent_clear(&desc->ptr[1]);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
			       (char *)&ctx->key, DMA_TO_DEVICE);

	/*
	 * cipher in
	 */
	sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
					 (areq->src == areq->dst) ?
					  DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
					  &desc->ptr[3]);

	/* cipher out */
	map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
			       (areq->src == areq->dst) ? DMA_NONE
							: DMA_FROM_DEVICE,
			       &desc->ptr[4], sg_count);

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
						    areq, bool encrypt)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);

	return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst,
				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
				   areq->base.flags, encrypt);
}

static int ablkcipher_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static int ablkcipher_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc);

	/* When using hashctx-in, must unmap it. */
	if (edesc->desc.ptr[1].len)
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
					 DMA_TO_DEVICE);

	if (edesc->desc.ptr[2].len)
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
					 DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

}

static void ahash_done(struct device *dev,
		       struct talitos_desc *desc, void *context,
		       int err)
{
	struct ahash_request *areq = context;
	struct talitos_edesc *edesc =
		 container_of(desc, struct talitos_edesc, desc);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	if (!req_ctx->last && req_ctx->to_hash_later) {
		/* Position any partial block for next update/final/finup */
		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
		req_ctx->nbuf = req_ctx->to_hash_later;
	}
	common_nonsnoop_hash_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}

static int common_nonsnoop_hash(struct talitos_edesc *edesc,
				struct ahash_request *areq, unsigned int length,
				void (*callback) (struct device *dev,
						  struct talitos_desc *desc,
						  void *context, int error))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	int ret;

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* hash context in */
	if (!req_ctx->first || req_ctx->swinit) {
		map_single_talitos_ptr(dev, &desc->ptr[1],
				       req_ctx->hw_context_size,
				       (char *)req_ctx->hw_context,
				       DMA_TO_DEVICE);
		req_ctx->swinit = 0;
	} else {
		desc->ptr[1] = zero_entry;
		/* Indicate next op is not the first. */
		req_ctx->first = 0;
	}

	/* HMAC key */
	if (ctx->keylen)
		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
				       (char *)&ctx->key, DMA_TO_DEVICE);
	else
		desc->ptr[2] = zero_entry;

	/*
	 * data in
	 */
	map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc,
			      DMA_TO_DEVICE, &desc->ptr[3]);

	/* fifth DWORD empty */
	desc->ptr[4] = zero_entry;

	/* hash/HMAC out -or- hash context out */
	if (req_ctx->last)
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       crypto_ahash_digestsize(tfm),
				       areq->result, DMA_FROM_DEVICE);
	else
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       req_ctx->hw_context_size,
				       req_ctx->hw_context, DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_hash_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
					       unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0,
				   nbytes, 0, 0, 0, areq->base.flags, false);
}

static int ahash_init(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	/* Initialize the context */
	req_ctx->nbuf = 0;
	req_ctx->first = 1; /* first indicates h/w must init its context */
	req_ctx->swinit = 0; /* assume h/w init of context */
	req_ctx->hw_context_size =
		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;

	return 0;
}

/*
 * on h/w without explicit sha224 support, we initialize h/w context
 * manually with sha224 constants, and tell it to run sha256.
 */
static int ahash_init_sha224_swinit(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	ahash_init(areq);
	req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/

	req_ctx->hw_context[0] = SHA224_H0;
	req_ctx->hw_context[1] = SHA224_H1;
	req_ctx->hw_context[2] = SHA224_H2;
	req_ctx->hw_context[3] = SHA224_H3;
	req_ctx->hw_context[4] = SHA224_H4;
	req_ctx->hw_context[5] = SHA224_H5;
	req_ctx->hw_context[6] = SHA224_H6;
	req_ctx->hw_context[7] = SHA224_H7;

	/* init 64-bit count */
	req_ctx->hw_context[8] = 0;
	req_ctx->hw_context[9] = 0;

	return 0;
}

static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int nbytes_to_hash;
	unsigned int to_hash_later;
	unsigned int nsg;
	bool chained;

	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
		/* Buffer up to one whole block */
		sg_copy_to_buffer(areq->src,
				  sg_count(areq->src, nbytes, &chained),
				  req_ctx->buf + req_ctx->nbuf, nbytes);
		req_ctx->nbuf += nbytes;
		return 0;
	}

	/* At least (blocksize + 1) bytes are available to hash */
	nbytes_to_hash = nbytes + req_ctx->nbuf;
	to_hash_later = nbytes_to_hash & (blocksize - 1);

	if (req_ctx->last)
		to_hash_later = 0;
	else if (to_hash_later)
		/* There is a partial block. Hash the full block(s) now */
		nbytes_to_hash -= to_hash_later;
	else {
		/* Keep one block buffered */
		nbytes_to_hash -= blocksize;
		to_hash_later = blocksize;
	}
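
	/*
	 * Example (64-byte block, not the last update): 150 bytes pending
	 * gives to_hash_later = 22, so 128 bytes are hashed now; an exact
	 * multiple of the block size keeps one full block buffered instead.
	 */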

	/* Chain in any previously buffered data */
	if (req_ctx->nbuf) {
		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
		sg_init_table(req_ctx->bufsl, nsg);
		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
		if (nsg > 1)
			scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
		req_ctx->psrc = req_ctx->bufsl;
	} else
		req_ctx->psrc = areq->src;

	if (to_hash_later) {
		int nents = sg_count(areq->src, nbytes, &chained);
		sg_pcopy_to_buffer(areq->src, nents,
				   req_ctx->bufnext,
				   to_hash_later,
				   nbytes - to_hash_later);
	}
	req_ctx->to_hash_later = to_hash_later;

	/* Allocate extended descriptor */
	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template;

	/* On last one, request SEC to pad; otherwise continue */
	if (req_ctx->last)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
	else
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;

	/* request SEC to INIT hash. */
	if (req_ctx->first && !req_ctx->swinit)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;

	/*
	 * When the tfm context has a keylen, it's an HMAC.
	 * A first or last (i.e. not middle) descriptor must request HMAC.
	 */
	if (ctx->keylen && (req_ctx->first || req_ctx->last))
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;

	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
				    ahash_done);
}

static int ahash_update(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 0;

	return ahash_process_req(areq, areq->nbytes);
}

static int ahash_final(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 1;

	return ahash_process_req(areq, 0);
}

static int ahash_finup(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 1;

	return ahash_process_req(areq, areq->nbytes);
}

static int ahash_digest(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

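	/* one-shot digest: run ->init(), then hash all input as the last */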
	ahash->init(areq);
	req_ctx->last = 1;

	return ahash_process_req(areq, areq->nbytes);
}

struct keyhash_result {
	struct completion completion;
	int err;
};

static void keyhash_complete(struct crypto_async_request *req, int err)
{
	struct keyhash_result *res = req->data;

	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->completion);
}

static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
		   u8 *hash)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct scatterlist sg[1];
	struct ahash_request *req;
	struct keyhash_result hresult;
	int ret;

	init_completion(&hresult.completion);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* Keep tfm keylen == 0 during hash of the long key */
	ctx->keylen = 0;
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   keyhash_complete, &hresult);

	sg_init_one(&sg[0], key, keylen);

	ahash_request_set_crypt(req, sg, hash, keylen);
	ret = crypto_ahash_digest(req);
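	/* asynchronous backends return -EINPROGRESS/-EBUSY; wait for them */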
	switch (ret) {
	case 0:
		break;
	case -EINPROGRESS:
	case -EBUSY:
		ret = wait_for_completion_interruptible(
			&hresult.completion);
		if (!ret)
			ret = hresult.err;
		break;
	default:
		break;
	}
	ahash_request_free(req);

	return ret;
}
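
/*
 * RFC 2104 HMAC key handling: a key longer than the block size is first
 * replaced by its digest; shorter keys are stored as-is.
 */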

static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int keysize = keylen;
	u8 hash[SHA512_DIGEST_SIZE];
	int ret;

	if (keylen <= blocksize)
		memcpy(ctx->key, key, keysize);
	else {
		/* Must get the hash of the long key */
		ret = keyhash(tfm, key, keylen, hash);

		if (ret) {
			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
			return -EINVAL;
		}

		keysize = digestsize;
		memcpy(ctx->key, hash, digestsize);
	}

	ctx->keylen = keysize;

	return 0;
}

struct talitos_alg_template {
	u32 type;
	union {
		struct crypto_alg crypto;
		struct ahash_alg hash;
	} alg;
	__be32 desc_hdr_template;
};
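
/*
 * Algorithm templates: each entry pairs a crypto API definition with the
 * descriptor header template selecting the SEC execution unit(s) and mode
 * that implement it.
 */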

static struct talitos_alg_template driver_algs[] = {
	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha1),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA1_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_AESU |
		                     DESC_HDR_MODE0_AESU_CBC |
		                     DESC_HDR_SEL1_MDEUA |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA1_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_DEU |
		                     DESC_HDR_MODE0_DEU_CBC |
		                     DESC_HDR_MODE0_DEU_3DES |
		                     DESC_HDR_SEL1_MDEUA |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{       .type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha224),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha224-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA224_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha224-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA224_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_DEU |
		                     DESC_HDR_MODE0_DEU_CBC |
		                     DESC_HDR_MODE0_DEU_3DES |
		                     DESC_HDR_SEL1_MDEUA |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha256),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA256_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_AESU |
		                     DESC_HDR_MODE0_AESU_CBC |
		                     DESC_HDR_SEL1_MDEUA |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA256_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_DEU |
		                     DESC_HDR_MODE0_DEU_CBC |
		                     DESC_HDR_MODE0_DEU_3DES |
		                     DESC_HDR_SEL1_MDEUA |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha384),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha384-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA384_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_AESU |
		                     DESC_HDR_MODE0_AESU_CBC |
		                     DESC_HDR_SEL1_MDEUB |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha384-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA384_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_DEU |
		                     DESC_HDR_MODE0_DEU_CBC |
		                     DESC_HDR_MODE0_DEU_3DES |
		                     DESC_HDR_SEL1_MDEUB |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha512),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha512-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA512_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_AESU |
		                     DESC_HDR_MODE0_AESU_CBC |
		                     DESC_HDR_SEL1_MDEUB |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha512-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA512_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_DEU |
		                     DESC_HDR_MODE0_DEU_CBC |
		                     DESC_HDR_MODE0_DEU_3DES |
		                     DESC_HDR_SEL1_MDEUB |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(md5),cbc(aes))",
			.cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = MD5_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_AESU |
		                     DESC_HDR_MODE0_AESU_CBC |
		                     DESC_HDR_SEL1_MDEUA |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = MD5_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_DEU |
		                     DESC_HDR_MODE0_DEU_CBC |
		                     DESC_HDR_MODE0_DEU_3DES |
		                     DESC_HDR_SEL1_MDEUA |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	/* ABLKCIPHER algorithms. */
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			             DESC_HDR_SEL0_DEU |
		                     DESC_HDR_MODE0_DEU_CBC |
		                     DESC_HDR_MODE0_DEU_3DES,
	},
	/* AHASH algorithms. */
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "md5",
				.cra_driver_name = "md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "hmac-md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	}
};

struct talitos_crypto_alg {
	struct list_head entry;
	struct device *dev;
	struct talitos_alg_template algt;
};

static int talitos_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
	struct talitos_private *priv;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
		talitos_alg = container_of(__crypto_ahash_alg(alg),
					   struct talitos_crypto_alg,
					   algt.alg.hash);
	else
		talitos_alg = container_of(alg, struct talitos_crypto_alg,
					   algt.alg.crypto);

	/* update context with ptr to dev */
	ctx->dev = talitos_alg->dev;

	/*
	 * assign SEC channel to tfm in round-robin fashion; num_channels is
	 * a power of 2 (validated at probe time), so the mask is a modulo
	 */
	priv = dev_get_drvdata(ctx->dev);
	ctx->ch = atomic_inc_return(&priv->last_chan) &
		  (priv->num_channels - 1);

	/* copy descriptor header template value */
	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;

	/* select done notification */
	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;

	return 0;
}

static int talitos_cra_init_aead(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	/* random first IV */
	get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);

	return 0;
}

static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	ctx->keylen = 0;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct talitos_ahash_req_ctx));

	return 0;
}

/*
 * given the alg's descriptor header template, determine whether descriptor
 * type and primary/secondary execution units required match the hw
 * capabilities description provided in the device tree node.
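 * E.g. an ipsec_esp AEAD template requires both its primary (cipher) and
 * secondary (MDEU) execution unit bits in fsl,exec-units-mask.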
 */
static int hw_supports(struct device *dev, __be32 desc_hdr_template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ret;

	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);

	if (SECONDARY_EU(desc_hdr_template))
		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
		              & priv->exec_units);

	return ret;
}

static int talitos_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg, *n;
	int i;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		switch (t_alg->algt.type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_alg(&t_alg->algt.alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&t_alg->algt.alg.hash);
			break;
		}
		list_del(&t_alg->entry);
		kfree(t_alg);
	}

	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
		talitos_unregister_rng(dev);

	for (i = 0; i < priv->num_channels; i++)
		kfree(priv->chan[i].fifo);

	kfree(priv->chan);

	for (i = 0; i < 2; i++)
		if (priv->irq[i]) {
			free_irq(priv->irq[i], dev);
			irq_dispose_mapping(priv->irq[i]);
		}

	tasklet_kill(&priv->done_task[0]);
	if (priv->irq[1])
		tasklet_kill(&priv->done_task[1]);

	iounmap(priv->reg);

	kfree(priv);

	return 0;
}

static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
						           *template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->algt = *template;

	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
		alg->cra_ablkcipher.geniv = "eseqiv";
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init_aead;
		alg->cra_type = &crypto_aead_type;
		alg->cra_aead.setkey = aead_setkey;
		alg->cra_aead.setauthsize = aead_setauthsize;
		alg->cra_aead.encrypt = aead_encrypt;
		alg->cra_aead.decrypt = aead_decrypt;
		alg->cra_aead.givencrypt = aead_givencrypt;
		alg->cra_aead.geniv = "<built-in>";
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_type = &crypto_ahash_type;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		t_alg->algt.alg.hash.setkey = ahash_setkey;

		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			kfree(t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		kfree(t_alg);
		return ERR_PTR(-EINVAL);
	}

	alg->cra_module = THIS_MODULE;
	alg->cra_priority = TALITOS_CRA_PRIORITY;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_alg->dev = dev;

	return t_alg;
}

static int talitos_probe_irq(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;

	priv->irq[0] = irq_of_parse_and_map(np, 0);
	if (!priv->irq[0]) {
		dev_err(dev, "failed to map irq\n");
		return -EINVAL;
	}

	priv->irq[1] = irq_of_parse_and_map(np, 1);

	/* request the primary irq line */
	if (!priv->irq[1]) {
		err = request_irq(priv->irq[0], talitos_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	err = request_irq(priv->irq[0], talitos_interrupt_ch0_2, 0,
			  dev_driver_string(dev), dev);
	if (err)
		goto primary_out;

	/* request the secondary irq line */
	err = request_irq(priv->irq[1], talitos_interrupt_ch1_3, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request secondary irq\n");
		irq_dispose_mapping(priv->irq[1]);
		priv->irq[1] = 0;
	}

	return err;

primary_out:
	if (err) {
		dev_err(dev, "failed to request primary irq\n");
		irq_dispose_mapping(priv->irq[0]);
		priv->irq[0] = 0;
	}

	return err;
}

static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	const unsigned int *prop;
	int i, err;

	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->alg_list);

	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	spin_lock_init(&priv->reg_lock);

	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

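	/* one done-tasklet per irq line: a single line services all four
	 * channels; two lines split channels 0/2 and 1/3 */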
	if (!priv->irq[1]) {
		tasklet_init(&priv->done_task[0], talitos_done_4ch,
			     (unsigned long)dev);
	} else {
		tasklet_init(&priv->done_task[0], talitos_done_ch0_2,
			     (unsigned long)dev);
		tasklet_init(&priv->done_task[1], talitos_done_ch1_3,
			     (unsigned long)dev);
	}

	priv->reg = of_iomap(np, 0);
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	prop = of_get_property(np, "fsl,num-channels", NULL);
	if (prop)
		priv->num_channels = *prop;

	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
	if (prop)
		priv->chfifo_len = *prop;

	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
	if (prop)
		priv->exec_units = *prop;

	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
	if (prop)
		priv->desc_types = *prop;

	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}
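
	/*
	 * Example node (SEC 2.1 class device; values are SoC-specific):
	 *	crypto@30000 {
	 *		compatible = "fsl,sec2.1", "fsl,sec2.0";
	 *		reg = <0x30000 0x10000>;
	 *		interrupts = <29 2>;
	 *		fsl,num-channels = <4>;
	 *		fsl,channel-fifo-len = <24>;
	 *		fsl,exec-units-mask = <0xfe>;
	 *		fsl,descriptor-types-mask = <0x12b0ebf>;
	 *	};
	 */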

	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	priv->chan = kzalloc(sizeof(struct talitos_channel) *
			     priv->num_channels, GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + TALITOS_CH_STRIDE * (i + 1);
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;

		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);

		priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
					     priv->fifo_len, GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}

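		/* start below zero: at most chfifo_len - 1 requests may be
		 * outstanding on a channel at any time */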
		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));
	}

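	/* the SEC can address 36 bits of physical memory */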
	err = dma_set_mask(dev, DMA_BIT_MASK(36));
	if (err)
		goto err_out;

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			char *name = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				name = t_alg->algt.alg.crypto.cra_driver_name;
				break;
			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				name =
				 t_alg->algt.alg.hash.halg.base.cra_driver_name;
				break;
			}
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					name);
				kfree(t_alg);
			} else
				list_add_tail(&t_alg->entry, &priv->alg_list);
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}

static const struct of_device_id talitos_match[] = {
	{
		.compatible = "fsl,sec2.0",
	},
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);

static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

module_platform_driver(talitos_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");