/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"

static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
			   bool is_sec1)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	if (!is_sec1)
		ptr->eptr = upper_32_bits(dma_addr);
}
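
/*
 * Added note (inferred from these helpers and from the 36-bit addressing
 * enable in reset_channel() below, not an original comment): SEC 2.x/3.x
 * h/w pointers carry an extended-address field (eptr) for the address bits
 * above 32, while SEC 1.x pointers are 32-bit only, which is why
 * to_talitos_ptr() skips eptr when is_sec1 is set.
 */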

static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned short len,
			       bool is_sec1)
{
	if (is_sec1) {
		ptr->res = 0;
		ptr->len1 = cpu_to_be16(len);
	} else {
		ptr->len = cpu_to_be16(len);
	}
}

static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
					   bool is_sec1)
{
	if (is_sec1)
		return be16_to_cpu(ptr->len1);
	else
		return be16_to_cpu(ptr->len);
}

static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent = 0;
}

/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned short len, void *data,
				   enum dma_data_direction dir)
{
	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);
	to_talitos_ptr(ptr, dma_addr, is_sec1);
	to_talitos_ptr_extent_clear(ptr, is_sec1);
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}

static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->chan[ch].reg + TALITOS_CCCR, TALITOS_CCCR_RESET);

	while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & TALITOS_CCCR_RESET)
	       && --timeout)
		cpu_relax();

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
		          TALITOS_CCCR_LO_IWSE);

	return 0;
}

static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	u32 mcr = TALITOS_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}

/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg + TALITOS_MDEUICR_LO,
		          TALITOS_MDEUICR_LO_ICE);

	return 0;
}

/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:	the SEC device to be used
 * @ch:		the SEC device channel to be used
 * @desc:	the descriptor to be processed by the device
 * @callback:	whom to call when processing is complete
 * @context:	a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
					   DMA_BIDIRECTIONAL);
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
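
/*
 * Illustrative sketch (added commentary, not part of the original driver):
 * a caller fills in a talitos_desc whose pointer fields are already
 * dma-mapped, submits it, and treats any return other than -EINPROGRESS
 * (e.g. -EAGAIN when the channel fifo is full) as "not enqueued", the same
 * pattern used by ipsec_esp() and common_nonsnoop() below.  The names
 * my_done() and my_cleanup() are hypothetical:
 *
 *	static void my_done(struct device *dev, struct talitos_desc *desc,
 *			    void *context, int error)
 *	{
 *		my_cleanup(context, error);	// runs outside the channel lock
 *	}
 *
 *	ret = talitos_submit(dev, ch, desc, my_done, my_context);
 *	if (ret != -EINPROGRESS)
 *		my_cleanup(my_context, ret);	// unmap and free locally
 */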

/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 sizeof(struct talitos_desc),
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}

/*
 * process completed requests for channels that have done status
 */
#define DEF_TALITOS_DONE(name, ch_done_mask)				\
static void talitos_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}
DEF_TALITOS_DONE(4ch, TALITOS_ISR_4CHDONE)
DEF_TALITOS_DONE(ch0_2, TALITOS_ISR_CH_0_2_DONE)
DEF_TALITOS_DONE(ch1_3, TALITOS_ISR_CH_1_3_DONE)
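
/*
 * Added note on the channel masks used above: as the bit tests here and in
 * talitos_error() below show, each channel owns a pair of ISR bits (done at
 * bit 2*ch, error at bit 2*ch + 1), so the 4-channel done mask covers bits
 * 0, 2, 4 and 6.
 */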

/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}

/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_AFEUISR),
			in_be32(priv->reg + TALITOS_AFEUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_DEUISR),
			in_be32(priv->reg + TALITOS_DEUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_MDEUISR),
			in_be32(priv->reg + TALITOS_MDEUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_RNGUISR),
			in_be32(priv->reg + TALITOS_RNGUISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_PKEUISR),
			in_be32(priv->reg + TALITOS_PKEUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_AESUISR),
			in_be32(priv->reg + TALITOS_AESUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_CRCUISR),
			in_be32(priv->reg + TALITOS_CRCUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_KEUISR),
			in_be32(priv->reg + TALITOS_KEUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_MDEUISR),
			in_be32(priv->reg + TALITOS_MDEUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_CRCUISR),
			in_be32(priv->reg + TALITOS_CRCUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}

/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0, reset_ch = 0;
	u32 v, v_lo;

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (!(isr & (1 << (ch * 2 + 1))))
			continue;

		error = -EINVAL;

		v = in_be32(priv->chan[ch].reg + TALITOS_CCPSR);
		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, "invalid execution unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (v_lo & TALITOS_CCPSR_LO_GB)
			dev_err(dev, "gather boundary error\n");
		if (v_lo & TALITOS_CCPSR_LO_GRL)
			dev_err(dev, "gather return/length error\n");
		if (v_lo & TALITOS_CCPSR_LO_SB)
			dev_err(dev, "scatter boundary error\n");
		if (v_lo & TALITOS_CCPSR_LO_SRL)
			dev_err(dev, "scatter return/length error\n");

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			       TALITOS_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || isr & ~TALITOS_ISR_4CHERR || isr_lo) {
		dev_err(dev, "done overflow, internal time out, or rngu error: "
		        "ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}

#define DEF_TALITOS_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}
DEF_TALITOS_INTERRUPT(4ch, TALITOS_ISR_4CHDONE, TALITOS_ISR_4CHERR, 0)
DEF_TALITOS_INTERRUPT(ch0_2, TALITOS_ISR_CH_0_2_DONE, TALITOS_ISR_CH_0_2_ERR, 0)
DEF_TALITOS_INTERRUPT(ch1_3, TALITOS_ISR_CH_1_3_DONE, TALITOS_ISR_CH_1_3_ERR, 1)

/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg + TALITOS_RNGUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO);
	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO_LO);

	return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg + TALITOS_RNGURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg + TALITOS_RNGUSR_LO) & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg + TALITOS_RNGUDSR_LO, 0);

	return 0;
}

static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	priv->rng.name		= dev_driver_string(dev),
	priv->rng.init		= talitos_rng_init,
	priv->rng.data_present	= talitos_rng_data_present,
	priv->rng.data_read	= talitos_rng_data_read,
	priv->rng.priv		= (unsigned long)dev;

	return hwrng_register(&priv->rng);
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	hwrng_unregister(&priv->rng);
}

/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
#define TALITOS_MAX_KEY_SIZE		96
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
	unsigned int authsize;
};

#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[HASH_MAX_BLOCK_SIZE];
	u8 bufnext[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	u64 nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;

	return 0;
}

static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;

	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * talitos_edesc - s/w-extended descriptor
 * @assoc_nents: number of segments in associated data scatterlist
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @assoc_chained: whether assoc is chained or not
 * @src_chained: whether src is chained or not
 * @dst_chained: whether dst is chained or not
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int assoc_nents;
	int src_nents;
	int dst_nents;
	bool assoc_chained;
	bool src_chained;
	bool dst_chained;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	struct talitos_ptr link_tbl[0];
};
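
/*
 * Added commentary on the link_tbl layout (a reading of the index arithmetic
 * in talitos_edesc_alloc() and ipsec_esp() below, not an original comment):
 * when dma_len is nonzero the table holds src_nents + dst_nents + 2 +
 * assoc_nents entries plus authsize bytes, used roughly as
 *
 *	link_tbl[0]                                        cipher-in (src)
 *	link_tbl[src_nents + 1]                            cipher-out (dst)
 *	link_tbl[src_nents + dst_nents + 2]                assoc data + IV
 *	link_tbl[src_nents + dst_nents + 2 + assoc_nents]  stashed/generated ICV
 */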

static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
			  unsigned int nents, enum dma_data_direction dir,
			  bool chained)
{
	if (unlikely(chained))
		while (sg) {
			dma_map_sg(dev, sg, 1, dir);
			sg = sg_next(sg);
		}
	else
		dma_map_sg(dev, sg, nents, dir);
	return nents;
}

static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
				   enum dma_data_direction dir)
{
	while (sg) {
		dma_unmap_sg(dev, sg, 1, dir);
		sg = sg_next(sg);
	}
}

static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst)
{
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (src != dst) {
		if (edesc->src_chained)
			talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
		else
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst) {
			if (edesc->dst_chained)
				talitos_unmap_sg_chain(dev, dst,
						       DMA_FROM_DEVICE);
			else
				dma_unmap_sg(dev, dst, dst_nents,
					     DMA_FROM_DEVICE);
		}
	} else
		if (edesc->src_chained)
			talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
		else
			dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}

static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	if (edesc->assoc_chained)
		talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE);
	else if (areq->assoclen)
		/* assoc_nents counts also for IV in non-contiguous cases */
		dma_unmap_sg(dev, areq->assoc,
			     edesc->assoc_nents ? edesc->assoc_nents - 1 : 1,
			     DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->dst_nents) {
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2 +
					   edesc->assoc_nents];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
		       icvdata, ctx->authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}

static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		if (edesc->dma_len)
			icvdata = &edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2 +
						   edesc->assoc_nents];
		else
			icvdata = &edesc->link_tbl[0];

		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
			     ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
			   int cryptlen, struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;

	while (n_sg--) {
		to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg), 0);
		link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
		link_tbl_ptr->j_extent = 0;
		link_tbl_ptr++;
		cryptlen -= sg_dma_len(sg);
		sg = sg_next(sg);
	}

	/* adjust (decrease) last one (or two) entry's len to cryptlen */
	link_tbl_ptr--;
	while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
		/* Empty this entry, and move to previous one */
		cryptlen += be16_to_cpu(link_tbl_ptr->len);
		link_tbl_ptr->len = 0;
		sg_count--;
		link_tbl_ptr--;
	}
	be16_add_cpu(&link_tbl_ptr->len, cryptlen);

	/* tag end of link table */
	link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;

	return sg_count;
}
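
/*
 * Worked example for the fix-up above (added, hypothetical numbers): with
 * cryptlen = 100 and two mapped segments of 64 bytes each, the walk leaves
 * cryptlen at -28; the trailing-entry adjustment then shrinks the last
 * entry from 64 to 36 bytes, so the table describes exactly 100 bytes and
 * ends with DESC_PTR_LNKTBL_RETURN.
 */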

/*
 * fill in and submit ipsec_esp descriptor
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     u64 seq, void (*callback) (struct device *dev,
						struct talitos_desc *desc,
						void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int authsize = ctx->authsize;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int sg_count, ret;
	int sg_link_tbl_len;

	/* hmac key */
	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
			       DMA_TO_DEVICE);

	/* hmac data */
	desc->ptr[1].len = cpu_to_be16(areq->assoclen + ivsize);
	if (edesc->assoc_nents) {
		int tbl_off = edesc->src_nents + edesc->dst_nents + 2;
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
			       sizeof(struct talitos_ptr), 0);
		desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;

		/* assoc_nents - 1 entries for assoc, 1 for IV */
		sg_count = sg_to_link_tbl(areq->assoc, edesc->assoc_nents - 1,
					  areq->assoclen, tbl_ptr);

		/* add IV to link table */
		tbl_ptr += sg_count - 1;
		tbl_ptr->j_extent = 0;
		tbl_ptr++;
		to_talitos_ptr(tbl_ptr, edesc->iv_dma, 0);
		tbl_ptr->len = cpu_to_be16(ivsize);
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;

		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	} else {
		if (areq->assoclen)
			to_talitos_ptr(&desc->ptr[1],
				       sg_dma_address(areq->assoc), 0);
		else
			to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, 0);
		desc->ptr[1].j_extent = 0;
	}

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0);
	desc->ptr[2].len = cpu_to_be16(ivsize);
	desc->ptr[2].j_extent = 0;
	/* Sync needed for the aead_givencrypt case */
	dma_sync_single_for_device(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
			       (char *)&ctx->key + ctx->authkeylen,
			       DMA_TO_DEVICE);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	desc->ptr[4].len = cpu_to_be16(cryptlen);
	desc->ptr[4].j_extent = authsize;

	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
							   : DMA_TO_DEVICE,
				  edesc->src_chained);

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
	} else {
		sg_link_tbl_len = cryptlen;

		if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
			sg_link_tbl_len = cryptlen + authsize;

		sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
					  &edesc->link_tbl[0]);
		if (sg_count > 1) {
			desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
			to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl, 0);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   edesc->dma_len,
						   DMA_BIDIRECTIONAL);
		} else {
			/* Only one segment now, so no link tbl needed */
			to_talitos_ptr(&desc->ptr[4],
				       sg_dma_address(areq->src), 0);
		}
	}

	/* cipher out */
	desc->ptr[5].len = cpu_to_be16(cryptlen);
	desc->ptr[5].j_extent = authsize;

	if (areq->src != areq->dst)
		sg_count = talitos_map_sg(dev, areq->dst,
					  edesc->dst_nents ? : 1,
					  DMA_FROM_DEVICE, edesc->dst_chained);

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
	} else {
		int tbl_off = edesc->src_nents + 1;
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
			       tbl_off * sizeof(struct talitos_ptr), 0);
		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
					  tbl_ptr);

		/* Add an entry to the link table for ICV data */
		tbl_ptr += sg_count - 1;
		tbl_ptr->j_extent = 0;
		tbl_ptr++;
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
		tbl_ptr->len = cpu_to_be16(authsize);

		/* icv data follows link tables */
		to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
			       (tbl_off + edesc->dst_nents + 1 +
				edesc->assoc_nents) *
			       sizeof(struct talitos_ptr), 0);
		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	}

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

/*
 * derive number of elements in scatterlist
 */
static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained)
{
	struct scatterlist *sg = sg_list;
	int sg_nents = 0;

	*chained = false;
	while (nbytes > 0) {
		sg_nents++;
		nbytes -= sg->length;
		if (!sg_is_last(sg) && (sg + 1)->length == 0)
			*chained = true;
		sg = sg_next(sg);
	}

	return sg_nents;
}

/*
 * allocate and map the extended descriptor
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *assoc,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags,
						 bool encrypt)
{
	struct talitos_edesc *edesc;
	int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len;
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;

	if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (ivsize)
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);

	if (assoclen) {
		/*
		 * Currently it is assumed that iv is provided whenever assoc
		 * is.
		 */
		BUG_ON(!iv);

		assoc_nents = sg_count(assoc, assoclen, &assoc_chained);
		talitos_map_sg(dev, assoc, assoc_nents, DMA_TO_DEVICE,
			       assoc_chained);
		assoc_nents = (assoc_nents == 1) ? 0 : assoc_nents;

		if (assoc_nents || sg_dma_address(assoc) + assoclen != iv_dma)
			assoc_nents = assoc_nents ? assoc_nents + 1 : 2;
	}

	if (!dst || dst == src) {
		src_nents = sg_count(src, cryptlen + authsize, &src_chained);
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = dst ? src_nents : 0;
	} else { /* dst && dst != src*/
		src_nents = sg_count(src, cryptlen + (encrypt ? 0 : authsize),
				     &src_chained);
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = sg_count(dst, cryptlen + (encrypt ? authsize : 0),
				     &dst_chained);
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for ICV and generated ICV (+ 2),
	 * and the ICV data itself
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (assoc_nents || src_nents || dst_nents) {
		dma_len = (src_nents + dst_nents + 2 + assoc_nents) *
			  sizeof(struct talitos_ptr) + authsize;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
		alloc_len += icv_stashing ? authsize : 0;
	}

	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc) {
		if (assoc_chained)
			talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
		else if (assoclen)
			dma_unmap_sg(dev, assoc,
				     assoc_nents ? assoc_nents - 1 : 1,
				     DMA_TO_DEVICE);

		if (iv_dma)
			dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);

		dev_err(dev, "could not allocate edescriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_chained = src_chained;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len)
		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);

	return edesc;
}

static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
					      int icv_stashing, bool encrypt)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);

	return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst,
				   iv, areq->assoclen, areq->cryptlen,
				   ctx->authsize, ivsize, icv_stashing,
				   areq->base.flags, encrypt);
}

static int aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 0, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return ipsec_esp(edesc, req, 0, ipsec_esp_encrypt_done);
}

static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int authsize = ctx->authsize;
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	req->cryptlen -= authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */
		edesc->desc.hdr_lo = 0;

		return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	if (edesc->dma_len)
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2 +
					   edesc->assoc_nents];
	else
		icvdata = &edesc->link_tbl[0];

	sg = sg_last(req->src, edesc->src_nents ? : 1);

	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
	       ctx->authsize);

	return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_swauth_done);
}

static int aead_givencrypt(struct aead_givcrypt_request *req)
{
	struct aead_request *areq = &req->areq;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(areq, req->giv, 0, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
	/* avoid consecutive packets going out with same IV */
	*(__be64 *)req->giv ^= cpu_to_be64(req->seq);

	return ipsec_esp(edesc, areq, req->seq, ipsec_esp_encrypt_done);
}

static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
			     const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);

	memcpy(&ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
				 struct scatterlist *dst, unsigned int len,
				 struct talitos_edesc *edesc)
{
	talitos_sg_unmap(dev, edesc, src, dst);
}

static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

static void ablkcipher_done(struct device *dev,
			    struct talitos_desc *desc, void *context,
			    int err)
{
	struct ablkcipher_request *areq = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	common_nonsnoop_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}

int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
			  unsigned int len, struct talitos_edesc *edesc,
			  enum dma_data_direction dir, struct talitos_ptr *ptr)
{
	int sg_count;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);
	to_talitos_ptr_extent_clear(ptr, is_sec1);

	sg_count = talitos_map_sg(dev, src, edesc->src_nents ? : 1, dir,
				  edesc->src_chained);

	if (sg_count == 1) {
		to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
	} else {
		sg_count = sg_to_link_tbl(src, sg_count, len,
					  &edesc->link_tbl[0]);
		if (sg_count > 1) {
			to_talitos_ptr(ptr, edesc->dma_link_tbl, 0);
			ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   edesc->dma_len,
						   DMA_BIDIRECTIONAL);
		} else {
			/* Only one segment now, so no link tbl needed */
			to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
		}
	}
	return sg_count;
}

void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
			    unsigned int len, struct talitos_edesc *edesc,
			    enum dma_data_direction dir,
			    struct talitos_ptr *ptr, int sg_count)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);
	to_talitos_ptr_extent_clear(ptr, is_sec1);

	if (dir != DMA_NONE)
		sg_count = talitos_map_sg(dev, dst, edesc->dst_nents ? : 1,
					  dir, edesc->dst_chained);

	if (sg_count == 1) {
		to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
	} else {
		struct talitos_ptr *link_tbl_ptr =
			&edesc->link_tbl[edesc->src_nents + 1];

		to_talitos_ptr(ptr, edesc->dma_link_tbl +
					      (edesc->src_nents + 1) *
					      sizeof(struct talitos_ptr), 0);
		ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
		sg_count = sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr);
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	}
}

static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct ablkcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->nbytes;
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	int sg_count, ret;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
	to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
	to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
			       (char *)&ctx->key, DMA_TO_DEVICE);

	/*
	 * cipher in
	 */
	sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
					 (areq->src == areq->dst) ?
					  DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
					  &desc->ptr[3]);

	/* cipher out */
	map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
			       (areq->src == areq->dst) ? DMA_NONE
							: DMA_FROM_DEVICE,
			       &desc->ptr[4], sg_count);

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
						    areq, bool encrypt)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);

	return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst,
				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
				   areq->base.flags, encrypt);
}

static int ablkcipher_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static int ablkcipher_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc);

	/* When using hashctx-in, must unmap it. */
	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
					 DMA_TO_DEVICE);

	if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
					 DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

}

static void ahash_done(struct device *dev,
		       struct talitos_desc *desc, void *context,
		       int err)
{
	struct ahash_request *areq = context;
	struct talitos_edesc *edesc =
		 container_of(desc, struct talitos_edesc, desc);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	if (!req_ctx->last && req_ctx->to_hash_later) {
		/* Position any partial block for next update/final/finup */
		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
		req_ctx->nbuf = req_ctx->to_hash_later;
	}
	common_nonsnoop_hash_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}

static int common_nonsnoop_hash(struct talitos_edesc *edesc,
				struct ahash_request *areq, unsigned int length,
				void (*callback) (struct device *dev,
						  struct talitos_desc *desc,
						  void *context, int error))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	int ret;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* hash context in */
	if (!req_ctx->first || req_ctx->swinit) {
		map_single_talitos_ptr(dev, &desc->ptr[1],
				       req_ctx->hw_context_size,
				       (char *)req_ctx->hw_context,
				       DMA_TO_DEVICE);
		req_ctx->swinit = 0;
	} else {
		desc->ptr[1] = zero_entry;
		/* Indicate next op is not the first. */
		req_ctx->first = 0;
	}

	/* HMAC key */
	if (ctx->keylen)
		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
				       (char *)&ctx->key, DMA_TO_DEVICE);
	else
		desc->ptr[2] = zero_entry;

	/*
	 * data in
	 */
	map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc,
			      DMA_TO_DEVICE, &desc->ptr[3]);

	/* fifth DWORD empty */
	desc->ptr[4] = zero_entry;

	/* hash/HMAC out -or- hash context out */
	if (req_ctx->last)
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       crypto_ahash_digestsize(tfm),
				       areq->result, DMA_FROM_DEVICE);
	else
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       req_ctx->hw_context_size,
				       req_ctx->hw_context, DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_hash_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
					       unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0,
				   nbytes, 0, 0, 0, areq->base.flags, false);
}

static int ahash_init(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	/* Initialize the context */
	req_ctx->nbuf = 0;
	req_ctx->first = 1; /* first indicates h/w must init its context */
	req_ctx->swinit = 0; /* assume h/w init of context */
	req_ctx->hw_context_size =
		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;

	return 0;
}

/*
 * on h/w without explicit sha224 support, we initialize h/w context
 * manually with sha224 constants, and tell it to run sha256.
 */
static int ahash_init_sha224_swinit(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	ahash_init(areq);
	req_ctx->swinit = 1; /* prevent h/w initting context with sha256 values */

	req_ctx->hw_context[0] = SHA224_H0;
	req_ctx->hw_context[1] = SHA224_H1;
	req_ctx->hw_context[2] = SHA224_H2;
	req_ctx->hw_context[3] = SHA224_H3;
	req_ctx->hw_context[4] = SHA224_H4;
	req_ctx->hw_context[5] = SHA224_H5;
	req_ctx->hw_context[6] = SHA224_H6;
	req_ctx->hw_context[7] = SHA224_H7;

	/* init 64-bit count */
	req_ctx->hw_context[8] = 0;
	req_ctx->hw_context[9] = 0;

	return 0;
}

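/*
 * buffer input until more than one block is available; hash only whole
 * blocks on intermediate updates and carry the remainder (or one full
 * block, when not the last request) over to bufnext for the next call.
 */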
static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int nbytes_to_hash;
	unsigned int to_hash_later;
	unsigned int nsg;
	bool chained;

	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
		/* Buffer up to one whole block */
		sg_copy_to_buffer(areq->src,
				  sg_count(areq->src, nbytes, &chained),
				  req_ctx->buf + req_ctx->nbuf, nbytes);
		req_ctx->nbuf += nbytes;
		return 0;
	}

	/* At least (blocksize + 1) bytes are available to hash */
	nbytes_to_hash = nbytes + req_ctx->nbuf;
	to_hash_later = nbytes_to_hash & (blocksize - 1);

	if (req_ctx->last)
		to_hash_later = 0;
	else if (to_hash_later)
		/* There is a partial block. Hash the full block(s) now */
		nbytes_to_hash -= to_hash_later;
	else {
		/* Keep one block buffered */
		nbytes_to_hash -= blocksize;
		to_hash_later = blocksize;
	}

	/* Chain in any previously buffered data */
	if (req_ctx->nbuf) {
		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
		sg_init_table(req_ctx->bufsl, nsg);
		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
		if (nsg > 1)
			scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
		req_ctx->psrc = req_ctx->bufsl;
	} else
		req_ctx->psrc = areq->src;

	if (to_hash_later) {
		int nents = sg_count(areq->src, nbytes, &chained);
		sg_pcopy_to_buffer(areq->src, nents,
				   req_ctx->bufnext,
				   to_hash_later,
				   nbytes - to_hash_later);
	}
	req_ctx->to_hash_later = to_hash_later;

	/* Allocate extended descriptor */
	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template;

	/* On last one, request SEC to pad; otherwise continue */
	if (req_ctx->last)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
	else
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;

	/* request SEC to INIT hash. */
	if (req_ctx->first && !req_ctx->swinit)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;

	/* When the tfm context has a keylen, it's an HMAC.
	 * A first or last (ie. not middle) descriptor must request HMAC.
	 */
	if (ctx->keylen && (req_ctx->first || req_ctx->last))
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;

	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
				    ahash_done);
}

static int ahash_update(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 0;

	return ahash_process_req(areq, areq->nbytes);
}

static int ahash_final(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 1;

	return ahash_process_req(areq, 0);
}

static int ahash_finup(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 1;

	return ahash_process_req(areq, areq->nbytes);
}

static int ahash_digest(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

	ahash->init(areq);
	req_ctx->last = 1;

	return ahash_process_req(areq, areq->nbytes);
}

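/*
 * HMAC setkey support: keys longer than the block size are first digested
 * down to the digest size with a synchronous ahash request on the same tfm.
 */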
struct keyhash_result {
	struct completion completion;
	int err;
};

static void keyhash_complete(struct crypto_async_request *req, int err)
{
	struct keyhash_result *res = req->data;

	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->completion);
}

static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
		   u8 *hash)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));

	struct scatterlist sg[1];
	struct ahash_request *req;
	struct keyhash_result hresult;
	int ret;

	init_completion(&hresult.completion);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* Keep tfm keylen == 0 during hash of the long key */
	ctx->keylen = 0;
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   keyhash_complete, &hresult);

	sg_init_one(&sg[0], key, keylen);

	ahash_request_set_crypt(req, sg, hash, keylen);
	ret = crypto_ahash_digest(req);
	switch (ret) {
	case 0:
		break;
	case -EINPROGRESS:
	case -EBUSY:
		ret = wait_for_completion_interruptible(
			&hresult.completion);
		if (!ret)
			ret = hresult.err;
		break;
	default:
		break;
	}
	ahash_request_free(req);

	return ret;
}

static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int keysize = keylen;
	u8 hash[SHA512_DIGEST_SIZE];
	int ret;

	if (keylen <= blocksize)
		memcpy(ctx->key, key, keysize);
	else {
		/* Must get the hash of the long key */
		ret = keyhash(tfm, key, keylen, hash);

		if (ret) {
			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
			return -EINVAL;
		}

		keysize = digestsize;
		memcpy(ctx->key, hash, digestsize);
	}

	ctx->keylen = keysize;

	return 0;
}


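/*
 * algorithm template: a crypto API algorithm definition paired with the
 * SEC descriptor header template needed to execute it.
 */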
struct talitos_alg_template {
	u32 type;
	union {
		struct crypto_alg crypto;
		struct ahash_alg hash;
	} alg;
	__be32 desc_hdr_template;
};

static struct talitos_alg_template driver_algs[] = {
	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha1),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA1_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_AESU |
		                     DESC_HDR_MODE0_AESU_CBC |
		                     DESC_HDR_SEL1_MDEUA |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA1_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_DEU |
		                     DESC_HDR_MODE0_DEU_CBC |
		                     DESC_HDR_MODE0_DEU_3DES |
		                     DESC_HDR_SEL1_MDEUA |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{       .type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha224),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha224-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA224_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha224-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA224_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_DEU |
		                     DESC_HDR_MODE0_DEU_CBC |
		                     DESC_HDR_MODE0_DEU_3DES |
		                     DESC_HDR_SEL1_MDEUA |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha256),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA256_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_AESU |
		                     DESC_HDR_MODE0_AESU_CBC |
		                     DESC_HDR_SEL1_MDEUA |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA256_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_DEU |
		                     DESC_HDR_MODE0_DEU_CBC |
		                     DESC_HDR_MODE0_DEU_3DES |
		                     DESC_HDR_SEL1_MDEUA |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha384),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha384-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA384_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_AESU |
		                     DESC_HDR_MODE0_AESU_CBC |
		                     DESC_HDR_SEL1_MDEUB |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha384-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA384_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_DEU |
		                     DESC_HDR_MODE0_DEU_CBC |
		                     DESC_HDR_MODE0_DEU_3DES |
		                     DESC_HDR_SEL1_MDEUB |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha512),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha512-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA512_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_AESU |
		                     DESC_HDR_MODE0_AESU_CBC |
		                     DESC_HDR_SEL1_MDEUB |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha512-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA512_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_DEU |
		                     DESC_HDR_MODE0_DEU_CBC |
		                     DESC_HDR_MODE0_DEU_3DES |
		                     DESC_HDR_SEL1_MDEUB |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(md5),cbc(aes))",
			.cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = MD5_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_AESU |
		                     DESC_HDR_MODE0_AESU_CBC |
		                     DESC_HDR_SEL1_MDEUA |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = MD5_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_DEU |
		                     DESC_HDR_MODE0_DEU_CBC |
		                     DESC_HDR_MODE0_DEU_3DES |
		                     DESC_HDR_SEL1_MDEUA |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	/* ABLKCIPHER algorithms. */
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			             DESC_HDR_SEL0_DEU |
		                     DESC_HDR_MODE0_DEU_CBC |
		                     DESC_HDR_MODE0_DEU_3DES,
	},
	/* AHASH algorithms. */
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "md5",
				.cra_driver_name = "md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "hmac-md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	}
};

struct talitos_crypto_alg {
	struct list_head entry;
	struct device *dev;
	struct talitos_alg_template algt;
};

static int talitos_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
	struct talitos_private *priv;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
		talitos_alg = container_of(__crypto_ahash_alg(alg),
					   struct talitos_crypto_alg,
					   algt.alg.hash);
	else
		talitos_alg = container_of(alg, struct talitos_crypto_alg,
					   algt.alg.crypto);

	/* update context with ptr to dev */
	ctx->dev = talitos_alg->dev;

	/* assign SEC channel to tfm in round-robin fashion */
	priv = dev_get_drvdata(ctx->dev);
	ctx->ch = atomic_inc_return(&priv->last_chan) &
		  (priv->num_channels - 1);

	/* copy descriptor header template value */
	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;

	/* select done notification */
	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;

	return 0;
}

static int talitos_cra_init_aead(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	/* random first IV */
	get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);

	return 0;
}

static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	ctx->keylen = 0;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct talitos_ahash_req_ctx));

	return 0;
}

/*
 * given the alg's descriptor header template, determine whether descriptor
 * type and primary/secondary execution units required match the hw
 * capabilities description provided in the device tree node.
 */
static int hw_supports(struct device *dev, __be32 desc_hdr_template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ret;

	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);

	if (SECONDARY_EU(desc_hdr_template))
		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
		              & priv->exec_units);

	return ret;
}

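/*
 * tear down in reverse order of probe: unregister algorithms and the RNG,
 * free the per-channel request fifos, release the irq line(s), kill the
 * done tasklet(s) and unmap the registers.
 */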
static int talitos_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg, *n;
	int i;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		switch (t_alg->algt.type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_alg(&t_alg->algt.alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&t_alg->algt.alg.hash);
			break;
		}
		list_del(&t_alg->entry);
		kfree(t_alg);
	}

	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
		talitos_unregister_rng(dev);

	for (i = 0; i < priv->num_channels; i++)
		kfree(priv->chan[i].fifo);

	kfree(priv->chan);

	for (i = 0; i < 2; i++)
		if (priv->irq[i]) {
			free_irq(priv->irq[i], dev);
			irq_dispose_mapping(priv->irq[i]);
		}

	tasklet_kill(&priv->done_task[0]);
	if (priv->irq[1])
		tasklet_kill(&priv->done_task[1]);

	iounmap(priv->reg);

	kfree(priv);

	return 0;
}

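/*
 * instantiate an algorithm from its template: hook up the type-specific
 * entry points and apply device quirks (no HMAC without TALITOS_FTR_HMAC_OK,
 * software sha224 init where the h/w lacks it).
 */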
static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
						           *template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->algt = *template;

	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
		alg->cra_ablkcipher.geniv = "eseqiv";
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init_aead;
		alg->cra_type = &crypto_aead_type;
		alg->cra_aead.setkey = aead_setkey;
		alg->cra_aead.setauthsize = aead_setauthsize;
		alg->cra_aead.encrypt = aead_encrypt;
		alg->cra_aead.decrypt = aead_decrypt;
		alg->cra_aead.givencrypt = aead_givencrypt;
		alg->cra_aead.geniv = "<built-in>";
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_type = &crypto_ahash_type;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		t_alg->algt.alg.hash.setkey = ahash_setkey;

		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			kfree(t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		return ERR_PTR(-EINVAL);
	}

	alg->cra_module = THIS_MODULE;
	alg->cra_priority = TALITOS_CRA_PRIORITY;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_alg->dev = dev;

	return t_alg;
}

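/*
 * map and request the irq line(s) described in the device tree node: with a
 * single line all four channels share one handler; with two lines channels
 * 0/2 and 1/3 each get their own.
 */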
static int talitos_probe_irq(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;

	priv->irq[0] = irq_of_parse_and_map(np, 0);
	if (!priv->irq[0]) {
		dev_err(dev, "failed to map irq\n");
		return -EINVAL;
	}

	priv->irq[1] = irq_of_parse_and_map(np, 1);

	/* get the primary irq line */
	if (!priv->irq[1]) {
		err = request_irq(priv->irq[0], talitos_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	err = request_irq(priv->irq[0], talitos_interrupt_ch0_2, 0,
			  dev_driver_string(dev), dev);
	if (err)
		goto primary_out;

	/* get the secondary irq line */
	err = request_irq(priv->irq[1], talitos_interrupt_ch1_3, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request secondary irq\n");
		irq_dispose_mapping(priv->irq[1]);
		priv->irq[1] = 0;
	}

	return err;

primary_out:
	if (err) {
		dev_err(dev, "failed to request primary irq\n");
		irq_dispose_mapping(priv->irq[0]);
		priv->irq[0] = 0;
	}

	return err;
}

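/*
 * probe: map registers, read SEC capabilities from the device tree, set
 * feature flags from the compatible string, allocate per-channel request
 * fifos, initialize the h/w, then register the RNG and supported algorithms.
 */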
static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	const unsigned int *prop;
	int i, err;

	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->alg_list);

	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	spin_lock_init(&priv->reg_lock);

	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

	if (!priv->irq[1]) {
		tasklet_init(&priv->done_task[0], talitos_done_4ch,
			     (unsigned long)dev);
	} else {
		tasklet_init(&priv->done_task[0], talitos_done_ch0_2,
			     (unsigned long)dev);
		tasklet_init(&priv->done_task[1], talitos_done_ch1_3,
			     (unsigned long)dev);
	}

	priv->reg = of_iomap(np, 0);
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	prop = of_get_property(np, "fsl,num-channels", NULL);
	if (prop)
		priv->num_channels = *prop;

	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
	if (prop)
		priv->chfifo_len = *prop;

	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
	if (prop)
		priv->exec_units = *prop;

	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
	if (prop)
		priv->desc_types = *prop;

	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}

	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	if (of_device_is_compatible(np, "fsl,sec1.0"))
		priv->features |= TALITOS_FTR_SEC1;

	priv->chan = kzalloc(sizeof(struct talitos_channel) *
			     priv->num_channels, GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + TALITOS_CH_STRIDE * (i + 1);
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;

		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);

		priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
					     priv->fifo_len, GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}

		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));
	}

	dma_set_mask(dev, DMA_BIT_MASK(36));

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			char *name = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				name = t_alg->algt.alg.crypto.cra_driver_name;
				break;
			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				name =
				 t_alg->algt.alg.hash.halg.base.cra_driver_name;
				break;
			}
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					name);
				kfree(t_alg);
			} else
				list_add_tail(&t_alg->entry, &priv->alg_list);
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}

static const struct of_device_id talitos_match[] = {
	{
		.compatible = "fsl,sec2.0",
	},
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);

static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

module_platform_driver(talitos_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");