Commit 8ad06e56 authored by Linus Torvalds

Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto updates from Herbert Xu:
 "Algorithms:
   - add private key generation to ecdh

  Drivers:
   - add generic gcm(aes) to aesni-intel
   - add SafeXcel EIP197 crypto engine driver
   - add ecb(aes), cfb(aes) and ecb(des3_ede) to cavium
   - add support for CNN55XX adapters in cavium
   - add ctr mode to chcr
   - add support for gcm(aes) to omap"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (140 commits)
  crypto: testmgr - Reenable sha1/aes in FIPS mode
  crypto: ccp - Release locks before returning
  crypto: cavium/nitrox - dma_mapping_error() returns bool
  crypto: doc - fix typo in docs
  Documentation/bindings: Document the SafeXel cryptographic engine driver
  crypto: caam - fix gfp allocation flags (part II)
  crypto: caam - fix gfp allocation flags (part I)
  crypto: drbg - Fixes panic in wait_for_completion call
  crypto: caam - make of_device_ids const.
  crypto: vmx - remove unnecessary check
  crypto: n2 - make of_device_ids const
  crypto: inside-secure - use the base_end pointer in ring rollback
  crypto: inside-secure - increase the batch size
  crypto: inside-secure - only dequeue when needed
  crypto: inside-secure - get the backlog before dequeueing the request
  crypto: inside-secure - stop requeueing failed requests
  crypto: inside-secure - use one queue per hw ring
  crypto: inside-secure - update the context and request later
  crypto: inside-secure - align the cipher and hash send functions
  crypto: inside-secure - optimize DSE bufferability control
  ...
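
One of the items above adds a generic gcm(aes) AEAD to aesni-intel (registered further down in this diff as "generic-gcm-aesni"). A minimal sketch of how a kernel caller would exercise such an AEAD through the crypto API follows; it is illustrative only and not part of this commit, and the 16-byte tag, buffer layout and simplified error handling are assumptions.

    #include <crypto/aead.h>
    #include <linux/scatterlist.h>

    /*
     * Sketch only: encrypt in place with gcm(aes).  sg must cover assoclen
     * bytes of AAD followed by cryptlen bytes of plaintext plus 16 bytes of
     * room for the tag, and iv points to a 12-byte IV.  A real caller must
     * handle -EINPROGRESS/-EBUSY from crypto_aead_encrypt() with a
     * completion callback; that is omitted here.
     */
    static int example_gcm_encrypt(const u8 *key, unsigned int keylen,
                                   u8 *iv, struct scatterlist *sg,
                                   unsigned int assoclen, unsigned int cryptlen)
    {
            struct crypto_aead *tfm;
            struct aead_request *req;
            int err;

            tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_aead_setkey(tfm, key, keylen) ?:
                  crypto_aead_setauthsize(tfm, 16);
            if (err)
                    goto out_free_tfm;

            req = aead_request_alloc(tfm, GFP_KERNEL);
            if (!req) {
                    err = -ENOMEM;
                    goto out_free_tfm;
            }

            aead_request_set_callback(req, 0, NULL, NULL);
            aead_request_set_ad(req, assoclen);
            aead_request_set_crypt(req, sg, sg, cryptlen, iv);

            err = crypto_aead_encrypt(req);

            aead_request_free(req);
    out_free_tfm:
            crypto_free_aead(tfm);
            return err;
    }
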
......@@ -155,9 +155,9 @@ Code Example For Use of Operational State Memory With SHASH
char ctx[];
};
static struct sdesc init_sdesc(struct crypto_shash *alg)
static struct sdesc *init_sdesc(struct crypto_shash *alg)
{
struct sdesc sdesc;
struct sdesc *sdesc;
int size;
size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
......@@ -169,15 +169,16 @@ Code Example For Use of Operational State Memory With SHASH
return sdesc;
}
static int calc_hash(struct crypto_shashalg,
const unsigned chardata, unsigned int datalen,
unsigned chardigest) {
struct sdesc sdesc;
static int calc_hash(struct crypto_shash *alg,
const unsigned char *data, unsigned int datalen,
unsigned char *digest)
{
struct sdesc *sdesc;
int ret;
sdesc = init_sdesc(alg);
if (IS_ERR(sdesc)) {
pr_info("trusted_key: can't alloc %s\n", hash_alg);
pr_info("can't alloc sdesc\n");
return PTR_ERR(sdesc);
}
......@@ -186,6 +187,23 @@ Code Example For Use of Operational State Memory With SHASH
return ret;
}
static int test_hash(const unsigned char *data, unsigned int datalen,
unsigned char *digest)
{
struct crypto_shash *alg;
char *hash_alg_name = "sha1-padlock-nano";
int ret;
alg = crypto_alloc_shash(hash_alg_name, CRYPTO_ALG_TYPE_SHASH, 0);
if (IS_ERR(alg)) {
pr_info("can't alloc alg %s\n", hash_alg_name);
return PTR_ERR(alg);
}
ret = calc_hash(alg, data, datalen, digest);
crypto_free_shash(alg);
return ret;
}
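
Taken together, the corrected documentation example reads roughly as follows. This is a consolidated sketch: the middle of init_sdesc() is elided in the hunk above and is reconstructed here (kmalloc of the descriptor, setting tfm/flags), so treat those lines as assumptions.

    #include <crypto/hash.h>
    #include <linux/err.h>
    #include <linux/slab.h>

    struct sdesc {
            struct shash_desc shash;
            char ctx[];
    };

    static struct sdesc *init_sdesc(struct crypto_shash *alg)
    {
            struct sdesc *sdesc;
            int size;

            size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
            sdesc = kmalloc(size, GFP_KERNEL);
            if (!sdesc)
                    return ERR_PTR(-ENOMEM);
            sdesc->shash.tfm = alg;
            sdesc->shash.flags = 0x0;
            return sdesc;
    }

    static int calc_hash(struct crypto_shash *alg,
                         const unsigned char *data, unsigned int datalen,
                         unsigned char *digest)
    {
            struct sdesc *sdesc;
            int ret;

            sdesc = init_sdesc(alg);
            if (IS_ERR(sdesc)) {
                    pr_info("can't alloc sdesc\n");
                    return PTR_ERR(sdesc);
            }

            ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest);
            kfree(sdesc);
            return ret;
    }
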
Code Example For Random Number Generator Usage
----------------------------------------------
......@@ -195,8 +213,8 @@ Code Example For Random Number Generator Usage
static int get_random_numbers(u8 *buf, unsigned int len)
{
struct crypto_rngrng = NULL;
chardrbg = "drbg_nopr_sha256"; /* Hash DRBG with SHA-256, no PR */
struct crypto_rng *rng = NULL;
char *drbg = "drbg_nopr_sha256"; /* Hash DRBG with SHA-256, no PR */
int ret;
if (!buf || !len) {
......@@ -207,7 +225,7 @@ Code Example For Random Number Generator Usage
rng = crypto_alloc_rng(drbg, 0, 0);
if (IS_ERR(rng)) {
pr_debug("could not allocate RNG handle for %s\n", drbg);
return -PTR_ERR(rng);
return PTR_ERR(rng);
}
ret = crypto_rng_get_bytes(rng, buf, len);
......
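
For reference, the RNG example from the same hunk, consolidated into one piece. The error-reporting and cleanup lines elided above are reconstructed and should be read as assumptions rather than a verbatim copy of the documentation.

    static int get_random_numbers(u8 *buf, unsigned int len)
    {
            struct crypto_rng *rng = NULL;
            char *drbg = "drbg_nopr_sha256"; /* Hash DRBG with SHA-256, no PR */
            int ret;

            if (!buf || !len) {
                    pr_debug("No output buffer provided\n");
                    return -EINVAL;
            }

            rng = crypto_alloc_rng(drbg, 0, 0);
            if (IS_ERR(rng)) {
                    pr_debug("could not allocate RNG handle for %s\n", drbg);
                    return PTR_ERR(rng);
            }

            ret = crypto_rng_get_bytes(rng, buf, len);
            if (ret < 0)
                    pr_debug("generation of random numbers failed\n");

            crypto_free_rng(rng);
            return ret;
    }
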
......@@ -327,7 +327,7 @@ boundary. Non-aligned data can be used as well, but may require more
operations of the kernel which would defeat the speed gains obtained
from the zero-copy interface.
The system-interent limit for the size of one zero-copy operation is 16
The system-inherent limit for the size of one zero-copy operation is 16
pages. If more data is to be sent to AF_ALG, user space must slice the
input into segments with a maximum size of 16 pages.
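
A user-space sketch of that slicing, assuming a zero-copy path built on vmsplice()/splice() into an already accepted AF_ALG operation socket opfd. The pipe setup, algorithm binding and MSG_MORE handling are omitted; the 16-page figure comes from the paragraph above, and partial vmsplice()/splice() writes are treated as errors for brevity.

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <sys/uio.h>
    #include <unistd.h>

    /* Feed len bytes to opfd through pipefd in chunks of at most 16 pages. */
    static int send_zerocopy(int opfd, int pipefd[2],
                             const unsigned char *buf, size_t len)
    {
            size_t max = 16 * (size_t)sysconf(_SC_PAGESIZE);

            while (len) {
                    size_t chunk = len > max ? max : len;
                    struct iovec iov = {
                            .iov_base = (void *)buf,
                            .iov_len  = chunk,
                    };

                    if (vmsplice(pipefd[1], &iov, 1, 0) != (ssize_t)chunk)
                            return -1;
                    if (splice(pipefd[0], NULL, opfd, NULL, chunk, 0) !=
                        (ssize_t)chunk)
                            return -1;

                    buf += chunk;
                    len -= chunk;
            }
            return 0;
    }
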
......
Inside Secure SafeXcel cryptographic engine
Required properties:
- compatible: Should be "inside-secure,safexcel-eip197".
- reg: Base physical address of the engine and length of memory mapped region.
- interrupts: Interrupt numbers for the rings and engine.
- interrupt-names: Should be "ring0", "ring1", "ring2", "ring3", "eip", "mem".
Optional properties:
- clocks: Reference to the crypto engine clock.
- dma-mask: The address mask limitation. Defaults to 64.
Example:
crypto: crypto@800000 {
compatible = "inside-secure,safexcel-eip197";
reg = <0x800000 0x200000>;
interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "mem", "ring0", "ring1", "ring2", "ring3",
"eip";
clocks = <&cpm_syscon0 1 26>;
dma-mask = <0xff 0xffffffff>;
status = "disabled";
};
......@@ -6,8 +6,7 @@ Required properties:
- interrupts: Should contain the five crypto engines interrupts in numeric
order. These are global system and four descriptor rings.
- clocks: the clock used by the core
- clock-names: the names of the clock listed in the clocks property. These are
"ethif", "cryp"
- clock-names: Must contain "cryp".
- power-domains: Must contain a reference to the PM domain.
......@@ -20,8 +19,7 @@ Example:
<GIC_SPI 84 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 91 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 97 IRQ_TYPE_LEVEL_LOW>;
clocks = <&topckgen CLK_TOP_ETHIF_SEL>,
<&ethsys CLK_ETHSYS_CRYPTO>;
clock-names = "ethif","cryp";
clocks = <&ethsys CLK_ETHSYS_CRYPTO>;
clock-names = "cryp";
power-domains = <&scpsys MT2701_POWER_DOMAIN_ETH>;
};
......@@ -2,7 +2,9 @@ Device-Tree bindings for Mediatek random number generator
found in Mediatek SoC family
Required properties:
- compatible : Should be "mediatek,mt7623-rng"
- compatible : Should be
"mediatek,mt7622-rng", "mediatek,mt7623-rng" : for MT7622
"mediatek,mt7623-rng" : for MT7623
- clocks : list of clock specifiers, corresponding to
entries in clock-names property;
- clock-names : Should contain "rng" entries;
......
......@@ -5,6 +5,13 @@ Required properties:
- reg : base address to sample from
- period : wait time in microseconds to use between samples
Optional properties:
- quality : estimated number of bits of true entropy per 1024 bits read from the
rng. Defaults to zero which causes the kernel's default quality to
be used instead. Note that the default quality is usually zero
which disables using this rng to automatically fill the kernel's
entropy pool.
N.B. currently 'reg' must be four bytes wide and aligned
Example:
......
......@@ -3746,6 +3746,13 @@ S: Supported
F: drivers/infiniband/hw/cxgb4/
F: include/uapi/rdma/cxgb4-abi.h
CXGB4 CRYPTO DRIVER (chcr)
M: Harsh Jain <harsh@chelsio.com>
L: linux-crypto@vger.kernel.org
W: http://www.chelsio.com
S: Supported
F: drivers/crypto/chelsio
CXGB4VF ETHERNET DRIVER (CXGB4VF)
M: Casey Leedom <leedom@chelsio.com>
L: netdev@vger.kernel.org
......@@ -6647,6 +6654,12 @@ F: Documentation/input/multi-touch-protocol.rst
F: drivers/input/input-mt.c
K: \b(ABS|SYN)_MT_
INSIDE SECURE CRYPTO DRIVER
M: Antoine Tenart <antoine.tenart@free-electrons.com>
F: drivers/crypto/inside-secure/
S: Maintained
L: linux-crypto@vger.kernel.org
INTEL ASoC BDW/HSW DRIVERS
M: Jie Yang <yang.jie@linux.intel.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
......@@ -8306,6 +8319,11 @@ L: linux-wireless@vger.kernel.org
S: Maintained
F: drivers/net/wireless/mediatek/mt7601u/
MEDIATEK RANDOM NUMBER GENERATOR SUPPORT
M: Sean Wang <sean.wang@mediatek.com>
S: Maintained
F: drivers/char/hw_random/mtk-rng.c
MEGACHIPS STDPXXXX-GE-B850V3-FW LVDS/DP++ BRIDGES
M: Peter Senna Tschudin <peter.senna@collabora.com>
M: Martin Donnelly <martin.donnelly@ge.com>
......
......@@ -14,6 +14,7 @@
#include <crypto/aes.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/cpufeature.h>
#include <linux/module.h>
#include <crypto/xts.h>
......@@ -425,9 +426,6 @@ static int __init aes_init(void)
int err;
int i;
if (!(elf_hwcap2 & HWCAP2_AES))
return -ENODEV;
err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
if (err)
return err;
......@@ -451,5 +449,5 @@ static int __init aes_init(void)
return err;
}
module_init(aes_init);
module_cpu_feature_match(AES, aes_init);
module_exit(aes_exit);
......@@ -8,6 +8,7 @@
* published by the Free Software Foundation.
*/
#include <linux/cpufeature.h>
#include <linux/crc32.h>
#include <linux/init.h>
#include <linux/kernel.h>
......@@ -233,6 +234,11 @@ static void __exit crc32_pmull_mod_exit(void)
ARRAY_SIZE(crc32_pmull_algs));
}
static const struct cpu_feature crc32_cpu_feature[] = {
{ cpu_feature(CRC32) }, { cpu_feature(PMULL) }, { }
};
MODULE_DEVICE_TABLE(cpu, crc32_cpu_feature);
module_init(crc32_pmull_mod_init);
module_exit(crc32_pmull_mod_exit);
......
......@@ -15,6 +15,7 @@
#include <crypto/cryptd.h>
#include <crypto/internal/hash.h>
#include <crypto/gf128mul.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/module.h>
......@@ -311,9 +312,6 @@ static int __init ghash_ce_mod_init(void)
{
int err;
if (!(elf_hwcap2 & HWCAP2_PMULL))
return -ENODEV;
err = crypto_register_shash(&ghash_alg);
if (err)
return err;
......@@ -334,5 +332,5 @@ static void __exit ghash_ce_mod_exit(void)
crypto_unregister_shash(&ghash_alg);
}
module_init(ghash_ce_mod_init);
module_cpu_feature_match(PMULL, ghash_ce_mod_init);
module_exit(ghash_ce_mod_exit);
......@@ -11,6 +11,7 @@
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/sha1_base.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/module.h>
......@@ -82,8 +83,6 @@ static struct shash_alg alg = {
static int __init sha1_ce_mod_init(void)
{
if (!(elf_hwcap2 & HWCAP2_SHA1))
return -ENODEV;
return crypto_register_shash(&alg);
}
......@@ -92,5 +91,5 @@ static void __exit sha1_ce_mod_fini(void)
crypto_unregister_shash(&alg);
}
module_init(sha1_ce_mod_init);
module_cpu_feature_match(SHA1, sha1_ce_mod_init);
module_exit(sha1_ce_mod_fini);
......@@ -11,6 +11,7 @@
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/sha256_base.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/module.h>
......@@ -100,8 +101,6 @@ static struct shash_alg algs[] = { {
static int __init sha2_ce_mod_init(void)
{
if (!(elf_hwcap2 & HWCAP2_SHA2))
return -ENODEV;
return crypto_register_shashes(algs, ARRAY_SIZE(algs));
}
......@@ -110,5 +109,5 @@ static void __exit sha2_ce_mod_fini(void)
crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}
module_init(sha2_ce_mod_init);
module_cpu_feature_match(SHA2, sha2_ce_mod_init);
module_exit(sha2_ce_mod_fini);
......@@ -82,7 +82,8 @@ ENTRY(sha1_ce_transform)
ldr dgb, [x0, #16]
/* load sha1_ce_state::finalize */
ldr w4, [x0, #:lo12:sha1_ce_offsetof_finalize]
ldr_l w4, sha1_ce_offsetof_finalize, x4
ldr w4, [x0, x4]
/* load input */
0: ld1 {v8.4s-v11.4s}, [x1], #64
......@@ -132,7 +133,8 @@ CPU_LE( rev32 v11.16b, v11.16b )
* the padding is handled by the C code in that case.
*/
cbz x4, 3f
ldr x4, [x0, #:lo12:sha1_ce_offsetof_count]
ldr_l w4, sha1_ce_offsetof_count, x4
ldr x4, [x0, x4]
movi v9.2d, #0
mov x8, #0x80000000
movi v10.2d, #0
......
......@@ -17,9 +17,6 @@
#include <linux/crypto.h>
#include <linux/module.h>
#define ASM_EXPORT(sym, val) \
asm(".globl " #sym "; .set " #sym ", %0" :: "I"(val));
MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
......@@ -32,6 +29,9 @@ struct sha1_ce_state {
asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
int blocks);
const u32 sha1_ce_offsetof_count = offsetof(struct sha1_ce_state, sst.count);
const u32 sha1_ce_offsetof_finalize = offsetof(struct sha1_ce_state, finalize);
static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
......@@ -52,11 +52,6 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
struct sha1_ce_state *sctx = shash_desc_ctx(desc);
bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE);
ASM_EXPORT(sha1_ce_offsetof_count,
offsetof(struct sha1_ce_state, sst.count));
ASM_EXPORT(sha1_ce_offsetof_finalize,
offsetof(struct sha1_ce_state, finalize));
/*
* Allow the asm code to perform the finalization if there is no
* partial data and the input is a round multiple of the block size.
......
......@@ -88,7 +88,8 @@ ENTRY(sha2_ce_transform)
ld1 {dgav.4s, dgbv.4s}, [x0]
/* load sha256_ce_state::finalize */
ldr w4, [x0, #:lo12:sha256_ce_offsetof_finalize]
ldr_l w4, sha256_ce_offsetof_finalize, x4
ldr w4, [x0, x4]
/* load input */
0: ld1 {v16.4s-v19.4s}, [x1], #64
......@@ -136,7 +137,8 @@ CPU_LE( rev32 v19.16b, v19.16b )
* the padding is handled by the C code in that case.
*/
cbz x4, 3f
ldr x4, [x0, #:lo12:sha256_ce_offsetof_count]
ldr_l w4, sha256_ce_offsetof_count, x4
ldr x4, [x0, x4]
movi v17.2d, #0
mov x8, #0x80000000
movi v18.2d, #0
......
......@@ -17,9 +17,6 @@
#include <linux/crypto.h>
#include <linux/module.h>
#define ASM_EXPORT(sym, val) \
asm(".globl " #sym "; .set " #sym ", %0" :: "I"(val));
MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
......@@ -32,6 +29,11 @@ struct sha256_ce_state {
asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
int blocks);
const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
sst.count);
const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
finalize);
static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
......@@ -52,11 +54,6 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
struct sha256_ce_state *sctx = shash_desc_ctx(desc);
bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE);
ASM_EXPORT(sha256_ce_offsetof_count,
offsetof(struct sha256_ce_state, sst.count));
ASM_EXPORT(sha256_ce_offsetof_finalize,
offsetof(struct sha256_ce_state, finalize));
/*
* Allow the asm code to perform the finalization if there is no
* partial data and the input is a round multiple of the block size.
......
......@@ -42,17 +42,15 @@
#define R5E %esi
#define R6 %rdi
#define R6E %edi
#define R7 %rbp
#define R7E %ebp
#define R7 %r9 /* don't use %rbp; it breaks stack traces */
#define R7E %r9d
#define R8 %r8
#define R9 %r9
#define R10 %r10
#define R11 %r11
#define prologue(FUNC,KEY,B128,B192,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11) \
#define prologue(FUNC,KEY,B128,B192,r1,r2,r5,r6,r7,r8,r9,r10,r11) \
ENTRY(FUNC); \
movq r1,r2; \
movq r3,r4; \
leaq KEY+48(r8),r9; \
movq r10,r11; \
movl (r7),r5 ## E; \
......@@ -70,9 +68,8 @@
je B192; \
leaq 32(r9),r9;
#define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
#define epilogue(FUNC,r1,r2,r5,r6,r7,r8,r9) \
movq r1,r2; \
movq r3,r4; \
movl r5 ## E,(r9); \
movl r6 ## E,4(r9); \
movl r7 ## E,8(r9); \
......@@ -88,12 +85,12 @@
movl TAB(,r6,4),r6 ## E; \
roll $16,r2 ## E; \
shrl $16,r4 ## E; \
movzbl r4 ## H,r7 ## E; \
movzbl r4 ## L,r4 ## E; \
movzbl r4 ## L,r7 ## E; \
movzbl r4 ## H,r4 ## E; \
xorl OFFSET(r8),ra ## E; \
xorl OFFSET+4(r8),rb ## E; \
xorl TAB+3072(,r7,4),r5 ## E;\
xorl TAB+2048(,r4,4),r6 ## E;\
xorl TAB+3072(,r4,4),r5 ## E;\
xorl TAB+2048(,r7,4),r6 ## E;\
movzbl r1 ## L,r7 ## E; \
movzbl r1 ## H,r4 ## E; \
movl TAB+1024(,r4,4),r4 ## E;\
......@@ -101,19 +98,19 @@
roll $16,r1 ## E; \
shrl $16,r3 ## E; \
xorl TAB(,r7,4),r5 ## E; \
movzbl r3 ## H,r7 ## E; \
movzbl r3 ## L,r3 ## E; \
xorl TAB+3072(,r7,4),r4 ## E;\
xorl TAB+2048(,r3,4),r5 ## E;\
movzbl r1 ## H,r7 ## E; \
movzbl r1 ## L,r3 ## E; \
movzbl r3 ## L,r7 ## E; \
movzbl r3 ## H,r3 ## E; \
xorl TAB+3072(,r3,4),r4 ## E;\
xorl TAB+2048(,r7,4),r5 ## E;\
movzbl r1 ## L,r7 ## E; \
movzbl r1 ## H,r3 ## E; \
shrl $16,r1 ## E; \
xorl TAB+3072(,r7,4),r6 ## E;\
movl TAB+2048(,r3,4),r3 ## E;\
movzbl r1 ## H,r7 ## E; \
movzbl r1 ## L,r1 ## E; \
xorl TAB+1024(,r7,4),r6 ## E;\
xorl TAB(,r1,4),r3 ## E; \
xorl TAB+3072(,r3,4),r6 ## E;\
movl TAB+2048(,r7,4),r3 ## E;\
movzbl r1 ## L,r7 ## E; \
movzbl r1 ## H,r1 ## E; \
xorl TAB+1024(,r1,4),r6 ## E;\
xorl TAB(,r7,4),r3 ## E; \
movzbl r2 ## H,r1 ## E; \
movzbl r2 ## L,r7 ## E; \
shrl $16,r2 ## E; \
......@@ -131,9 +128,9 @@
movl r4 ## E,r2 ## E;
#define entry(FUNC,KEY,B128,B192) \
prologue(FUNC,KEY,B128,B192,R2,R8,R7,R9,R1,R3,R4,R6,R10,R5,R11)
prologue(FUNC,KEY,B128,B192,R2,R8,R1,R3,R4,R6,R10,R5,R11)
#define return(FUNC) epilogue(FUNC,R8,R2,R9,R7,R5,R6,R3,R4,R11)
#define return(FUNC) epilogue(FUNC,R8,R2,R5,R6,R3,R4,R11)
#define encrypt_round(TAB,OFFSET) \
round(TAB,OFFSET,R1,R2,R3,R4,R5,R6,R7,R10,R5,R6,R3,R4) \
......
......@@ -89,6 +89,29 @@ SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100
ALL_F: .octa 0xffffffffffffffffffffffffffffffff
.octa 0x00000000000000000000000000000000
.section .rodata
.align 16
.type aad_shift_arr, @object
.size aad_shift_arr, 272
aad_shift_arr:
.octa 0xffffffffffffffffffffffffffffffff
.octa 0xffffffffffffffffffffffffffffff0C
.octa 0xffffffffffffffffffffffffffff0D0C
.octa 0xffffffffffffffffffffffffff0E0D0C
.octa 0xffffffffffffffffffffffff0F0E0D0C
.octa 0xffffffffffffffffffffff0C0B0A0908
.octa 0xffffffffffffffffffff0D0C0B0A0908
.octa 0xffffffffffffffffff0E0D0C0B0A0908
.octa 0xffffffffffffffff0F0E0D0C0B0A0908
.octa 0xffffffffffffff0C0B0A090807060504
.octa 0xffffffffffff0D0C0B0A090807060504
.octa 0xffffffffff0E0D0C0B0A090807060504
.octa 0xffffffff0F0E0D0C0B0A090807060504
.octa 0xffffff0C0B0A09080706050403020100
.octa 0xffff0D0C0B0A09080706050403020100
.octa 0xff0E0D0C0B0A09080706050403020100
.octa 0x0F0E0D0C0B0A09080706050403020100
.text
......@@ -252,31 +275,65 @@ XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
mov arg8, %r12 # %r12 = aadLen
mov %r12, %r11
pxor %xmm\i, %xmm\i
pxor \XMM2, \XMM2
_get_AAD_loop\num_initial_blocks\operation:
movd (%r10), \TMP1
pslldq $12, \TMP1
psrldq $4, %xmm\i
cmp $16, %r11
jl _get_AAD_rest8\num_initial_blocks\operation
_get_AAD_blocks\num_initial_blocks\operation:
movdqu (%r10), %xmm\i
PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
pxor %xmm\i, \XMM2
GHASH_MUL \XMM2, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
add $16, %r10
sub $16, %r12
sub $16, %r11
cmp $16, %r11
jge _get_AAD_blocks\num_initial_blocks\operation
movdqu \XMM2, %xmm\i
cmp $0, %r11
je _get_AAD_done\num_initial_blocks\operation
pxor %xmm\i,%xmm\i
/* read the last <16B of AAD. since we have at least 4B of
data right after the AAD (the ICV, and maybe some CT), we can
read 4B/8B blocks safely, and then get rid of the extra stuff */
_get_AAD_rest8\num_initial_blocks\operation:
cmp $4, %r11
jle _get_AAD_rest4\num_initial_blocks\operation
movq (%r10), \TMP1
add $8, %r10
sub $8, %r11
pslldq $8, \TMP1
psrldq $8, %xmm\i
pxor \TMP1, %xmm\i
jmp _get_AAD_rest8\num_initial_blocks\operation
_get_AAD_rest4\num_initial_blocks\operation:
cmp $0, %r11
jle _get_AAD_rest0\num_initial_blocks\operation
mov (%r10), %eax
movq %rax, \TMP1
add $4, %r10
sub $4, %r12
jne _get_AAD_loop\num_initial_blocks\operation
cmp $16, %r11
je _get_AAD_loop2_done\num_initial_blocks\operation
mov $16, %r12
_get_AAD_loop2\num_initial_blocks\operation:
sub $4, %r10
pslldq $12, \TMP1
psrldq $4, %xmm\i
sub $4, %r12
cmp %r11, %r12
jne _get_AAD_loop2\num_initial_blocks\operation
_get_AAD_loop2_done\num_initial_blocks\operation:
pxor \TMP1, %xmm\i
_get_AAD_rest0\num_initial_blocks\operation:
/* finalize: shift out the extra bytes we read, and align
left. since pslldq can only shift by an immediate, we use
vpshufb and an array of shuffle masks */
movq %r12, %r11
salq $4, %r11
movdqu aad_shift_arr(%r11), \TMP1
PSHUFB_XMM \TMP1, %xmm\i
_get_AAD_rest_final\num_initial_blocks\operation:
PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
pxor \XMM2, %xmm\i
GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
_get_AAD_done\num_initial_blocks\operation:
xor %r11, %r11 # initialise the data pointer offset as zero
# start AES for num_initial_blocks blocks
mov %arg5, %rax # %rax = *Y0
......@@ -322,7 +379,7 @@ aes_loop_initial_dec\num_initial_blocks:
# prepare plaintext/ciphertext for GHASH computation
.endr
.endif
GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
# apply GHASH on num_initial_blocks blocks
.if \i == 5
......@@ -477,27 +534,65 @@ XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
mov arg8, %r12 # %r12 = aadLen
mov %r12, %r11
pxor %xmm\i, %xmm\i
_get_AAD_loop\num_initial_blocks\operation:
movd (%r10), \TMP1
pslldq $12, \TMP1
psrldq $4, %xmm\i
pxor \XMM2, \XMM2
cmp $16, %r11
jl _get_AAD_rest8\num_initial_blocks\operation
_get_AAD_blocks\num_initial_blocks\operation:
movdqu (%r10), %xmm\i
PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
pxor %xmm\i, \XMM2
GHASH_MUL \XMM2, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
add $16, %r10
sub $16, %r12
sub $16, %r11
cmp $16, %r11
jge _get_AAD_blocks\num_initial_blocks\operation
movdqu \XMM2, %xmm\i
cmp $0, %r11
je _get_AAD_done\num_initial_blocks\operation
pxor %xmm\i,%xmm\i
/* read the last <16B of AAD. since we have at least 4B of
data right after the AAD (the ICV, and maybe some PT), we can
read 4B/8B blocks safely, and then get rid of the extra stuff */
_get_AAD_rest8\num_initial_blocks\operation:
cmp $4, %r11
jle _get_AAD_rest4\num_initial_blocks\operation
movq (%r10), \TMP1
add $8, %r10
sub $8, %r11
pslldq $8, \TMP1
psrldq $8, %xmm\i
pxor \TMP1, %xmm\i
jmp _get_AAD_rest8\num_initial_blocks\operation
_get_AAD_rest4\num_initial_blocks\operation:
cmp $0, %r11
jle _get_AAD_rest0\num_initial_blocks\operation
mov (%r10), %eax
movq %rax, \TMP1
add $4, %r10
sub $4, %r12
jne _get_AAD_loop\num_initial_blocks\operation
cmp $16, %r11
je _get_AAD_loop2_done\num_initial_blocks\operation
mov $16, %r12
_get_AAD_loop2\num_initial_blocks\operation:
sub $4, %r10
pslldq $12, \TMP1
psrldq $4, %xmm\i
sub $4, %r12
cmp %r11, %r12
jne _get_AAD_loop2\num_initial_blocks\operation
_get_AAD_loop2_done\num_initial_blocks\operation:
pxor \TMP1, %xmm\i
_get_AAD_rest0\num_initial_blocks\operation:
/* finalize: shift out the extra bytes we read, and align
left. since pslldq can only shift by an immediate, we use
vpshufb and an array of shuffle masks */
movq %r12, %r11
salq $4, %r11
movdqu aad_shift_arr(%r11), \TMP1
PSHUFB_XMM \TMP1, %xmm\i
_get_AAD_rest_final\num_initial_blocks\operation:
PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
pxor \XMM2, %xmm\i
GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
_get_AAD_done\num_initial_blocks\operation:
xor %r11, %r11 # initialise the data pointer offset as zero
# start AES for num_initial_blocks blocks
mov %arg5, %rax # %rax = *Y0
......@@ -543,7 +638,7 @@ aes_loop_initial_enc\num_initial_blocks:
# prepare plaintext/ciphertext for GHASH computation
.endr
.endif
GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
# apply GHASH on num_initial_blocks blocks
.if \i == 5
......@@ -1454,18 +1549,35 @@ _return_T_decrypt:
mov arg10, %r11 # %r11 = auth_tag_len
cmp $16, %r11
je _T_16_decrypt
cmp $12, %r11
je _T_12_decrypt
cmp $8, %r11
jl _T_4_decrypt
_T_8_decrypt:
MOVQ_R64_XMM %xmm0, %rax
mov %rax, (%r10)
jmp _return_T_done_decrypt
_T_12_decrypt:
MOVQ_R64_XMM %xmm0, %rax
mov %rax, (%r10)
add $8, %r10
sub $8, %r11
psrldq $8, %xmm0
cmp $0, %r11
je _return_T_done_decrypt
_T_4_decrypt:
movd %xmm0, %eax
mov %eax, (%r10)
add $4, %r10
sub $4, %r11
psrldq $4, %xmm0
cmp $0, %r11
je _return_T_done_decrypt
_T_123_decrypt:
movd %xmm0, %eax
mov %eax, 8(%r10)
cmp $2, %r11
jl _T_1_decrypt
mov %ax, (%r10)
cmp $2, %r11
je _return_T_done_decrypt
add $2, %r10
sar $16, %eax
_T_1_decrypt:
mov %al, (%r10)
jmp _return_T_done_decrypt
_T_16_decrypt:
movdqu %xmm0, (%r10)
......@@ -1718,18 +1830,35 @@ _return_T_encrypt:
mov arg10, %r11 # %r11 = auth_tag_len
cmp $16, %r11
je _T_16_encrypt
cmp $12, %r11
je _T_12_encrypt
cmp $8, %r11
jl _T_4_encrypt
_T_8_encrypt:
MOVQ_R64_XMM %xmm0, %rax
mov %rax, (%r10)
jmp _return_T_done_encrypt
_T_12_encrypt:
MOVQ_R64_XMM %xmm0, %rax
mov %rax, (%r10)
add $8, %r10
sub $8, %r11
psrldq $8, %xmm0
cmp $0, %r11
je _return_T_done_encrypt
_T_4_encrypt:
movd %xmm0, %eax
mov %eax, (%r10)
add $4, %r10
sub $4, %r11
psrldq $4, %xmm0
cmp $0, %r11
je _return_T_done_encrypt
_T_123_encrypt:
movd %xmm0, %eax
mov %eax, 8(%r10)
cmp $2, %r11
jl _T_1_encrypt
mov %ax, (%r10)
cmp $2, %r11
je _return_T_done_encrypt
add $2, %r10
sar $16, %eax
_T_1_encrypt:
mov %al, (%r10)
jmp _return_T_done_encrypt
_T_16_encrypt:
movdqu %xmm0, (%r10)
......
......@@ -155,6 +155,30 @@ SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100
ALL_F: .octa 0xffffffffffffffffffffffffffffffff
.octa 0x00000000000000000000000000000000
.section .rodata
.align 16
.type aad_shift_arr, @object
.size aad_shift_arr, 272
aad_shift_arr:
.octa 0xffffffffffffffffffffffffffffffff
.octa 0xffffffffffffffffffffffffffffff0C
.octa 0xffffffffffffffffffffffffffff0D0C
.octa 0xffffffffffffffffffffffffff0E0D0C
.octa 0xffffffffffffffffffffffff0F0E0D0C
.octa 0xffffffffffffffffffffff0C0B0A0908
.octa 0xffffffffffffffffffff0D0C0B0A0908
.octa 0xffffffffffffffffff0E0D0C0B0A0908
.octa 0xffffffffffffffff0F0E0D0C0B0A0908
.octa 0xffffffffffffff0C0B0A090807060504
.octa 0xffffffffffff0D0C0B0A090807060504
.octa 0xffffffffff0E0D0C0B0A090807060504
.octa 0xffffffff0F0E0D0C0B0A090807060504
.octa 0xffffff0C0B0A09080706050403020100
.octa 0xffff0D0C0B0A09080706050403020100
.octa 0xff0E0D0C0B0A09080706050403020100
.octa 0x0F0E0D0C0B0A09080706050403020100
.text
......@@ -372,6 +396,7 @@ VARIABLE_OFFSET = 16*8
.macro INITIAL_BLOCKS_AVX num_initial_blocks T1 T2 T3 T4 T5 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T6 T_key ENC_DEC
i = (8-\num_initial_blocks)
j = 0
setreg
mov arg6, %r10 # r10 = AAD
......@@ -380,33 +405,63 @@ VARIABLE_OFFSET = 16*8
mov %r12, %r11
vpxor reg_j, reg_j, reg_j
vpxor reg_i, reg_i, reg_i
_get_AAD_loop\@:
vmovd (%r10), \T1
vpslldq $12, \T1, \T1
vpsrldq $4, reg_i, reg_i
vpxor \T1, reg_i, reg_i
add $4, %r10
sub $4, %r12
jg _get_AAD_loop\@
cmp $16, %r11
je _get_AAD_loop2_done\@
mov $16, %r12
_get_AAD_loop2\@:
vpsrldq $4, reg_i, reg_i
sub $4, %r12
cmp %r11, %r12
jg _get_AAD_loop2\@
jl _get_AAD_rest8\@
_get_AAD_blocks\@:
vmovdqu (%r10), reg_i
vpshufb SHUF_MASK(%rip), reg_i, reg_i
vpxor reg_i, reg_j, reg_j
GHASH_MUL_AVX reg_j, \T2, \T1, \T3, \T4, \T5, \T6
add $16, %r10
sub $16, %r12
sub $16, %r11
cmp $16, %r11
jge _get_AAD_blocks\@
vmovdqu reg_j, reg_i
cmp $0, %r11
je _get_AAD_done\@
_get_AAD_loop2_done\@:
vpxor reg_i, reg_i, reg_i
#byte-reflect the AAD data
/* read the last <16B of AAD. since we have at least 4B of
data right after the AAD (the ICV, and maybe some CT), we can
read 4B/8B blocks safely, and then get rid of the extra stuff */
_get_AAD_rest8\@:
cmp $4, %r11
jle _get_AAD_rest4\@
movq (%r10), \T1
add $8, %r10
sub $8, %r11
vpslldq $8, \T1, \T1
vpsrldq $8, reg_i, reg_i
vpxor \T1, reg_i, reg_i
jmp _get_AAD_rest8\@
_get_AAD_rest4\@:
cmp $0, %r11
jle _get_AAD_rest0\@
mov (%r10), %eax
movq %rax, \T1
add $4, %r10
sub $4, %r11
vpslldq $12, \T1, \T1
vpsrldq $4, reg_i, reg_i
vpxor \T1, reg_i, reg_i
_get_AAD_rest0\@:
/* finalize: shift out the extra bytes we read, and align
left. since pslldq can only shift by an immediate, we use
vpshufb and an array of shuffle masks */
movq %r12, %r11
salq $4, %r11
movdqu aad_shift_arr(%r11), \T1
vpshufb \T1, reg_i, reg_i
_get_AAD_rest_final\@:
vpshufb SHUF_MASK(%rip), reg_i, reg_i
vpxor reg_j, reg_i, reg_i
GHASH_MUL_AVX reg_i, \T2, \T1, \T3, \T4, \T5, \T6
_get_AAD_done\@:
# initialize the data pointer offset as zero
xor %r11, %r11
......@@ -480,7 +535,6 @@ _get_AAD_loop2_done\@:
i = (8-\num_initial_blocks)
j = (9-\num_initial_blocks)
setreg
GHASH_MUL_AVX reg_i, \T2, \T1, \T3, \T4, \T5, \T6
.rep \num_initial_blocks
vpxor reg_i, reg_j, reg_j
......@@ -1427,19 +1481,36 @@ _return_T\@:
cmp $16, %r11
je _T_16\@
cmp $12, %r11
je _T_12\@
cmp $8, %r11
jl _T_4\@
_T_8\@:
vmovq %xmm9, %rax
mov %rax, (%r10)
jmp _return_T_done\@
_T_12\@:
vmovq %xmm9, %rax
mov %rax, (%r10)
add $8, %r10
sub $8, %r11
vpsrldq $8, %xmm9, %xmm9
cmp $0, %r11
je _return_T_done\@
_T_4\@:
vmovd %xmm9, %eax
mov %eax, 8(%r10)
mov %eax, (%r10)
add $4, %r10
sub $4, %r11
vpsrldq $4, %xmm9, %xmm9
cmp $0, %r11
je _return_T_done\@
_T_123\@:
vmovd %xmm9, %eax
cmp $2, %r11
jl _T_1\@
mov %ax, (%r10)
cmp $2, %r11
je _return_T_done\@
add $2, %r10
sar $16, %eax
_T_1\@:
mov %al, (%r10)
jmp _return_T_done\@
_T_16\@:
......@@ -1631,6 +1702,7 @@ ENDPROC(aesni_gcm_dec_avx_gen2)
.macro INITIAL_BLOCKS_AVX2 num_initial_blocks T1 T2 T3 T4 T5 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T6 T_key ENC_DEC VER
i = (8-\num_initial_blocks)
j = 0
setreg
mov arg6, %r10 # r10 = AAD
......@@ -1639,33 +1711,64 @@ ENDPROC(aesni_gcm_dec_avx_gen2)
mov %r12, %r11
vpxor reg_j, reg_j, reg_j
vpxor reg_i, reg_i, reg_i
_get_AAD_loop\@:
vmovd (%r10), \T1
vpslldq $12, \T1, \T1
vpsrldq $4, reg_i, reg_i
vpxor \T1, reg_i, reg_i
add $4, %r10
sub $4, %r12
jg _get_AAD_loop\@
cmp $16, %r11
je _get_AAD_loop2_done\@
mov $16, %r12
_get_AAD_loop2\@:
vpsrldq $4, reg_i, reg_i
sub $4, %r12
cmp %r11, %r12
jg _get_AAD_loop2\@
jl _get_AAD_rest8\@
_get_AAD_blocks\@:
vmovdqu (%r10), reg_i
vpshufb SHUF_MASK(%rip), reg_i, reg_i
vpxor reg_i, reg_j, reg_j
GHASH_MUL_AVX2 reg_j, \T2, \T1, \T3, \T4, \T5, \T6
add $16, %r10
sub $16, %r12
sub $16, %r11
cmp $16, %r11
jge _get_AAD_blocks\@
vmovdqu reg_j, reg_i
cmp $0, %r11
je _get_AAD_done\@
_get_AAD_loop2_done\@:
vpxor reg_i, reg_i, reg_i
#byte-reflect the AAD data
/* read the last <16B of AAD. since we have at least 4B of
data right after the AAD (the ICV, and maybe some CT), we can
read 4B/8B blocks safely, and then get rid of the extra stuff */
_get_AAD_rest8\@:
cmp $4, %r11
jle _get_AAD_rest4\@
movq (%r10), \T1
add $8, %r10
sub $8, %r11
vpslldq $8, \T1, \T1
vpsrldq $8, reg_i, reg_i
vpxor \T1, reg_i, reg_i
jmp _get_AAD_rest8\@
_get_AAD_rest4\@:
cmp $0, %r11
jle _get_AAD_rest0\@
mov (%r10), %eax
movq %rax, \T1
add $4, %r10
sub $4, %r11
vpslldq $12, \T1, \T1
vpsrldq $4, reg_i, reg_i
vpxor \T1, reg_i, reg_i
_get_AAD_rest0\@:
/* finalize: shift out the extra bytes we read, and align
left. since pslldq can only shift by an immediate, we use
vpshufb and an array of shuffle masks */
movq %r12, %r11
salq $4, %r11
movdqu aad_shift_arr(%r11), \T1
vpshufb \T1, reg_i, reg_i
_get_AAD_rest_final\@:
vpshufb SHUF_MASK(%rip), reg_i, reg_i
vpxor reg_j, reg_i, reg_i
GHASH_MUL_AVX2 reg_i, \T2, \T1, \T3, \T4, \T5, \T6
_get_AAD_done\@:
# initialize the data pointer offset as zero
xor %r11, %r11
......@@ -1740,7 +1843,6 @@ _get_AAD_loop2_done\@:
i = (8-\num_initial_blocks)
j = (9-\num_initial_blocks)
setreg
GHASH_MUL_AVX2 reg_i, \T2, \T1, \T3, \T4, \T5, \T6
.rep \num_initial_blocks
vpxor reg_i, reg_j, reg_j
......@@ -2702,19 +2804,36 @@ _return_T\@:
cmp $16, %r11
je _T_16\@
cmp $12, %r11
je _T_12\@
cmp $8, %r11
jl _T_4\@
_T_8\@:
vmovq %xmm9, %rax
mov %rax, (%r10)
jmp _return_T_done\@
_T_12\@:
vmovq %xmm9, %rax
mov %rax, (%r10)
add $8, %r10
sub $8, %r11
vpsrldq $8, %xmm9, %xmm9
cmp $0, %r11
je _return_T_done\@
_T_4\@:
vmovd %xmm9, %eax
mov %eax, (%r10)
add $4, %r10
sub $4, %r11
vpsrldq $4, %xmm9, %xmm9
cmp $0, %r11
je _return_T_done\@
_T_123\@:
vmovd %xmm9, %eax
mov %eax, 8(%r10)
cmp $2, %r11
jl _T_1\@
mov %ax, (%r10)
cmp $2, %r11
je _return_T_done\@
add $2, %r10
sar $16, %eax
_T_1\@:
mov %al, (%r10)
jmp _return_T_done\@
_T_16\@:
......
......@@ -61,6 +61,11 @@ struct aesni_rfc4106_gcm_ctx {
u8 nonce[4];
};
struct generic_gcmaes_ctx {
u8 hash_subkey[16] AESNI_ALIGN_ATTR;
struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
};
struct aesni_xts_ctx {
u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
......@@ -102,13 +107,11 @@ asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
* u8 *out, Ciphertext output. Encrypt in-place is allowed.
* const u8 *in, Plaintext input
* unsigned long plaintext_len, Length of data in bytes for encryption.
* u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
* concatenated with 8 byte Initialisation Vector (from IPSec ESP
* Payload) concatenated with 0x00000001. 16-byte aligned pointer.
* u8 *iv, Pre-counter block j0: 12 byte IV concatenated with 0x00000001.
* 16-byte aligned pointer.
* u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
* const u8 *aad, Additional Authentication Data (AAD)
* unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
* is going to be 8 or 12 bytes
* unsigned long aad_len, Length of AAD in bytes.
* u8 *auth_tag, Authenticated Tag output.
* unsigned long auth_tag_len), Authenticated Tag Length in bytes.
* Valid values are 16 (most likely), 12 or 8.
......@@ -123,9 +126,8 @@ asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
* u8 *out, Plaintext output. Decrypt in-place is allowed.
* const u8 *in, Ciphertext input
* unsigned long ciphertext_len, Length of data in bytes for decryption.
* u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
* concatenated with 8 byte Initialisation Vector (from IPSec ESP
* Payload) concatenated with 0x00000001. 16-byte aligned pointer.
* u8 *iv, Pre-counter block j0: 12 byte IV concatenated with 0x00000001.
* 16-byte aligned pointer.
* u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
* const u8 *aad, Additional Authentication Data (AAD)
* unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
......@@ -275,6 +277,16 @@ aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
align = 1;
return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}
static inline struct
generic_gcmaes_ctx *generic_gcmaes_ctx_get(struct crypto_aead *tfm)
{
unsigned long align = AESNI_ALIGN;
if (align <= crypto_tfm_ctx_alignment())
align = 1;
return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}
#endif
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
......@@ -712,32 +724,34 @@ static int rfc4106_set_authsize(struct crypto_aead *parent,
return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
}
static int helper_rfc4106_encrypt(struct aead_request *req)
static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
unsigned int authsize)
{
switch (authsize) {
case 4:
case 8:
case 12:
case 13:
case 14:
case 15:
case 16:
break;
default:
return -EINVAL;
}
return 0;
}
static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
u8 one_entry_in_sg = 0;
u8 *src, *dst, *assoc;
__be32 counter = cpu_to_be32(1);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
void *aes_ctx = &(ctx->aes_key_expanded);
unsigned long auth_tag_len = crypto_aead_authsize(tfm);
u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
struct scatter_walk src_sg_walk;
struct scatter_walk dst_sg_walk = {};
unsigned int i;
/* Assuming we are supporting rfc4106 64-bit extended */
/* sequence numbers We need to have the AAD length equal */
/* to 16 or 20 bytes */
if (unlikely(req->assoclen != 16 && req->assoclen != 20))
return -EINVAL;
/* IV below built */
for (i = 0; i < 4; i++)
*(iv+i) = ctx->nonce[i];
for (i = 0; i < 8; i++)
*(iv+4+i) = req->iv[i];
*((__be32 *)(iv+12)) = counter;
if (sg_is_last(req->src) &&
(!PageHighMem(sg_page(req->src)) ||
......@@ -768,7 +782,7 @@ static int helper_rfc4106_encrypt(struct aead_request *req)
kernel_fpu_begin();
aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv,
ctx->hash_subkey, assoc, req->assoclen - 8,
hash_subkey, assoc, assoclen,
dst + req->cryptlen, auth_tag_len);
kernel_fpu_end();
......@@ -791,37 +805,20 @@ static int helper_rfc4106_encrypt(struct aead_request *req)
return 0;
}
static int helper_rfc4106_decrypt(struct aead_request *req)
static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
u8 one_entry_in_sg = 0;
u8 *src, *dst, *assoc;
unsigned long tempCipherLen = 0;
__be32 counter = cpu_to_be32(1);
int retval = 0;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
void *aes_ctx = &(ctx->aes_key_expanded);
unsigned long auth_tag_len = crypto_aead_authsize(tfm);
u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
u8 authTag[16];
struct scatter_walk src_sg_walk;
struct scatter_walk dst_sg_walk = {};
unsigned int i;
if (unlikely(req->assoclen != 16 && req->assoclen != 20))
return -EINVAL;
/* Assuming we are supporting rfc4106 64-bit extended */
/* sequence numbers We need to have the AAD length */
/* equal to 16 or 20 bytes */
int retval = 0;
tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
/* IV below built */
for (i = 0; i < 4; i++)
*(iv+i) = ctx->nonce[i];
for (i = 0; i < 8; i++)
*(iv+4+i) = req->iv[i];
*((__be32 *)(iv+12)) = counter;
if (sg_is_last(req->src) &&
(!PageHighMem(sg_page(req->src)) ||
......@@ -838,7 +835,6 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
scatterwalk_start(&dst_sg_walk, req->dst);
dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
}
} else {
/* Allocate memory for src, dst, assoc */
assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
......@@ -850,9 +846,10 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
dst = src;
}
kernel_fpu_begin();
aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
ctx->hash_subkey, assoc, req->assoclen - 8,
hash_subkey, assoc, assoclen,
authTag, auth_tag_len);
kernel_fpu_end();
......@@ -875,6 +872,60 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
kfree(assoc);
}
return retval;
}
static int helper_rfc4106_encrypt(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
void *aes_ctx = &(ctx->aes_key_expanded);
u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
unsigned int i;
__be32 counter = cpu_to_be32(1);
/* Assuming we are supporting rfc4106 64-bit extended */
/* sequence numbers We need to have the AAD length equal */
/* to 16 or 20 bytes */
if (unlikely(req->assoclen != 16 && req->assoclen != 20))
return -EINVAL;
/* IV below built */
for (i = 0; i < 4; i++)
*(iv+i) = ctx->nonce[i];
for (i = 0; i < 8; i++)
*(iv+4+i) = req->iv[i];
*((__be32 *)(iv+12)) = counter;
return gcmaes_encrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
aes_ctx);
}
static int helper_rfc4106_decrypt(struct aead_request *req)
{
__be32 counter = cpu_to_be32(1);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
void *aes_ctx = &(ctx->aes_key_expanded);
u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
unsigned int i;
if (unlikely(req->assoclen != 16 && req->assoclen != 20))
return -EINVAL;
/* Assuming we are supporting rfc4106 64-bit extended */
/* sequence numbers We need to have the AAD length */
/* equal to 16 or 20 bytes */
/* IV below built */
for (i = 0; i < 4; i++)
*(iv+i) = ctx->nonce[i];
for (i = 0; i < 8; i++)
*(iv+4+i) = req->iv[i];
*((__be32 *)(iv+12)) = counter;
return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
aes_ctx);
}
static int rfc4106_encrypt(struct aead_request *req)
......@@ -1035,6 +1086,46 @@ struct {
};
#ifdef CONFIG_X86_64
static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
unsigned int key_len)
{
struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(aead);
return aes_set_key_common(crypto_aead_tfm(aead),
&ctx->aes_key_expanded, key, key_len) ?:
rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}
static int generic_gcmaes_encrypt(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
void *aes_ctx = &(ctx->aes_key_expanded);
u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
__be32 counter = cpu_to_be32(1);
memcpy(iv, req->iv, 12);
*((__be32 *)(iv+12)) = counter;
return gcmaes_encrypt(req, req->assoclen, ctx->hash_subkey, iv,
aes_ctx);
}
static int generic_gcmaes_decrypt(struct aead_request *req)
{
__be32 counter = cpu_to_be32(1);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
void *aes_ctx = &(ctx->aes_key_expanded);
u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
memcpy(iv, req->iv, 12);
*((__be32 *)(iv+12)) = counter;
return gcmaes_decrypt(req, req->assoclen, ctx->hash_subkey, iv,
aes_ctx);
}
static struct aead_alg aesni_aead_algs[] = { {
.setkey = common_rfc4106_set_key,
.setauthsize = common_rfc4106_set_authsize,
......@@ -1069,6 +1160,23 @@ static struct aead_alg aesni_aead_algs[] = { {
.cra_ctxsize = sizeof(struct cryptd_aead *),
.cra_module = THIS_MODULE,
},
}, {
.setkey = generic_gcmaes_set_key,
.setauthsize = generic_gcmaes_set_authsize,
.encrypt = generic_gcmaes_encrypt,
.decrypt = generic_gcmaes_decrypt,
.ivsize = 12,
.maxauthsize = 16,
.base = {
.cra_name = "gcm(aes)",
.cra_driver_name = "generic-gcm-aesni",
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct generic_gcmaes_ctx),
.cra_alignmask = AESNI_ALIGN - 1,
.cra_module = THIS_MODULE,
},
} };
#else
static struct aead_alg aesni_aead_algs[0];
......
......@@ -176,9 +176,6 @@ __glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
src -= 1;
dst -= 1;
} while (nbytes >= func_bytes);
if (nbytes < bsize)
goto done;
}
}
......
......@@ -269,19 +269,19 @@ static struct sha512_hash_ctx
* LAST
*/
ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
return ctx;
goto unlock;
}
if (ctx->status & HASH_CTX_STS_PROCESSING) {
/* Cannot submit to a currently processing job. */
ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
return ctx;
goto unlock;
}
if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) {
/* Cannot update a finished job. */
ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
return ctx;
goto unlock;
}
......@@ -363,6 +363,7 @@ static struct sha512_hash_ctx
}
ctx = sha512_ctx_mgr_resubmit(mgr, ctx);
unlock:
spin_unlock_irqrestore(&cstate->work_lock, irqflags);
return ctx;
}
......
......@@ -130,6 +130,7 @@ config CRYPTO_DH
config CRYPTO_ECDH
tristate "ECDH algorithm"
select CRYTPO_KPP
select CRYPTO_RNG_DEFAULT
help
Generic implementation of the ECDH algorithm
......
......@@ -33,10 +33,6 @@ obj-$(CONFIG_CRYPTO_KPP2) += kpp.o
dh_generic-y := dh.o
dh_generic-y += dh_helper.o
obj-$(CONFIG_CRYPTO_DH) += dh_generic.o
ecdh_generic-y := ecc.o
ecdh_generic-y += ecdh.o
ecdh_generic-y += ecdh_helper.o
obj-$(CONFIG_CRYPTO_ECDH) += ecdh_generic.o
$(obj)/rsapubkey-asn1.o: $(obj)/rsapubkey-asn1.c $(obj)/rsapubkey-asn1.h
$(obj)/rsaprivkey-asn1.o: $(obj)/rsaprivkey-asn1.c $(obj)/rsaprivkey-asn1.h
......@@ -138,6 +134,11 @@ obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
obj-$(CONFIG_CRYPTO_USER_API_RNG) += algif_rng.o
obj-$(CONFIG_CRYPTO_USER_API_AEAD) += algif_aead.o
ecdh_generic-y := ecc.o
ecdh_generic-y += ecdh.o
ecdh_generic-y += ecdh_helper.o
obj-$(CONFIG_CRYPTO_ECDH) += ecdh_generic.o
#
# generic algorithms and the async_tx api
#
......
......@@ -114,7 +114,7 @@ static u32 mix_columns(u32 x)
* | 0x2 0x3 0x1 0x1 | | x[0] |
* | 0x1 0x2 0x3 0x1 | | x[1] |
* | 0x1 0x1 0x2 0x3 | x | x[2] |
* | 0x3 0x1 0x1 0x3 | | x[3] |
* | 0x3 0x1 0x1 0x2 | | x[3] |
*/
u32 y = mul_by_x(x) ^ ror32(x, 16);
......
......@@ -260,7 +260,7 @@ void crypto_alg_tested(const char *name, int err)
goto found;
}
printk(KERN_ERR "alg: Unexpected test result for %s: %d\n", name, err);
pr_err("alg: Unexpected test result for %s: %d\n", name, err);
goto unlock;
found:
......
......@@ -70,7 +70,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
if (engine->unprepare_crypt_hardware &&
engine->unprepare_crypt_hardware(engine))
pr_err("failed to unprepare crypt hardware\n");
dev_err(engine->dev, "failed to unprepare crypt hardware\n");
spin_lock_irqsave(&engine->queue_lock, flags);
engine->idling = false;
......@@ -99,7 +99,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
if (!was_busy && engine->prepare_crypt_hardware) {
ret = engine->prepare_crypt_hardware(engine);
if (ret) {
pr_err("failed to prepare crypt hardware\n");
dev_err(engine->dev, "failed to prepare crypt hardware\n");
goto req_err;
}
}
......@@ -110,14 +110,15 @@ static void crypto_pump_requests(struct crypto_engine *engine,
if (engine->prepare_hash_request) {
ret = engine->prepare_hash_request(engine, hreq);
if (ret) {
pr_err("failed to prepare request: %d\n", ret);
dev_err(engine->dev, "failed to prepare request: %d\n",
ret);
goto req_err;
}
engine->cur_req_prepared = true;
}
ret = engine->hash_one_request(engine, hreq);
if (ret) {
pr_err("failed to hash one request from queue\n");
dev_err(engine->dev, "failed to hash one request from queue\n");
goto req_err;
}
return;
......@@ -126,19 +127,20 @@ static void crypto_pump_requests(struct crypto_engine *engine,
if (engine->prepare_cipher_request) {
ret = engine->prepare_cipher_request(engine, breq);
if (ret) {
pr_err("failed to prepare request: %d\n", ret);
dev_err(engine->dev, "failed to prepare request: %d\n",
ret);
goto req_err;
}
engine->cur_req_prepared = true;
}
ret = engine->cipher_one_request(engine, breq);
if (ret) {
pr_err("failed to cipher one request from queue\n");
dev_err(engine->dev, "failed to cipher one request from queue\n");
goto req_err;
}
return;
default:
pr_err("failed to prepare request of unknown type\n");
dev_err(engine->dev, "failed to prepare request of unknown type\n");
return;
}
......@@ -275,7 +277,7 @@ void crypto_finalize_cipher_request(struct crypto_engine *engine,
engine->unprepare_cipher_request) {
ret = engine->unprepare_cipher_request(engine, req);
if (ret)
pr_err("failed to unprepare request\n");
dev_err(engine->dev, "failed to unprepare request\n");
}
spin_lock_irqsave(&engine->queue_lock, flags);
engine->cur_req = NULL;
......@@ -312,7 +314,7 @@ void crypto_finalize_hash_request(struct crypto_engine *engine,
engine->unprepare_hash_request) {
ret = engine->unprepare_hash_request(engine, req);
if (ret)
pr_err("failed to unprepare request\n");
dev_err(engine->dev, "failed to unprepare request\n");
}
spin_lock_irqsave(&engine->queue_lock, flags);
engine->cur_req = NULL;
......@@ -384,7 +386,7 @@ int crypto_engine_stop(struct crypto_engine *engine)
spin_unlock_irqrestore(&engine->queue_lock, flags);
if (ret)
pr_warn("could not stop engine\n");
dev_warn(engine->dev, "could not stop engine\n");
return ret;
}
......@@ -411,6 +413,7 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
if (!engine)
return NULL;
engine->dev = dev;
engine->rt = rt;
engine->running = false;
engine->busy = false;
......
......@@ -4,9 +4,9 @@
* Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
......@@ -85,6 +85,9 @@ static int dh_set_secret(struct crypto_kpp *tfm, const void *buf,
struct dh_ctx *ctx = dh_get_ctx(tfm);
struct dh params;
/* Free the old MPI key if any */
dh_free_ctx(ctx);
if (crypto_dh_decode_key(buf, len, &params) < 0)
return -EINVAL;
......@@ -144,7 +147,7 @@ static int dh_compute_value(struct kpp_request *req)
return ret;
}
static int dh_max_size(struct crypto_kpp *tfm)
static unsigned int dh_max_size(struct crypto_kpp *tfm)
{
struct dh_ctx *ctx = dh_get_ctx(tfm);
......
......@@ -3,9 +3,9 @@
* Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/export.h>
......
......@@ -1691,6 +1691,7 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
return PTR_ERR(sk_tfm);
}
drbg->ctr_handle = sk_tfm;
init_completion(&drbg->ctr_completion);
req = skcipher_request_alloc(sk_tfm, GFP_KERNEL);
if (!req) {
......
......@@ -29,6 +29,7 @@
#include <linux/swab.h>
#include <linux/fips.h>
#include <crypto/ecdh.h>
#include <crypto/rng.h>
#include "ecc.h"
#include "ecc_curve_defs.h"
......@@ -904,7 +905,7 @@ static inline void ecc_swap_digits(const u64 *in, u64 *out,
}
int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
const u8 *private_key, unsigned int private_key_len)
const u64 *private_key, unsigned int private_key_len)
{
int nbytes;
const struct ecc_curve *curve = ecc_get_curve(curve_id);
......@@ -917,24 +918,77 @@ int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
if (private_key_len != nbytes)
return -EINVAL;
if (vli_is_zero((const u64 *)&private_key[0], ndigits))
if (vli_is_zero(private_key, ndigits))
return -EINVAL;
/* Make sure the private key is in the range [1, n-1]. */
if (vli_cmp(curve->n, (const u64 *)&private_key[0], ndigits) != 1)
if (vli_cmp(curve->n, private_key, ndigits) != 1)
return -EINVAL;
return 0;
}
int ecdh_make_pub_key(unsigned int curve_id, unsigned int ndigits,
const u8 *private_key, unsigned int private_key_len,
u8 *public_key, unsigned int public_key_len)
/*
* ECC private keys are generated using the method of extra random bits,
* equivalent to that described in FIPS 186-4, Appendix B.4.1.
*
* d = (c mod(n–1)) + 1 where c is a string of random bits, 64 bits longer
* than requested
* 0 <= c mod(n-1) <= n-2 and implies that
* 1 <= d <= n-1
*
* This method generates a private key uniformly distributed in the range
* [1, n-1].
*/
int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey)
{
const struct ecc_curve *curve = ecc_get_curve(curve_id);
u64 priv[ndigits];
unsigned int nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
unsigned int nbits = vli_num_bits(curve->n, ndigits);
int err;
/* Check that N is included in Table 1 of FIPS 186-4, section 6.1.1 */
if (nbits < 160)
return -EINVAL;
/*
* FIPS 186-4 recommends that the private key should be obtained from a
* RBG with a security strength equal to or greater than the security
* strength associated with N.
*
* The maximum security strength identified by NIST SP800-57pt1r4 for
* ECC is 256 (N >= 512).
*
* This condition is met by the default RNG because it selects a favored
* DRBG with a security strength of 256.
*/
if (crypto_get_default_rng())
err = -EFAULT;
err = crypto_rng_get_bytes(crypto_default_rng, (u8 *)priv, nbytes);
crypto_put_default_rng();
if (err)
return err;
if (vli_is_zero(priv, ndigits))
return -EINVAL;
/* Make sure the private key is in the range [1, n-1]. */
if (vli_cmp(curve->n, priv, ndigits) != 1)
return -EINVAL;
ecc_swap_digits(priv, privkey, ndigits);
return 0;
}
int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits,
const u64 *private_key, u64 *public_key)
{
int ret = 0;
struct ecc_point *pk;
u64 priv[ndigits];
unsigned int nbytes;
const struct ecc_curve *curve = ecc_get_curve(curve_id);
if (!private_key || !curve) {
......@@ -942,7 +996,7 @@ int ecdh_make_pub_key(unsigned int curve_id, unsigned int ndigits,
goto out;
}
ecc_swap_digits((const u64 *)private_key, priv, ndigits);
ecc_swap_digits(private_key, priv, ndigits);
pk = ecc_alloc_point(ndigits);
if (!pk) {
......@@ -956,9 +1010,8 @@ int ecdh_make_pub_key(unsigned int curve_id, unsigned int ndigits,
goto err_free_point;
}
nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
ecc_swap_digits(pk->x, (u64 *)public_key, ndigits);
ecc_swap_digits(pk->y, (u64 *)&public_key[nbytes], ndigits);
ecc_swap_digits(pk->x, public_key, ndigits);
ecc_swap_digits(pk->y, &public_key[ndigits], ndigits);
err_free_point:
ecc_free_point(pk);
......@@ -967,9 +1020,8 @@ int ecdh_make_pub_key(unsigned int curve_id, unsigned int ndigits,
}
int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
const u8 *private_key, unsigned int private_key_len,
const u8 *public_key, unsigned int public_key_len,
u8 *secret, unsigned int secret_len)
const u64 *private_key, const u64 *public_key,
u64 *secret)
{
int ret = 0;
struct ecc_point *product, *pk;
......@@ -999,13 +1051,13 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
goto err_alloc_product;
}
ecc_swap_digits((const u64 *)public_key, pk->x, ndigits);
ecc_swap_digits((const u64 *)&public_key[nbytes], pk->y, ndigits);
ecc_swap_digits((const u64 *)private_key, priv, ndigits);
ecc_swap_digits(public_key, pk->x, ndigits);
ecc_swap_digits(&public_key[ndigits], pk->y, ndigits);
ecc_swap_digits(private_key, priv, ndigits);
ecc_point_mult(product, pk, priv, rand_z, curve->p, ndigits);
ecc_swap_digits(product->x, (u64 *)secret, ndigits);
ecc_swap_digits(product->x, secret, ndigits);
if (ecc_point_is_zero(product))
ret = -EFAULT;
......
......@@ -34,41 +34,51 @@
* ecc_is_key_valid() - Validate a given ECDH private key
*
* @curve_id: id representing the curve to use
* @ndigits: curve number of digits
* @ndigits: curve's number of digits
* @private_key: private key to be used for the given curve
* @private_key_len: private key len
* @private_key_len: private key length
*
* Returns 0 if the key is acceptable, a negative value otherwise
*/
int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
const u8 *private_key, unsigned int private_key_len);
const u64 *private_key, unsigned int private_key_len);
/**
* ecc_gen_privkey() - Generates an ECC private key.
* The private key is a random integer in the range 0 < random < n, where n is a
* prime that is the order of the cyclic subgroup generated by the distinguished
* point G.
* @curve_id: id representing the curve to use
* @ndigits: curve number of digits
* @private_key: buffer for storing the generated private key
*
* Returns 0 if the private key was generated successfully, a negative value
* if an error occurred.
*/
int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey);
/**
* ecdh_make_pub_key() - Compute an ECC public key
* ecc_make_pub_key() - Compute an ECC public key
*
* @curve_id: id representing the curve to use
* @ndigits: curve's number of digits
* @private_key: pregenerated private key for the given curve
* @private_key_len: length of private_key
* @public_key: buffer for storing the public key generated
* @public_key_len: length of the public_key buffer
* @public_key: buffer for storing the generated public key
*
* Returns 0 if the public key was generated successfully, a negative value
* if an error occurred.
*/
int ecdh_make_pub_key(const unsigned int curve_id, unsigned int ndigits,
const u8 *private_key, unsigned int private_key_len,
u8 *public_key, unsigned int public_key_len);
int ecc_make_pub_key(const unsigned int curve_id, unsigned int ndigits,
const u64 *private_key, u64 *public_key);
/**
* crypto_ecdh_shared_secret() - Compute a shared secret
*
* @curve_id: id representing the curve to use
* @ndigits: curve's number of digits
* @private_key: private key of part A
* @private_key_len: length of private_key
* @public_key: public key of counterpart B
* @public_key_len: length of public_key
* @secret: buffer for storing the calculated shared secret
* @secret_len: length of the secret buffer
*
* Note: It is recommended that you hash the result of crypto_ecdh_shared_secret
* before using it for symmetric encryption or HMAC.
......@@ -77,7 +87,6 @@ int ecdh_make_pub_key(const unsigned int curve_id, unsigned int ndigits,
* if an error occurred.
*/
int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
const u8 *private_key, unsigned int private_key_len,
const u8 *public_key, unsigned int public_key_len,
u8 *secret, unsigned int secret_len);
const u64 *private_key, const u64 *public_key,
u64 *secret);
#endif
......@@ -4,9 +4,9 @@
* Authors: Salvator Benedetto <salvatore.benedetto@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
......@@ -55,8 +55,12 @@ static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
ctx->curve_id = params.curve_id;
ctx->ndigits = ndigits;
if (!params.key || !params.key_size)
return ecc_gen_privkey(ctx->curve_id, ctx->ndigits,
ctx->private_key);
if (ecc_is_key_valid(ctx->curve_id, ctx->ndigits,
(const u8 *)params.key, params.key_size) < 0)
(const u64 *)params.key, params.key_size) < 0)
return -EINVAL;
memcpy(ctx->private_key, params.key, params.key_size);
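On the caller side, the generate-on-empty-key path above is reached simply by packing an ecdh parameter block with no key. A minimal sketch, assuming a tfm already allocated with crypto_alloc_kpp("ecdh", 0, 0) and the P-256 curve:

#include <crypto/ecdh.h>
#include <crypto/kpp.h>

static int ecdh_request_generated_key(struct crypto_kpp *tfm)
{
	struct ecdh params = {
		.curve_id = ECC_CURVE_NIST_P256,
		.key      = NULL,	/* no key supplied ...                */
		.key_size = 0,		/* ... so ecdh_set_secret() ends up    */
	};				/*     calling ecc_gen_privkey()       */
	char buf[64];			/* assumed large enough for the packed params */
	unsigned int len = crypto_ecdh_key_len(&params);
	int ret;

	if (len > sizeof(buf))
		return -EINVAL;

	ret = crypto_ecdh_encode_key(buf, len, &params);
	if (ret)
		return ret;

	return crypto_kpp_set_secret(tfm, buf, len);
}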
......@@ -81,16 +85,14 @@ static int ecdh_compute_value(struct kpp_request *req)
return -EINVAL;
ret = crypto_ecdh_shared_secret(ctx->curve_id, ctx->ndigits,
(const u8 *)ctx->private_key, nbytes,
(const u8 *)ctx->public_key, 2 * nbytes,
(u8 *)ctx->shared_secret, nbytes);
ctx->private_key,
ctx->public_key,
ctx->shared_secret);
buf = ctx->shared_secret;
} else {
ret = ecdh_make_pub_key(ctx->curve_id, ctx->ndigits,
(const u8 *)ctx->private_key, nbytes,
(u8 *)ctx->public_key,
sizeof(ctx->public_key));
ret = ecc_make_pub_key(ctx->curve_id, ctx->ndigits,
ctx->private_key, ctx->public_key);
buf = ctx->public_key;
/* Public part is a point thus it has both coordinates */
nbytes *= 2;
......@@ -106,13 +108,12 @@ static int ecdh_compute_value(struct kpp_request *req)
return ret;
}
static int ecdh_max_size(struct crypto_kpp *tfm)
static unsigned int ecdh_max_size(struct crypto_kpp *tfm)
{
struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
int nbytes = ctx->ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
/* Public key is made of two coordinates */
return 2 * nbytes;
/* Public key is made of two coordinates, add one to the left shift */
return ctx->ndigits << (ECC_DIGITS_TO_BYTES_SHIFT + 1);
}
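The combined shift is just ndigits, times 8 bytes per 64-bit digit, times 2 coordinates. Worked out for NIST P-256 (ndigits = 4, ECC_DIGITS_TO_BYTES_SHIFT = 3):

/* 4 << (3 + 1) = 64 bytes: the two 32-byte coordinates of the public key. */
unsigned int p256_max_size = 4 << (ECC_DIGITS_TO_BYTES_SHIFT + 1);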
static void no_exit_tfm(struct crypto_kpp *tfm)
......
......@@ -3,9 +3,9 @@
* Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/export.h>
......
......@@ -16,6 +16,7 @@
*
*/
#include <crypto/hmac.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
......@@ -74,8 +75,8 @@ static int hmac_setkey(struct crypto_shash *parent,
memcpy(opad, ipad, bs);
for (i = 0; i < bs; i++) {
ipad[i] ^= 0x36;
opad[i] ^= 0x5c;
ipad[i] ^= HMAC_IPAD_VALUE;
opad[i] ^= HMAC_OPAD_VALUE;
}
return crypto_shash_init(shash) ?:
......
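The two constants now carry names that point back at the HMAC construction the setkey path implements (RFC 2104), restated here for reference:

/*
 * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
 *
 * where ipad is the byte 0x36 and opad is the byte 0x5c, each repeated to
 * the hash block size. HMAC_IPAD_VALUE and HMAC_OPAD_VALUE (<crypto/hmac.h>)
 * simply name those two bytes.
 */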
......@@ -33,11 +33,6 @@ struct crypto_rng *crypto_default_rng;
EXPORT_SYMBOL_GPL(crypto_default_rng);
static int crypto_default_rng_refcnt;
static inline struct crypto_rng *__crypto_rng_cast(struct crypto_tfm *tfm)
{
return container_of(tfm, struct crypto_rng, base);
}
int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
{
u8 *buf = NULL;
......
......@@ -120,9 +120,6 @@ static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
/* Find out new modulus size from rsa implementation */
err = crypto_akcipher_maxsize(ctx->child);
if (err < 0)
return err;
if (err > PAGE_SIZE)
return -ENOTSUPP;
......@@ -144,9 +141,6 @@ static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key,
/* Find out new modulus size from rsa implementation */
err = crypto_akcipher_maxsize(ctx->child);
if (err < 0)
return err;
if (err > PAGE_SIZE)
return -ENOTSUPP;
......@@ -154,7 +148,7 @@ static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key,
return 0;
}
static int pkcs1pad_get_max_size(struct crypto_akcipher *tfm)
static unsigned int pkcs1pad_get_max_size(struct crypto_akcipher *tfm)
{
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
......@@ -164,7 +158,7 @@ static int pkcs1pad_get_max_size(struct crypto_akcipher *tfm)
* decrypt/verify.
*/
return ctx->key_size ?: -EINVAL;
return ctx->key_size;
}
static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len,
......@@ -496,7 +490,7 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
goto done;
pos++;
if (memcmp(out_buf + pos, digest_info->data, digest_info->size))
if (crypto_memneq(out_buf + pos, digest_info->data, digest_info->size))
goto done;
pos += digest_info->size;
......
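The switch away from memcmp() matters because the verify path compares attacker-influenced output against the expected DigestInfo: crypto_memneq() (declared in crypto/algapi.h) always examines every byte, so the comparison time no longer reveals the length of a matching prefix. A tiny sketch of the drop-in usage:

#include <linux/types.h>
#include <crypto/algapi.h>

/* Constant-time inequality test: non-zero iff the two buffers differ. */
static bool digest_mismatch(const u8 *a, const u8 *b, unsigned int len)
{
	return crypto_memneq(a, b, len);
}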
......@@ -337,11 +337,11 @@ static int rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
return -ENOMEM;
}
static int rsa_max_size(struct crypto_akcipher *tfm)
static unsigned int rsa_max_size(struct crypto_akcipher *tfm)
{
struct rsa_mpi_key *pkey = akcipher_tfm_ctx(tfm);
return pkey->n ? mpi_get_size(pkey->n) : -EINVAL;
return mpi_get_size(pkey->n);
}
static void rsa_exit_tfm(struct crypto_akcipher *tfm)
......
......@@ -138,8 +138,6 @@ static int test_aead_cycles(struct aead_request *req, int enc, int blen)
int ret = 0;
int i;
local_irq_disable();
/* Warm-up run. */
for (i = 0; i < 4; i++) {
if (enc)
......@@ -169,8 +167,6 @@ static int test_aead_cycles(struct aead_request *req, int enc, int blen)
}
out:
local_irq_enable();
if (ret == 0)
printk("1 operation in %lu cycles (%d bytes)\n",
(cycles + 4) / 8, blen);
......
......@@ -218,14 +218,14 @@ static int ahash_partial_update(struct ahash_request **preq,
crypto_ahash_reqtfm(req));
state = kmalloc(statesize + sizeof(guard), GFP_KERNEL);
if (!state) {
pr_err("alt: hash: Failed to alloc state for %s\n", algo);
pr_err("alg: hash: Failed to alloc state for %s\n", algo);
goto out_nostate;
}
memcpy(state + statesize, guard, sizeof(guard));
ret = crypto_ahash_export(req, state);
WARN_ON(memcmp(state + statesize, guard, sizeof(guard)));
if (ret) {
pr_err("alt: hash: Failed to export() for %s\n", algo);
pr_err("alg: hash: Failed to export() for %s\n", algo);
goto out;
}
ahash_request_free(req);
......@@ -344,19 +344,19 @@ static int __test_hash(struct crypto_ahash *tfm,
} else {
ret = wait_async_op(&tresult, crypto_ahash_init(req));
if (ret) {
pr_err("alt: hash: init failed on test %d "
pr_err("alg: hash: init failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
ret = wait_async_op(&tresult, crypto_ahash_update(req));
if (ret) {
pr_err("alt: hash: update failed on test %d "
pr_err("alg: hash: update failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
ret = wait_async_op(&tresult, crypto_ahash_final(req));
if (ret) {
pr_err("alt: hash: final failed on test %d "
pr_err("alg: hash: final failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
......@@ -488,13 +488,13 @@ static int __test_hash(struct crypto_ahash *tfm,
ahash_request_set_crypt(req, sg, result, template[i].tap[0]);
ret = wait_async_op(&tresult, crypto_ahash_init(req));
if (ret) {
pr_err("alt: hash: init failed on test %d for %s: ret=%d\n",
pr_err("alg: hash: init failed on test %d for %s: ret=%d\n",
j, algo, -ret);
goto out;
}
ret = wait_async_op(&tresult, crypto_ahash_update(req));
if (ret) {
pr_err("alt: hash: update failed on test %d for %s: ret=%d\n",
pr_err("alg: hash: update failed on test %d for %s: ret=%d\n",
j, algo, -ret);
goto out;
}
......@@ -505,7 +505,7 @@ static int __test_hash(struct crypto_ahash *tfm,
hash_buff, k, temp, &sg[0], algo, result,
&tresult);
if (ret) {
pr_err("hash: partial update failed on test %d for %s: ret=%d\n",
pr_err("alg: hash: partial update failed on test %d for %s: ret=%d\n",
j, algo, -ret);
goto out_noreq;
}
......@@ -513,7 +513,7 @@ static int __test_hash(struct crypto_ahash *tfm,
}
ret = wait_async_op(&tresult, crypto_ahash_final(req));
if (ret) {
pr_err("alt: hash: final failed on test %d for %s: ret=%d\n",
pr_err("alg: hash: final failed on test %d for %s: ret=%d\n",
j, algo, -ret);
goto out;
}
......@@ -1997,6 +1997,9 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
struct kpp_request *req;
void *input_buf = NULL;
void *output_buf = NULL;
void *a_public = NULL;
void *a_ss = NULL;
void *shared_secret = NULL;
struct tcrypt_result result;
unsigned int out_len_max;
int err = -ENOMEM;
......@@ -2026,21 +2029,32 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
tcrypt_complete, &result);
/* Compute public key */
/* Compute party A's public key */
err = wait_async_op(&result, crypto_kpp_generate_public_key(req));
if (err) {
pr_err("alg: %s: generate public key test failed. err %d\n",
pr_err("alg: %s: Party A: generate public key test failed. err %d\n",
alg, err);
goto free_output;
}
if (vec->genkey) {
/* Save party A's public key */
a_public = kzalloc(out_len_max, GFP_KERNEL);
if (!a_public) {
err = -ENOMEM;
goto free_output;
}
memcpy(a_public, sg_virt(req->dst), out_len_max);
} else {
/* Verify calculated public key */
if (memcmp(vec->expected_a_public, sg_virt(req->dst),
vec->expected_a_public_size)) {
pr_err("alg: %s: generate public key test failed. Invalid output\n",
pr_err("alg: %s: Party A: generate public key test failed. Invalid output\n",
alg);
err = -EINVAL;
goto free_output;
}
}
/* Calculate the shared secret by using counterpart B's public key. */
input_buf = kzalloc(vec->b_public_size, GFP_KERNEL);
......@@ -2058,15 +2072,53 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
tcrypt_complete, &result);
err = wait_async_op(&result, crypto_kpp_compute_shared_secret(req));
if (err) {
pr_err("alg: %s: compute shard secret test failed. err %d\n",
pr_err("alg: %s: Party A: compute shared secret test failed. err %d\n",
alg, err);
goto free_all;
}
if (vec->genkey) {
/* Save the shared secret obtained by party A */
a_ss = kzalloc(vec->expected_ss_size, GFP_KERNEL);
if (!a_ss) {
err = -ENOMEM;
goto free_all;
}
memcpy(a_ss, sg_virt(req->dst), vec->expected_ss_size);
/*
* Calculate party B's shared secret by using party A's
* public key.
*/
err = crypto_kpp_set_secret(tfm, vec->b_secret,
vec->b_secret_size);
if (err < 0)
goto free_all;
sg_init_one(&src, a_public, vec->expected_a_public_size);
sg_init_one(&dst, output_buf, out_len_max);
kpp_request_set_input(req, &src, vec->expected_a_public_size);
kpp_request_set_output(req, &dst, out_len_max);
kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
tcrypt_complete, &result);
err = wait_async_op(&result,
crypto_kpp_compute_shared_secret(req));
if (err) {
pr_err("alg: %s: Party B: compute shared secret failed. err %d\n",
alg, err);
goto free_all;
}
shared_secret = a_ss;
} else {
shared_secret = (void *)vec->expected_ss;
}
/*
* Verify the shared secret, from which the user will derive
* a secret key by running whatever hash they have chosen.
*/
if (memcmp(vec->expected_ss, sg_virt(req->dst),
if (memcmp(shared_secret, sg_virt(req->dst),
vec->expected_ss_size)) {
pr_err("alg: %s: compute shared secret test failed. Invalid output\n",
alg);
......@@ -2074,8 +2126,10 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
}
free_all:
kfree(a_ss);
kfree(input_buf);
free_output:
kfree(a_public);
kfree(output_buf);
free_req:
kpp_request_free(req);
......@@ -2168,8 +2222,11 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
tcrypt_complete, &result);
/* Run RSA encrypt - c = m^e mod n;*/
err = wait_async_op(&result, crypto_akcipher_encrypt(req));
err = wait_async_op(&result, vecs->siggen_sigver_test ?
/* Run asymmetric signature generation */
crypto_akcipher_sign(req) :
/* Run asymmetric encrypt */
crypto_akcipher_encrypt(req));
if (err) {
pr_err("alg: akcipher: encrypt test failed. err %d\n", err);
goto free_all;
......@@ -2207,8 +2264,11 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
init_completion(&result.completion);
akcipher_request_set_crypt(req, &src, &dst, vecs->c_size, out_len_max);
/* Run RSA decrypt - m = c^d mod n;*/
err = wait_async_op(&result, crypto_akcipher_decrypt(req));
err = wait_async_op(&result, vecs->siggen_sigver_test ?
/* Run asymmetric signature verification */
crypto_akcipher_verify(req) :
/* Run asymmetric decrypt */
crypto_akcipher_decrypt(req));
if (err) {
pr_err("alg: akcipher: decrypt test failed. err %d\n", err);
goto free_all;
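With siggen_sigver_test set, the same request plumbing drives crypto_akcipher_sign()/crypto_akcipher_verify() instead of encrypt/decrypt. A hedged sketch of how a caller could sign a 32-byte SHA-256 digest through "pkcs1pad(rsa,sha256)" once the private key has been set (helper names are illustrative; digest and sig must be DMA-able, i.e. not stack, buffers):

#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <crypto/akcipher.h>

struct sign_wait {
	struct completion done;
	int err;
};

static void sign_done(struct crypto_async_request *areq, int err)
{
	struct sign_wait *w = areq->data;

	if (err == -EINPROGRESS)
		return;
	w->err = err;
	complete(&w->done);
}

/* sig must hold crypto_akcipher_maxsize(tfm) bytes. */
static int rsa_pkcs1_sign(struct crypto_akcipher *tfm, u8 *digest,
			  u8 *sig, unsigned int sig_len)
{
	struct akcipher_request *req;
	struct scatterlist src, dst;
	struct sign_wait w;
	int ret;

	req = akcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	init_completion(&w.done);
	sg_init_one(&src, digest, 32);
	sg_init_one(&dst, sig, sig_len);
	akcipher_request_set_crypt(req, &src, &dst, 32, sig_len);
	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      sign_done, &w);

	ret = crypto_akcipher_sign(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&w.done);
		ret = w.err;
	}

	akcipher_request_free(req);
	return ret;
}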
......@@ -2306,6 +2366,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
.alg = "authenc(hmac(sha1),cbc(aes))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
.aead = {
.enc = __VECS(hmac_sha1_aes_cbc_enc_tv_temp)
......@@ -3254,6 +3315,25 @@ static const struct alg_test_desc alg_test_descs[] = {
.dec = __VECS(fcrypt_pcbc_dec_tv_template)
}
}
}, {
.alg = "pkcs1pad(rsa,sha224)",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "pkcs1pad(rsa,sha256)",
.test = alg_test_akcipher,
.fips_allowed = 1,
.suite = {
.akcipher = __VECS(pkcs1pad_rsa_tv_template)
}
}, {
.alg = "pkcs1pad(rsa,sha384)",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "pkcs1pad(rsa,sha512)",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "poly1305",
.test = alg_test_hash,
......
......@@ -133,17 +133,21 @@ struct akcipher_testvec {
unsigned int m_size;
unsigned int c_size;
bool public_key_vec;
bool siggen_sigver_test;
};
struct kpp_testvec {
const unsigned char *secret;
const unsigned char *b_secret;
const unsigned char *b_public;
const unsigned char *expected_a_public;
const unsigned char *expected_ss;
unsigned short secret_size;
unsigned short b_secret_size;
unsigned short b_public_size;
unsigned short expected_a_public_size;
unsigned short expected_ss_size;
bool genkey;
};
static const char zeroed_string[48];
......@@ -538,6 +542,101 @@ static const struct akcipher_testvec rsa_tv_template[] = {
}
};
/*
* PKCS#1 RSA test vectors. Obtained from CAVS testing.
*/
static const struct akcipher_testvec pkcs1pad_rsa_tv_template[] = {
{
.key =
"\x30\x82\x03\x1f\x02\x01\x10\x02\x82\x01\x01\x00\xd7\x1e\x77\x82"
"\x8c\x92\x31\xe7\x69\x02\xa2\xd5\x5c\x78\xde\xa2\x0c\x8f\xfe\x28"
"\x59\x31\xdf\x40\x9c\x60\x61\x06\xb9\x2f\x62\x40\x80\x76\xcb\x67"
"\x4a\xb5\x59\x56\x69\x17\x07\xfa\xf9\x4c\xbd\x6c\x37\x7a\x46\x7d"
"\x70\xa7\x67\x22\xb3\x4d\x7a\x94\xc3\xba\x4b\x7c\x4b\xa9\x32\x7c"
"\xb7\x38\x95\x45\x64\xa4\x05\xa8\x9f\x12\x7c\x4e\xc6\xc8\x2d\x40"
"\x06\x30\xf4\x60\xa6\x91\xbb\x9b\xca\x04\x79\x11\x13\x75\xf0\xae"
"\xd3\x51\x89\xc5\x74\xb9\xaa\x3f\xb6\x83\xe4\x78\x6b\xcd\xf9\x5c"
"\x4c\x85\xea\x52\x3b\x51\x93\xfc\x14\x6b\x33\x5d\x30\x70\xfa\x50"
"\x1b\x1b\x38\x81\x13\x8d\xf7\xa5\x0c\xc0\x8e\xf9\x63\x52\x18\x4e"
"\xa9\xf9\xf8\x5c\x5d\xcd\x7a\x0d\xd4\x8e\x7b\xee\x91\x7b\xad\x7d"
"\xb4\x92\xd5\xab\x16\x3b\x0a\x8a\xce\x8e\xde\x47\x1a\x17\x01\x86"
"\x7b\xab\x99\xf1\x4b\x0c\x3a\x0d\x82\x47\xc1\x91\x8c\xbb\x2e\x22"
"\x9e\x49\x63\x6e\x02\xc1\xc9\x3a\x9b\xa5\x22\x1b\x07\x95\xd6\x10"
"\x02\x50\xfd\xfd\xd1\x9b\xbe\xab\xc2\xc0\x74\xd7\xec\x00\xfb\x11"
"\x71\xcb\x7a\xdc\x81\x79\x9f\x86\x68\x46\x63\x82\x4d\xb7\xf1\xe6"
"\x16\x6f\x42\x63\xf4\x94\xa0\xca\x33\xcc\x75\x13\x02\x82\x01\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01"
"\x02\x82\x01\x00\x62\xb5\x60\x31\x4f\x3f\x66\x16\xc1\x60\xac\x47"
"\x2a\xff\x6b\x69\x00\x4a\xb2\x5c\xe1\x50\xb9\x18\x74\xa8\xe4\xdc"
"\xa8\xec\xcd\x30\xbb\xc1\xc6\xe3\xc6\xac\x20\x2a\x3e\x5e\x8b\x12"
"\xe6\x82\x08\x09\x38\x0b\xab\x7c\xb3\xcc\x9c\xce\x97\x67\xdd\xef"
"\x95\x40\x4e\x92\xe2\x44\xe9\x1d\xc1\x14\xfd\xa9\xb1\xdc\x71\x9c"
"\x46\x21\xbd\x58\x88\x6e\x22\x15\x56\xc1\xef\xe0\xc9\x8d\xe5\x80"
"\x3e\xda\x7e\x93\x0f\x52\xf6\xf5\xc1\x91\x90\x9e\x42\x49\x4f\x8d"
"\x9c\xba\x38\x83\xe9\x33\xc2\x50\x4f\xec\xc2\xf0\xa8\xb7\x6e\x28"
"\x25\x56\x6b\x62\x67\xfe\x08\xf1\x56\xe5\x6f\x0e\x99\xf1\xe5\x95"
"\x7b\xef\xeb\x0a\x2c\x92\x97\x57\x23\x33\x36\x07\xdd\xfb\xae\xf1"
"\xb1\xd8\x33\xb7\x96\x71\x42\x36\xc5\xa4\xa9\x19\x4b\x1b\x52\x4c"
"\x50\x69\x91\xf0\x0e\xfa\x80\x37\x4b\xb5\xd0\x2f\xb7\x44\x0d\xd4"
"\xf8\x39\x8d\xab\x71\x67\x59\x05\x88\x3d\xeb\x48\x48\x33\x88\x4e"
"\xfe\xf8\x27\x1b\xd6\x55\x60\x5e\x48\xb7\x6d\x9a\xa8\x37\xf9\x7a"
"\xde\x1b\xcd\x5d\x1a\x30\xd4\xe9\x9e\x5b\x3c\x15\xf8\x9c\x1f\xda"
"\xd1\x86\x48\x55\xce\x83\xee\x8e\x51\xc7\xde\x32\x12\x47\x7d\x46"
"\xb8\x35\xdf\x41\x02\x01\x30\x02\x01\x30\x02\x01\x30\x02\x01\x30"
"\x02\x01\x30",
.key_len = 804,
/*
* m is the SHA-256 hash of the following message:
* "\x49\x41\xbe\x0a\x0c\xc9\xf6\x35\x51\xe4\x27\x56\x13\x71\x4b\xd0"
* "\x36\x92\x84\x89\x1b\xf8\x56\x4a\x72\x61\x14\x69\x4f\x5e\x98\xa5"
* "\x80\x5a\x37\x51\x1f\xd8\xf5\xb5\x63\xfc\xf4\xb1\xbb\x4d\x33\xa3"
* "\x1e\xb9\x75\x8b\x9c\xda\x7e\x6d\x3a\x77\x85\xf7\xfc\x4e\xe7\x64"
* "\x43\x10\x19\xa0\x59\xae\xe0\xad\x4b\xd3\xc4\x45\xf7\xb1\xc2\xc1"
* "\x65\x01\x41\x39\x5b\x45\x47\xed\x2b\x51\xed\xe3\xd0\x09\x10\xd2"
* "\x39\x6c\x4a\x3f\xe5\xd2\x20\xe6\xb0\x71\x7d\x5b\xed\x26\x60\xf1"
* "\xb4\x73\xd1\xdb\x7d\xc4\x19\x91\xee\xf6\x32\x76\xf2\x19\x7d\xb7"
*/
.m =
"\x3e\xc8\xa1\x26\x20\x54\x44\x52\x48\x0d\xe5\x66\xf3\xb3\xf5\x04"
"\xbe\x10\xa8\x48\x94\x22\x2d\xdd\xba\x7a\xb4\x76\x8d\x79\x98\x89",
.m_size = 32,
.c =
"\xc7\xa3\x98\xeb\x43\xd1\x08\xc2\x3d\x78\x45\x04\x70\xc9\x01\xee"
"\xf8\x85\x37\x7c\x0b\xf9\x19\x70\x5c\x45\x7b\x2f\x3a\x0b\xb7\x8b"
"\xc4\x0d\x7b\x3a\x64\x0b\x0f\xdb\x78\xa9\x0b\xfd\x8d\x82\xa4\x86"
"\x39\xbf\x21\xb8\x84\xc4\xce\x9f\xc2\xe8\xb6\x61\x46\x17\xb9\x4e"
"\x0b\x57\x05\xb4\x4f\xf9\x9c\x93\x2d\x9b\xd5\x48\x1d\x80\x12\xef"
"\x3a\x77\x7f\xbc\xb5\x8e\x2b\x6b\x7c\xfc\x9f\x8c\x9d\xa2\xc4\x85"
"\xb0\x87\xe9\x17\x9b\xb6\x23\x62\xd2\xa9\x9f\x57\xe8\xf7\x04\x45"
"\x24\x3a\x45\xeb\xeb\x6a\x08\x8e\xaf\xc8\xa0\x84\xbc\x5d\x13\x38"
"\xf5\x17\x8c\xa3\x96\x9b\xa9\x38\x8d\xf0\x35\xad\x32\x8a\x72\x5b"
"\xdf\x21\xab\x4b\x0e\xa8\x29\xbb\x61\x54\xbf\x05\xdb\x84\x84\xde"
"\xdd\x16\x36\x31\xda\xf3\x42\x6d\x7a\x90\x22\x9b\x11\x29\xa6\xf8"
"\x30\x61\xda\xd3\x8b\x54\x1e\x42\xd1\x47\x1d\x6f\xd1\xcd\x42\x0b"
"\xd1\xe4\x15\x85\x7e\x08\xd6\x59\x64\x4c\x01\x34\x91\x92\x26\xe8"
"\xb0\x25\x8c\xf8\xf4\xfa\x8b\xc9\x31\x33\x76\x72\xfb\x64\x92\x9f"
"\xda\x62\x8d\xe1\x2a\x71\x91\x43\x40\x61\x3c\x5a\xbe\x86\xfc\x5b"
"\xe6\xf9\xa9\x16\x31\x1f\xaf\x25\x6d\xc2\x4a\x23\x6e\x63\x02\xa2",
.c_size = 256,
.siggen_sigver_test = true,
}
};
static const struct kpp_testvec dh_tv_template[] = {
{
.secret =
......@@ -840,6 +939,50 @@ static const struct kpp_testvec ecdh_tv_template[] = {
.b_public_size = 64,
.expected_a_public_size = 64,
.expected_ss_size = 32
}, {
.secret =
#ifdef __LITTLE_ENDIAN
"\x02\x00" /* type */
"\x08\x00" /* len */
"\x02\x00" /* curve_id */
"\x00\x00", /* key_size */
#else
"\x00\x02" /* type */
"\x00\x08" /* len */
"\x00\x02" /* curve_id */
"\x00\x00", /* key_size */
#endif
.b_secret =
#ifdef __LITTLE_ENDIAN
"\x02\x00" /* type */
"\x28\x00" /* len */
"\x02\x00" /* curve_id */
"\x20\x00" /* key_size */
#else
"\x00\x02" /* type */
"\x00\x28" /* len */
"\x00\x02" /* curve_id */
"\x00\x20" /* key_size */
#endif
"\x24\xd1\x21\xeb\xe5\xcf\x2d\x83"
"\xf6\x62\x1b\x6e\x43\x84\x3a\xa3"
"\x8b\xe0\x86\xc3\x20\x19\xda\x92"
"\x50\x53\x03\xe1\xc0\xea\xb8\x82",
.b_public =
"\x1a\x7f\xeb\x52\x00\xbd\x3c\x31"
"\x7d\xb6\x70\xc1\x86\xa6\xc7\xc4"
"\x3b\xc5\x5f\x6c\x6f\x58\x3c\xf5"
"\xb6\x63\x82\x77\x33\x24\xa1\x5f"
"\x6a\xca\x43\x6f\xf7\x7e\xff\x02"
"\x37\x08\xcc\x40\x5e\x7a\xfd\x6a"
"\x6a\x02\x6e\x41\x87\x68\x38\x77"
"\xfa\xa9\x44\x43\x2d\xef\x09\xdf",
.secret_size = 8,
.b_secret_size = 40,
.b_public_size = 64,
.expected_a_public_size = 64,
.expected_ss_size = 32,
.genkey = true,
}
};
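The secret/b_secret blobs above are simply the native-endian packing of the kpp secret header followed by the ecdh parameters, which is why the vector carries both byte-order variants. Sketched as a struct for illustration only (the real producer and consumer are crypto_ecdh_encode_key() and crypto_ecdh_decode_key()):

#include <linux/types.h>

/* Native-endian layout of the "secret"/"b_secret" blobs above. */
struct ecdh_packed_secret {
	u16 type;	/* 2 = CRYPTO_KPP_SECRET_TYPE_ECDH                    */
	u16 len;	/* total blob length: 8, or 0x28 when a key follows   */
	u16 curve_id;	/* 2 = ECC_CURVE_NIST_P256                            */
	u16 key_size;	/* 0 = generate a private key; 0x20 = 32-byte key     */
	u8  key[];	/* raw private key bytes, key_size of them            */
};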
......@@ -25,6 +25,10 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
/* Runtime PM autosuspend timeout: */
#define RNG_AUTOSUSPEND_TIMEOUT 100
#define USEC_POLL 2
#define TIMEOUT_POLL 20
......@@ -90,6 +94,8 @@ static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
struct mtk_rng *priv = to_mtk_rng(rng);
int retval = 0;
pm_runtime_get_sync((struct device *)priv->rng.priv);
while (max >= sizeof(u32)) {
if (!mtk_rng_wait_ready(rng, wait))
break;
......@@ -100,6 +106,9 @@ static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
max -= sizeof(u32);
}
pm_runtime_mark_last_busy((struct device *)priv->rng.priv);
pm_runtime_put_sync_autosuspend((struct device *)priv->rng.priv);
return retval || !wait ? retval : -EIO;
}
......@@ -120,9 +129,12 @@ static int mtk_rng_probe(struct platform_device *pdev)
return -ENOMEM;
priv->rng.name = pdev->name;
#ifndef CONFIG_PM
priv->rng.init = mtk_rng_init;
priv->rng.cleanup = mtk_rng_cleanup;
#endif
priv->rng.read = mtk_rng_read;
priv->rng.priv = (unsigned long)&pdev->dev;
priv->clk = devm_clk_get(&pdev->dev, "rng");
if (IS_ERR(priv->clk)) {
......@@ -142,11 +154,40 @@ static int mtk_rng_probe(struct platform_device *pdev)
return ret;
}
dev_set_drvdata(&pdev->dev, priv);
pm_runtime_set_autosuspend_delay(&pdev->dev, RNG_AUTOSUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
dev_info(&pdev->dev, "registered RNG driver\n");
return 0;
}
#ifdef CONFIG_PM
static int mtk_rng_runtime_suspend(struct device *dev)
{
struct mtk_rng *priv = dev_get_drvdata(dev);
mtk_rng_cleanup(&priv->rng);
return 0;
}
static int mtk_rng_runtime_resume(struct device *dev)
{
struct mtk_rng *priv = dev_get_drvdata(dev);
return mtk_rng_init(&priv->rng);
}
static UNIVERSAL_DEV_PM_OPS(mtk_rng_pm_ops, mtk_rng_runtime_suspend,
mtk_rng_runtime_resume, NULL);
#define MTK_RNG_PM_OPS (&mtk_rng_pm_ops)
#else /* CONFIG_PM */
#define MTK_RNG_PM_OPS NULL
#endif /* CONFIG_PM */
static const struct of_device_id mtk_rng_match[] = {
{ .compatible = "mediatek,mt7623-rng" },
{},
......@@ -157,6 +198,7 @@ static struct platform_driver mtk_rng_driver = {
.probe = mtk_rng_probe,
.driver = {
.name = MTK_RNG_DEV,
.pm = MTK_RNG_PM_OPS,
.of_match_table = mtk_rng_match,
},
};
......
......@@ -53,7 +53,10 @@ static int omap3_rom_rng_get_random(void *buf, unsigned int count)
cancel_delayed_work_sync(&idle_work);
if (rng_idle) {
clk_prepare_enable(rng_clk);
r = clk_prepare_enable(rng_clk);
if (r)
return r;
r = omap3_rom_rng_call(0, 0, RNG_GEN_PRNG_HW_INIT);
if (r != 0) {
clk_disable_unprepare(rng_clk);
......@@ -88,6 +91,8 @@ static struct hwrng omap3_rom_rng_ops = {
static int omap3_rom_rng_probe(struct platform_device *pdev)
{
int ret = 0;
pr_info("initializing\n");
omap3_rom_rng_call = pdev->dev.platform_data;
......@@ -104,7 +109,9 @@ static int omap3_rom_rng_probe(struct platform_device *pdev)
}
/* Leave the RNG in reset state. */
clk_prepare_enable(rng_clk);
ret = clk_prepare_enable(rng_clk);
if (ret)
return ret;
omap3_rom_rng_idle(0);
return hwrng_register(&omap3_rom_rng_ops);
......
......@@ -151,8 +151,15 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "missing period\n");
return -EINVAL;
}
if (!of_property_read_u32(pdev->dev.of_node,
"quality", &i))
priv->rng_ops.quality = i;
else
priv->rng_ops.quality = 0;
} else {
period = pdata->period;
priv->rng_ops.quality = pdata->quality;
}
priv->period = ns_to_ktime(period * NSEC_PER_USEC);
......
......@@ -327,6 +327,15 @@ config HW_RANDOM_PPC4XX
This option provides the kernel-side support for the TRNG hardware
found in the security function of some PowerPC 4xx SoCs.
config CRYPTO_DEV_OMAP
tristate "Support for OMAP crypto HW accelerators"
depends on ARCH_OMAP2PLUS
help
OMAP processors have various crypto HW accelerators. Select this if
you want to use the OMAP modules for any of the crypto algorithms.
if CRYPTO_DEV_OMAP
config CRYPTO_DEV_OMAP_SHAM
tristate "Support for OMAP MD5/SHA1/SHA2 hw accelerator"
depends on ARCH_OMAP2PLUS
......@@ -348,6 +357,7 @@ config CRYPTO_DEV_OMAP_AES
select CRYPTO_CBC
select CRYPTO_ECB
select CRYPTO_CTR
select CRYPTO_AEAD
help
OMAP processors have an AES accelerator module. Select this if you
want to use the OMAP module for AES algorithms.
......@@ -364,6 +374,8 @@ config CRYPTO_DEV_OMAP_DES
the ECB and CBC modes of operation are supported by the driver. Also
accesses made on unaligned boundaries are supported.
endif # CRYPTO_DEV_OMAP
config CRYPTO_DEV_PICOXCELL
tristate "Support for picoXcell IPSEC and Layer2 crypto engines"
depends on (ARCH_PICOXCELL || COMPILE_TEST) && HAVE_CLK
......@@ -542,6 +554,7 @@ config CRYPTO_DEV_MXS_DCP
source "drivers/crypto/qat/Kconfig"
source "drivers/crypto/cavium/cpt/Kconfig"
source "drivers/crypto/cavium/nitrox/Kconfig"
config CRYPTO_DEV_CAVIUM_ZIP
tristate "Cavium ZIP driver"
......@@ -656,4 +669,21 @@ config CRYPTO_DEV_BCM_SPU
source "drivers/crypto/stm32/Kconfig"
config CRYPTO_DEV_SAFEXCEL
tristate "Inside Secure's SafeXcel cryptographic engine driver"
depends on HAS_DMA && OF
depends on (ARM64 && ARCH_MVEBU) || (COMPILE_TEST && 64BIT)
select CRYPTO_AES
select CRYPTO_BLKCIPHER
select CRYPTO_HASH
select CRYPTO_HMAC
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
help
This driver interfaces with the SafeXcel EIP-197 cryptographic engine
designed by Inside Secure. Select this if you want to use the AES
cipher in ECB and CBC chaining modes together with the
SHA1/SHA224/SHA256/SHA512 hash algorithms.
endif # CRYPTO_HW
......@@ -6,6 +6,7 @@ obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += cavium/
obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/
obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/
obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/
obj-$(CONFIG_CRYPTO_DEV_EXYNOS_RNG) += exynos-rng.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
......@@ -20,7 +21,9 @@ obj-$(CONFIG_CRYPTO_DEV_MXC_SCC) += mxc-scc.o
obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
n2_crypto-y := n2_core.o n2_asm.o
obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
obj-$(CONFIG_CRYPTO_DEV_OMAP) += omap-crypto.o
obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes-driver.o
omap-aes-driver-objs := omap-aes.o omap-aes-gcm.o
obj-$(CONFIG_CRYPTO_DEV_OMAP_DES) += omap-des.o
obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
......@@ -39,3 +42,4 @@ obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/
obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/
......@@ -1179,6 +1179,7 @@ static int crypto4xx_probe(struct platform_device *ofdev)
dev_set_drvdata(dev, core_dev);
core_dev->ofdev = ofdev;
core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL);
rc = -ENOMEM;
if (!core_dev->dev)
goto err_alloc_dev;
......
......@@ -36,6 +36,7 @@
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/authenc.h>
......@@ -2510,8 +2511,8 @@ static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
memcpy(ctx->opad, ctx->ipad, blocksize);
for (index = 0; index < blocksize; index++) {
ctx->ipad[index] ^= 0x36;
ctx->opad[index] ^= 0x5c;
ctx->ipad[index] ^= HMAC_IPAD_VALUE;
ctx->opad[index] ^= HMAC_OPAD_VALUE;
}
flow_dump(" ipad: ", ctx->ipad, blocksize);
......@@ -2638,7 +2639,7 @@ static int aead_need_fallback(struct aead_request *req)
(spu->spu_type == SPU_TYPE_SPUM) &&
(ctx->digestsize != 8) && (ctx->digestsize != 12) &&
(ctx->digestsize != 16)) {
flow_log("%s() AES CCM needs fallbck for digest size %d\n",
flow_log("%s() AES CCM needs fallback for digest size %d\n",
__func__, ctx->digestsize);
return 1;
}
......
......@@ -1187,8 +1187,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
struct aead_edesc *edesc;
int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
......@@ -1475,8 +1475,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP)) ?
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
struct ablkcipher_edesc *edesc;
......@@ -1681,8 +1680,7 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP)) ?
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
struct ablkcipher_edesc *edesc;
......
......@@ -555,8 +555,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
typeof(*alg), aead);
struct device *qidev = ctx->qidev;
gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
struct aead_edesc *edesc;
dma_addr_t qm_sg_dma, iv_dma = 0;
......@@ -808,8 +808,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
struct device *qidev = ctx->qidev;
gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP)) ?
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
struct ablkcipher_edesc *edesc;
......@@ -953,8 +952,7 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
struct device *qidev = ctx->qidev;
gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP)) ?
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
struct ablkcipher_edesc *edesc;
......
......@@ -719,8 +719,8 @@ static int ahash_update_ctx(struct ahash_request *req)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
u8 *buf = current_buf(state);
int *buflen = current_buflen(state);
u8 *next_buf = alt_buf(state);
......@@ -849,8 +849,8 @@ static int ahash_final_ctx(struct ahash_request *req)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int buflen = *current_buflen(state);
u32 *desc;
int sec4_sg_bytes, sec4_sg_src_index;
......@@ -926,8 +926,8 @@ static int ahash_finup_ctx(struct ahash_request *req)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int buflen = *current_buflen(state);
u32 *desc;
int sec4_sg_src_index;
......@@ -1013,8 +1013,8 @@ static int ahash_digest(struct ahash_request *req)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
u32 *desc;
int digestsize = crypto_ahash_digestsize(ahash);
int src_nents, mapped_nents;
......@@ -1093,8 +1093,8 @@ static int ahash_final_no_ctx(struct ahash_request *req)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
u8 *buf = current_buf(state);
int buflen = *current_buflen(state);
u32 *desc;
......@@ -1154,8 +1154,8 @@ static int ahash_update_no_ctx(struct ahash_request *req)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
u8 *buf = current_buf(state);
int *buflen = current_buflen(state);
u8 *next_buf = alt_buf(state);
......@@ -1280,8 +1280,8 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int buflen = *current_buflen(state);
u32 *desc;
int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
......@@ -1370,8 +1370,8 @@ static int ahash_update_first(struct ahash_request *req)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
u8 *next_buf = alt_buf(state);
int *next_buflen = alt_buflen(state);
int to_hash;
......
......@@ -18,6 +18,10 @@
#define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
#define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \
sizeof(struct rsa_priv_f1_pdb))
#define DESC_RSA_PRIV_F2_LEN (2 * CAAM_CMD_SZ + \
sizeof(struct rsa_priv_f2_pdb))
#define DESC_RSA_PRIV_F3_LEN (2 * CAAM_CMD_SZ + \
sizeof(struct rsa_priv_f3_pdb))
static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
struct akcipher_request *req)
......@@ -54,6 +58,42 @@ static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}
static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct caam_rsa_key *key = &ctx->key;
struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
size_t p_sz = key->p_sz;
size_t q_sz = key->q_sz;
dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
}
static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct caam_rsa_key *key = &ctx->key;
struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
size_t p_sz = key->p_sz;
size_t q_sz = key->q_sz;
dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
}
/* RSA Job Completion handler */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
......@@ -90,6 +130,42 @@ static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
akcipher_request_complete(req, err);
}
static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
void *context)
{
struct akcipher_request *req = context;
struct rsa_edesc *edesc;
if (err)
caam_jr_strstatus(dev, err);
edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
rsa_priv_f2_unmap(dev, edesc, req);
rsa_io_unmap(dev, edesc, req);
kfree(edesc);
akcipher_request_complete(req, err);
}
static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
void *context)
{
struct akcipher_request *req = context;
struct rsa_edesc *edesc;
if (err)
caam_jr_strstatus(dev, err);
edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
rsa_priv_f3_unmap(dev, edesc, req);
rsa_io_unmap(dev, edesc, req);
kfree(edesc);
akcipher_request_complete(req, err);
}
static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
size_t desclen)
{
......@@ -97,8 +173,8 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct device *dev = ctx->dev;
struct rsa_edesc *edesc;
gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int sgc;
int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
int src_nents, dst_nents;
......@@ -258,6 +334,172 @@ static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
return 0;
}
static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
struct rsa_edesc *edesc)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct caam_rsa_key *key = &ctx->key;
struct device *dev = ctx->dev;
struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
int sec4_sg_index = 0;
size_t p_sz = key->p_sz;
size_t q_sz = key->q_sz;
pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
if (dma_mapping_error(dev, pdb->d_dma)) {
dev_err(dev, "Unable to map RSA private exponent memory\n");
return -ENOMEM;
}
pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
if (dma_mapping_error(dev, pdb->p_dma)) {
dev_err(dev, "Unable to map RSA prime factor p memory\n");
goto unmap_d;
}
pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
if (dma_mapping_error(dev, pdb->q_dma)) {
dev_err(dev, "Unable to map RSA prime factor q memory\n");
goto unmap_p;
}
pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
if (dma_mapping_error(dev, pdb->tmp1_dma)) {
dev_err(dev, "Unable to map RSA tmp1 memory\n");
goto unmap_q;
}
pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
if (dma_mapping_error(dev, pdb->tmp2_dma)) {
dev_err(dev, "Unable to map RSA tmp2 memory\n");
goto unmap_tmp1;
}
if (edesc->src_nents > 1) {
pdb->sgf |= RSA_PRIV_PDB_SGF_G;
pdb->g_dma = edesc->sec4_sg_dma;
sec4_sg_index += edesc->src_nents;
} else {
pdb->g_dma = sg_dma_address(req->src);
}
if (edesc->dst_nents > 1) {
pdb->sgf |= RSA_PRIV_PDB_SGF_F;
pdb->f_dma = edesc->sec4_sg_dma +
sec4_sg_index * sizeof(struct sec4_sg_entry);
} else {
pdb->f_dma = sg_dma_address(req->dst);
}
pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
return 0;
unmap_tmp1:
dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
return -ENOMEM;
}
static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
struct rsa_edesc *edesc)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct caam_rsa_key *key = &ctx->key;
struct device *dev = ctx->dev;
struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
int sec4_sg_index = 0;
size_t p_sz = key->p_sz;
size_t q_sz = key->q_sz;
pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
if (dma_mapping_error(dev, pdb->p_dma)) {
dev_err(dev, "Unable to map RSA prime factor p memory\n");
return -ENOMEM;
}
pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
if (dma_mapping_error(dev, pdb->q_dma)) {
dev_err(dev, "Unable to map RSA prime factor q memory\n");
goto unmap_p;
}
pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
if (dma_mapping_error(dev, pdb->dp_dma)) {
dev_err(dev, "Unable to map RSA exponent dp memory\n");
goto unmap_q;
}
pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
if (dma_mapping_error(dev, pdb->dq_dma)) {
dev_err(dev, "Unable to map RSA exponent dq memory\n");
goto unmap_dp;
}
pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
if (dma_mapping_error(dev, pdb->c_dma)) {
dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
goto unmap_dq;
}
pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
if (dma_mapping_error(dev, pdb->tmp1_dma)) {
dev_err(dev, "Unable to map RSA tmp1 memory\n");
goto unmap_qinv;
}
pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
if (dma_mapping_error(dev, pdb->tmp2_dma)) {
dev_err(dev, "Unable to map RSA tmp2 memory\n");
goto unmap_tmp1;
}
if (edesc->src_nents > 1) {
pdb->sgf |= RSA_PRIV_PDB_SGF_G;
pdb->g_dma = edesc->sec4_sg_dma;
sec4_sg_index += edesc->src_nents;
} else {
pdb->g_dma = sg_dma_address(req->src);
}
if (edesc->dst_nents > 1) {
pdb->sgf |= RSA_PRIV_PDB_SGF_F;
pdb->f_dma = edesc->sec4_sg_dma +
sec4_sg_index * sizeof(struct sec4_sg_entry);
} else {
pdb->f_dma = sg_dma_address(req->dst);
}
pdb->sgf |= key->n_sz;
pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
return 0;
unmap_tmp1:
dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
unmap_qinv:
dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
return -ENOMEM;
}
static int caam_rsa_enc(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
......@@ -301,24 +543,14 @@ static int caam_rsa_enc(struct akcipher_request *req)
return ret;
}
static int caam_rsa_dec(struct akcipher_request *req)
static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct caam_rsa_key *key = &ctx->key;
struct device *jrdev = ctx->dev;
struct rsa_edesc *edesc;
int ret;
if (unlikely(!key->n || !key->d))
return -EINVAL;
if (req->dst_len < key->n_sz) {
req->dst_len = key->n_sz;
dev_err(jrdev, "Output buffer length less than parameter n\n");
return -EOVERFLOW;
}
/* Allocate extended descriptor */
edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
if (IS_ERR(edesc))
......@@ -344,17 +576,147 @@ static int caam_rsa_dec(struct akcipher_request *req)
return ret;
}
static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct device *jrdev = ctx->dev;
struct rsa_edesc *edesc;
int ret;
/* Allocate extended descriptor */
edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
ret = set_rsa_priv_f2_pdb(req, edesc);
if (ret)
goto init_fail;
/* Initialize Job Descriptor */
init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);
ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
if (!ret)
return -EINPROGRESS;
rsa_priv_f2_unmap(jrdev, edesc, req);
init_fail:
rsa_io_unmap(jrdev, edesc, req);
kfree(edesc);
return ret;
}
static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct device *jrdev = ctx->dev;
struct rsa_edesc *edesc;
int ret;
/* Allocate extended descriptor */
edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
/* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
ret = set_rsa_priv_f3_pdb(req, edesc);
if (ret)
goto init_fail;
/* Initialize Job Descriptor */
init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);
ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
if (!ret)
return -EINPROGRESS;
rsa_priv_f3_unmap(jrdev, edesc, req);
init_fail:
rsa_io_unmap(jrdev, edesc, req);
kfree(edesc);
return ret;
}
static int caam_rsa_dec(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct caam_rsa_key *key = &ctx->key;
int ret;
if (unlikely(!key->n || !key->d))
return -EINVAL;
if (req->dst_len < key->n_sz) {
req->dst_len = key->n_sz;
dev_err(ctx->dev, "Output buffer length less than parameter n\n");
return -EOVERFLOW;
}
if (key->priv_form == FORM3)
ret = caam_rsa_dec_priv_f3(req);
else if (key->priv_form == FORM2)
ret = caam_rsa_dec_priv_f2(req);
else
ret = caam_rsa_dec_priv_f1(req);
return ret;
}
static void caam_rsa_free_key(struct caam_rsa_key *key)
{
kzfree(key->d);
kzfree(key->p);
kzfree(key->q);
kzfree(key->dp);
kzfree(key->dq);
kzfree(key->qinv);
kzfree(key->tmp1);
kzfree(key->tmp2);
kfree(key->e);
kfree(key->n);
key->d = NULL;
key->e = NULL;
key->n = NULL;
key->d_sz = 0;
key->e_sz = 0;
key->n_sz = 0;
memset(key, 0, sizeof(*key));
}
static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
{
while (!**ptr && *nbytes) {
(*ptr)++;
(*nbytes)--;
}
}
/**
* caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
* dP, dQ and qInv may decode to fewer bytes than the corresponding p or q prime
* factor, since BER encoding uses the minimum number of bytes needed to encode
* an integer. The decoded dP, dQ and qInv values therefore have to be
* zero-padded up to the corresponding prime factor's length.
*
* @ptr : pointer to {dP, dQ, qInv} CRT member
* @nbytes: length in bytes of {dP, dQ, qInv} CRT member
* @dstlen: length in bytes of corresponding p or q prime factor
*/
static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
{
u8 *dst;
caam_rsa_drop_leading_zeros(&ptr, &nbytes);
if (!nbytes)
return NULL;
dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
if (!dst)
return NULL;
memcpy(dst + (dstlen - nbytes), ptr, nbytes);
return dst;
}
/**
......@@ -370,10 +732,9 @@ static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
u8 *val;
while (!*buf && *nbytes) {
buf++;
(*nbytes)--;
}
caam_rsa_drop_leading_zeros(&buf, nbytes);
if (!*nbytes)
return NULL;
val = kzalloc(*nbytes, GFP_DMA | GFP_KERNEL);
if (!val)
......@@ -437,6 +798,64 @@ static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
return -ENOMEM;
}
static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
struct rsa_key *raw_key)
{
struct caam_rsa_key *rsa_key = &ctx->key;
size_t p_sz = raw_key->p_sz;
size_t q_sz = raw_key->q_sz;
rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
if (!rsa_key->p)
return;
rsa_key->p_sz = p_sz;
rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
if (!rsa_key->q)
goto free_p;
rsa_key->q_sz = q_sz;
rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
if (!rsa_key->tmp1)
goto free_q;
rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
if (!rsa_key->tmp2)
goto free_tmp1;
rsa_key->priv_form = FORM2;
rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
if (!rsa_key->dp)
goto free_tmp2;
rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
if (!rsa_key->dq)
goto free_dp;
rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
q_sz);
if (!rsa_key->qinv)
goto free_dq;
rsa_key->priv_form = FORM3;
return;
free_dq:
kzfree(rsa_key->dq);
free_dp:
kzfree(rsa_key->dp);
free_tmp2:
kzfree(rsa_key->tmp2);
free_tmp1:
kzfree(rsa_key->tmp1);
free_q:
kzfree(rsa_key->q);
free_p:
kzfree(rsa_key->p);
}
static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
......@@ -483,6 +902,8 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);
caam_rsa_set_priv_key_form(ctx, &raw_key);
return 0;
err:
......@@ -490,12 +911,11 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
return -ENOMEM;
}
static int caam_rsa_max_size(struct crypto_akcipher *tfm)
static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct caam_rsa_key *key = &ctx->key;
return (key->n) ? key->n_sz : -EINVAL;
return ctx->key.n_sz;
}
/* Per session pkc's driver context creation function */
......
......@@ -12,22 +12,76 @@
#include "compat.h"
#include "pdb.h"
/**
* caam_priv_key_form - CAAM RSA private key representation
* A CAAM RSA private key may take one of three forms.
*
* 1. The first representation consists of the pair (n, d), where the
* components have the following meanings:
* n the RSA modulus
* d the RSA private exponent
*
* 2. The second representation consists of the triplet (p, q, d), where the
* components have the following meanings:
* p the first prime factor of the RSA modulus n
* q the second prime factor of the RSA modulus n
* d the RSA private exponent
*
* 3. The third representation consists of the quintuple (p, q, dP, dQ, qInv),
* where the components have the following meanings:
* p the first prime factor of the RSA modulus n
* q the second prime factor of the RSA modulus n
* dP the first factor's CRT exponent
* dQ the second factor's CRT exponent
* qInv the (first) CRT coefficient
*
* The benefit of using the third or the second key form is lower computational
* cost for the decryption and signature operations.
*/
enum caam_priv_key_form {
FORM1,
FORM2,
FORM3
};
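The saving comes from exponentiating modulo the half-size primes p and q and recombining with the CRT. A standalone toy illustration (plain userspace C with textbook-sized numbers, not kernel code) of why form #3 recovers the same plaintext as form #1:

#include <stdint.h>
#include <stdio.h>

/* Square-and-multiply modular exponentiation on toy-sized numbers. */
static uint64_t modexp(uint64_t base, uint64_t exp, uint64_t mod)
{
	uint64_t result = 1;

	base %= mod;
	while (exp) {
		if (exp & 1)
			result = (result * base) % mod;
		base = (base * base) % mod;
		exp >>= 1;
	}
	return result;
}

int main(void)
{
	/* Textbook key: n = p*q, e*d = 1 mod lcm(p-1, q-1). */
	const uint64_t p = 61, q = 53, n = 3233, e = 17, d = 2753;
	const uint64_t dp = d % (p - 1);	/* 53 */
	const uint64_t dq = d % (q - 1);	/* 49 */
	const uint64_t qinv = 38;		/* q^-1 mod p */
	const uint64_t m = 65;
	uint64_t c, m1, m2, h, m_f1, m_f3;

	c = modexp(m, e, n);			/* encrypt: 2790 */

	m_f1 = modexp(c, d, n);			/* form #1: one full-size exponent */

	m1 = modexp(c, dp, p);			/* form #3: two half-size exponents */
	m2 = modexp(c, dq, q);
	h = (qinv * ((m1 + p - (m2 % p)) % p)) % p;
	m_f3 = m2 + h * q;

	printf("form1=%llu form3=%llu\n",	/* both print 65 */
	       (unsigned long long)m_f1, (unsigned long long)m_f3);
	return 0;
}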
/**
* caam_rsa_key - CAAM RSA key structure. Keys are allocated in DMA zone.
* @n : RSA modulus raw byte stream
* @e : RSA public exponent raw byte stream
* @d : RSA private exponent raw byte stream
* @p : RSA prime factor p of RSA modulus n
* @q : RSA prime factor q of RSA modulus n
* @dp : RSA CRT exponent of p
* @dq : RSA CRT exponent of q
* @qinv : RSA CRT coefficient
* @tmp1 : CAAM uses this temporary buffer as internal state buffer.
* It is assumed to be as long as p.
* @tmp2 : CAAM uses this temporary buffer as internal state buffer.
* It is assumed to be as long as q.
* @n_sz : length in bytes of RSA modulus n
* @e_sz : length in bytes of RSA public exponent
* @d_sz : length in bytes of RSA private exponent
* @p_sz : length in bytes of RSA prime factor p of RSA modulus n
* @q_sz : length in bytes of RSA prime factor q of RSA modulus n
* @priv_form : CAAM RSA private key representation
*/
struct caam_rsa_key {
u8 *n;
u8 *e;
u8 *d;
u8 *p;
u8 *q;
u8 *dp;
u8 *dq;
u8 *qinv;
u8 *tmp1;
u8 *tmp2;
size_t n_sz;
size_t e_sz;
size_t d_sz;
size_t p_sz;
size_t q_sz;
enum caam_priv_key_form priv_form;
};
/**
......@@ -59,6 +113,8 @@ struct rsa_edesc {
union {
struct rsa_pub_pdb pub;
struct rsa_priv_f1_pdb priv_f1;
struct rsa_priv_f2_pdb priv_f2;
struct rsa_priv_f3_pdb priv_f3;
} pdb;
u32 hw_desc[];
};
......@@ -66,5 +122,7 @@ struct rsa_edesc {
/* Descriptor construction primitives. */
void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb);
void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb);
void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb);
void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb);
#endif
......@@ -536,7 +536,7 @@ static int caam_jr_probe(struct platform_device *pdev)
return 0;
}
static struct of_device_id caam_jr_match[] = {
static const struct of_device_id caam_jr_match[] = {
{
.compatible = "fsl,sec-v4.0-job-ring",
},
......
......@@ -483,6 +483,8 @@ struct dsa_verify_pdb {
#define RSA_PDB_E_MASK (0xFFF << RSA_PDB_E_SHIFT)
#define RSA_PDB_D_SHIFT 12
#define RSA_PDB_D_MASK (0xFFF << RSA_PDB_D_SHIFT)
#define RSA_PDB_Q_SHIFT 12
#define RSA_PDB_Q_MASK (0xFFF << RSA_PDB_Q_SHIFT)
#define RSA_PDB_SGF_F (0x8 << RSA_PDB_SGF_SHIFT)
#define RSA_PDB_SGF_G (0x4 << RSA_PDB_SGF_SHIFT)
......@@ -490,6 +492,8 @@ struct dsa_verify_pdb {
#define RSA_PRIV_PDB_SGF_G (0x8 << RSA_PDB_SGF_SHIFT)
#define RSA_PRIV_KEY_FRM_1 0
#define RSA_PRIV_KEY_FRM_2 1
#define RSA_PRIV_KEY_FRM_3 2
/**
* RSA Encrypt Protocol Data Block
......@@ -525,4 +529,62 @@ struct rsa_priv_f1_pdb {
dma_addr_t d_dma;
} __packed;
/**
* RSA Decrypt PDB - Private Key Form #2
* @sgf : scatter-gather field
* @g_dma : dma address of encrypted input data
* @f_dma : dma address of output data
* @d_dma : dma address of RSA private exponent
* @p_dma : dma address of RSA prime factor p of RSA modulus n
* @q_dma : dma address of RSA prime factor q of RSA modulus n
* @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer
* as internal state buffer. It is assumed to be as long as p.
* @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer
* as internal state buffer. It is assumed to be as long as q.
* @p_q_len : length in bytes of first two prime factors of the RSA modulus n
*/
struct rsa_priv_f2_pdb {
u32 sgf;
dma_addr_t g_dma;
dma_addr_t f_dma;
dma_addr_t d_dma;
dma_addr_t p_dma;
dma_addr_t q_dma;
dma_addr_t tmp1_dma;
dma_addr_t tmp2_dma;
u32 p_q_len;
} __packed;
/**
* RSA Decrypt PDB - Private Key Form #3
* This is the RSA Chinese Remainder Theorem (CRT) form for two prime factors of
* the RSA modulus.
* @sgf : scatter-gather field
* @g_dma : dma address of encrypted input data
* @f_dma : dma address of output data
* @c_dma : dma address of RSA CRT coefficient
* @p_dma : dma address of RSA prime factor p of RSA modulus n
* @q_dma : dma address of RSA prime factor q of RSA modulus n
* @dp_dma : dma address of RSA CRT exponent of RSA prime factor p
* @dq_dma : dma address of RSA CRT exponent of RSA prime factor q
* @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer
* as internal state buffer. It is assumed to be as long as p.
* @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer
* as internal state buffer. It is assumed to be as long as q.
* @p_q_len : length in bytes of first two prime factors of the RSA modulus n
*/
struct rsa_priv_f3_pdb {
u32 sgf;
dma_addr_t g_dma;
dma_addr_t f_dma;
dma_addr_t c_dma;
dma_addr_t p_dma;
dma_addr_t q_dma;
dma_addr_t dp_dma;
dma_addr_t dq_dma;
dma_addr_t tmp1_dma;
dma_addr_t tmp2_dma;
u32 p_q_len;
} __packed;
#endif
......@@ -34,3 +34,39 @@ void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb)
append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
RSA_PRIV_KEY_FRM_1);
}
/* Descriptor for RSA Private operation - Private Key Form #2 */
void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb)
{
init_job_desc_pdb(desc, 0, sizeof(*pdb));
append_cmd(desc, pdb->sgf);
append_ptr(desc, pdb->g_dma);
append_ptr(desc, pdb->f_dma);
append_ptr(desc, pdb->d_dma);
append_ptr(desc, pdb->p_dma);
append_ptr(desc, pdb->q_dma);
append_ptr(desc, pdb->tmp1_dma);
append_ptr(desc, pdb->tmp2_dma);
append_cmd(desc, pdb->p_q_len);
append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
RSA_PRIV_KEY_FRM_2);
}
/* Descriptor for RSA Private operation - Private Key Form #3 */
void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb)
{
init_job_desc_pdb(desc, 0, sizeof(*pdb));
append_cmd(desc, pdb->sgf);
append_ptr(desc, pdb->g_dma);
append_ptr(desc, pdb->f_dma);
append_ptr(desc, pdb->c_dma);
append_ptr(desc, pdb->p_dma);
append_ptr(desc, pdb->q_dma);
append_ptr(desc, pdb->dp_dma);
append_ptr(desc, pdb->dq_dma);
append_ptr(desc, pdb->tmp1_dma);
append_ptr(desc, pdb->tmp2_dma);
append_cmd(desc, pdb->p_q_len);
append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
RSA_PRIV_KEY_FRM_3);
}
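For readers unfamiliar with the form #2/#3 PDBs above: form #3 is the textbook RSA-CRT private-key operation, which replaces one full-length modular exponentiation with two half-length ones using p, q, dp = d mod (p-1), dq = d mod (q-1) and a CRT coefficient. The following standalone, user-space sketch (not CAAM code; toy parameters and helper names are purely illustrative) shows the recombination those PDB fields feed. Which modular inverse the c_dma field expects is defined by the CAAM protocol data block, so treat this only as the underlying arithmetic.

    #include <stdio.h>

    /* square-and-multiply modular exponentiation, enough for toy operands */
    static unsigned long long modpow(unsigned long long b, unsigned long long e,
                                     unsigned long long m)
    {
            unsigned long long r = 1;

            b %= m;
            while (e) {
                    if (e & 1)
                            r = (r * b) % m;
                    b = (b * b) % m;
                    e >>= 1;
            }
            return r;
    }

    int main(void)
    {
            /* toy key: e * d == 1 mod lcm(p - 1, q - 1) */
            unsigned long long p = 61, q = 53, n = p * q;   /* n = 3233 */
            unsigned long long e = 17, d = 2753;
            unsigned long long dp = d % (p - 1);            /* CRT exponent for p */
            unsigned long long dq = d % (q - 1);            /* CRT exponent for q */
            unsigned long long qinv = 38;                   /* q^-1 mod p, the CRT coefficient */
            unsigned long long m = 65, c, m1, m2, h, out;

            c = modpow(m, e, n);                            /* encrypt: c = m^e mod n */

            /* CRT decrypt: two half-size exponentiations instead of one full-size */
            m1 = modpow(c, dp, p);
            m2 = modpow(c, dq, q);
            h = (qinv * ((m1 + p - (m2 % p)) % p)) % p;
            out = m2 + h * q;                               /* recombine: out == m */

            printf("decrypted %llu (expected %llu)\n", out, m);
            return 0;
    }

On real keys the same structure holds, with the exponentiations performed by the CAAM RSA engine and the operands handed over through the DMA addresses carried in the PDB.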
......@@ -98,7 +98,6 @@ static inline void update_output_data(struct cpt_request_info *req_info,
}
static inline u32 create_ctx_hdr(struct ablkcipher_request *req, u32 enc,
u32 cipher_type, u32 aes_key_type,
u32 *argcnt)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
......@@ -124,11 +123,11 @@ static inline u32 create_ctx_hdr(struct ablkcipher_request *req, u32 enc,
req_info->req.param1 = req->nbytes; /* Encryption Data length */
req_info->req.param2 = 0; /*Auth data length */
fctx->enc.enc_ctrl.e.enc_cipher = cipher_type;
fctx->enc.enc_ctrl.e.aes_key = aes_key_type;
fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
fctx->enc.enc_ctrl.e.iv_source = FROM_DPTR;
if (cipher_type == AES_XTS)
if (ctx->cipher_type == AES_XTS)
memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
else
memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);
......@@ -154,14 +153,13 @@ static inline u32 create_ctx_hdr(struct ablkcipher_request *req, u32 enc,
}
static inline u32 create_input_list(struct ablkcipher_request *req, u32 enc,
u32 cipher_type, u32 aes_key_type,
u32 enc_iv_len)
{
struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
struct cpt_request_info *req_info = &rctx->cpt_req;
u32 argcnt = 0;
create_ctx_hdr(req, enc, cipher_type, aes_key_type, &argcnt);
create_ctx_hdr(req, enc, &argcnt);
update_input_iv(req_info, req->info, enc_iv_len, &argcnt);
update_input_data(req_info, req->src, req->nbytes, &argcnt);
req_info->incnt = argcnt;
......@@ -177,7 +175,6 @@ static inline void store_cb_info(struct ablkcipher_request *req,
}
static inline void create_output_list(struct ablkcipher_request *req,
u32 cipher_type,
u32 enc_iv_len)
{
struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
......@@ -197,12 +194,9 @@ static inline void create_output_list(struct ablkcipher_request *req,
req_info->outcnt = argcnt;
}
static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc,
u32 cipher_type)
static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct cvm_enc_ctx *ctx = crypto_ablkcipher_ctx(tfm);
u32 key_type = AES_128_BIT;
struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
u32 enc_iv_len = crypto_ablkcipher_ivsize(tfm);
struct fc_context *fctx = &rctx->fctx;
......@@ -210,36 +204,10 @@ static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc,
void *cdev = NULL;
int status;
switch (ctx->key_len) {
case 16:
key_type = AES_128_BIT;
break;
case 24:
key_type = AES_192_BIT;
break;
case 32:
if (cipher_type == AES_XTS)
key_type = AES_128_BIT;
else
key_type = AES_256_BIT;
break;
case 64:
if (cipher_type == AES_XTS)
key_type = AES_256_BIT;
else
return -EINVAL;
break;
default:
return -EINVAL;
}
if (cipher_type == DES3_CBC)
key_type = 0;
memset(req_info, 0, sizeof(struct cpt_request_info));
memset(fctx, 0, sizeof(struct fc_context));
create_input_list(req, enc, cipher_type, key_type, enc_iv_len);
create_output_list(req, cipher_type, enc_iv_len);
create_input_list(req, enc, enc_iv_len);
create_output_list(req, enc_iv_len);
store_cb_info(req, req_info);
cdev = dev_handle.cdev[smp_processor_id()];
status = cptvf_do_request(cdev, req_info);
......@@ -254,34 +222,14 @@ static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc,
return -EINPROGRESS;
}
int cvm_des3_encrypt_cbc(struct ablkcipher_request *req)
{
return cvm_enc_dec(req, true, DES3_CBC);
}
int cvm_des3_decrypt_cbc(struct ablkcipher_request *req)
{
return cvm_enc_dec(req, false, DES3_CBC);
}
int cvm_aes_encrypt_xts(struct ablkcipher_request *req)
{
return cvm_enc_dec(req, true, AES_XTS);
}
int cvm_aes_decrypt_xts(struct ablkcipher_request *req)
{
return cvm_enc_dec(req, false, AES_XTS);
}
int cvm_aes_encrypt_cbc(struct ablkcipher_request *req)
int cvm_encrypt(struct ablkcipher_request *req)
{
return cvm_enc_dec(req, true, AES_CBC);
return cvm_enc_dec(req, true);
}
int cvm_aes_decrypt_cbc(struct ablkcipher_request *req)
int cvm_decrypt(struct ablkcipher_request *req)
{
return cvm_enc_dec(req, false, AES_CBC);
return cvm_enc_dec(req, false);
}
int cvm_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
......@@ -299,24 +247,93 @@ int cvm_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
ctx->key_len = keylen;
memcpy(ctx->enc_key, key1, keylen / 2);
memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
ctx->cipher_type = AES_XTS;
switch (ctx->key_len) {
case 32:
ctx->key_type = AES_128_BIT;
break;
case 64:
ctx->key_type = AES_256_BIT;
break;
default:
return -EINVAL;
}
return 0;
}
int cvm_enc_dec_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
u32 keylen)
static int cvm_validate_keylen(struct cvm_enc_ctx *ctx, u32 keylen)
{
if ((keylen == 16) || (keylen == 24) || (keylen == 32)) {
ctx->key_len = keylen;
switch (ctx->key_len) {
case 16:
ctx->key_type = AES_128_BIT;
break;
case 24:
ctx->key_type = AES_192_BIT;
break;
case 32:
ctx->key_type = AES_256_BIT;
break;
default:
return -EINVAL;
}
if (ctx->cipher_type == DES3_CBC)
ctx->key_type = 0;
return 0;
}
return -EINVAL;
}
static int cvm_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
u32 keylen, u8 cipher_type)
{
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);
if ((keylen == 16) || (keylen == 24) || (keylen == 32)) {
ctx->key_len = keylen;
ctx->cipher_type = cipher_type;
if (!cvm_validate_keylen(ctx, keylen)) {
memcpy(ctx->enc_key, key, keylen);
return 0;
} else {
crypto_ablkcipher_set_flags(cipher,
CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
}
return -EINVAL;
static int cvm_cbc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
u32 keylen)
{
return cvm_setkey(cipher, key, keylen, AES_CBC);
}
static int cvm_ecb_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
u32 keylen)
{
return cvm_setkey(cipher, key, keylen, AES_ECB);
}
static int cvm_cfb_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
u32 keylen)
{
return cvm_setkey(cipher, key, keylen, AES_CFB);
}
static int cvm_cbc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
u32 keylen)
{
return cvm_setkey(cipher, key, keylen, DES3_CBC);
}
static int cvm_ecb_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
u32 keylen)
{
return cvm_setkey(cipher, key, keylen, DES3_ECB);
}
int cvm_enc_dec_init(struct crypto_tfm *tfm)
......@@ -349,8 +366,8 @@ struct crypto_alg algs[] = { {
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.setkey = cvm_xts_setkey,
.encrypt = cvm_aes_encrypt_xts,
.decrypt = cvm_aes_decrypt_xts,
.encrypt = cvm_encrypt,
.decrypt = cvm_decrypt,
},
},
.cra_init = cvm_enc_dec_init,
......@@ -369,9 +386,51 @@ struct crypto_alg algs[] = { {
.ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = cvm_enc_dec_setkey,
.encrypt = cvm_aes_encrypt_cbc,
.decrypt = cvm_aes_decrypt_cbc,
.setkey = cvm_cbc_aes_setkey,
.encrypt = cvm_encrypt,
.decrypt = cvm_decrypt,
},
},
.cra_init = cvm_enc_dec_init,
.cra_module = THIS_MODULE,
}, {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct cvm_enc_ctx),
.cra_alignmask = 7,
.cra_priority = 4001,
.cra_name = "ecb(aes)",
.cra_driver_name = "cavium-ecb-aes",
.cra_type = &crypto_ablkcipher_type,
.cra_u = {
.ablkcipher = {
.ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = cvm_ecb_aes_setkey,
.encrypt = cvm_encrypt,
.decrypt = cvm_decrypt,
},
},
.cra_init = cvm_enc_dec_init,
.cra_module = THIS_MODULE,
}, {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct cvm_enc_ctx),
.cra_alignmask = 7,
.cra_priority = 4001,
.cra_name = "cfb(aes)",
.cra_driver_name = "cavium-cfb-aes",
.cra_type = &crypto_ablkcipher_type,
.cra_u = {
.ablkcipher = {
.ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = cvm_cfb_aes_setkey,
.encrypt = cvm_encrypt,
.decrypt = cvm_decrypt,
},
},
.cra_init = cvm_enc_dec_init,
......@@ -390,9 +449,30 @@ struct crypto_alg algs[] = { {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.setkey = cvm_enc_dec_setkey,
.encrypt = cvm_des3_encrypt_cbc,
.decrypt = cvm_des3_decrypt_cbc,
.setkey = cvm_cbc_des3_setkey,
.encrypt = cvm_encrypt,
.decrypt = cvm_decrypt,
},
},
.cra_init = cvm_enc_dec_init,
.cra_module = THIS_MODULE,
}, {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct cvm_des3_ctx),
.cra_alignmask = 7,
.cra_priority = 4001,
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "cavium-ecb-des3_ede",
.cra_type = &crypto_ablkcipher_type,
.cra_u = {
.ablkcipher = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.setkey = cvm_ecb_des3_setkey,
.encrypt = cvm_encrypt,
.decrypt = cvm_decrypt,
},
},
.cra_init = cvm_enc_dec_init,
......
......@@ -77,6 +77,11 @@ union encr_ctrl {
} e;
};
struct cvm_cipher {
const char *name;
u8 value;
};
struct enc_context {
union encr_ctrl enc_ctrl;
u8 encr_key[32];
......@@ -96,6 +101,8 @@ struct fc_context {
struct cvm_enc_ctx {
u32 key_len;
u8 enc_key[MAX_KEY_SIZE];
u8 cipher_type:4;
u8 key_type:2;
};
struct cvm_des3_ctx {
......
......@@ -525,7 +525,7 @@ static irqreturn_t cptvf_misc_intr_handler(int irq, void *cptvf_irq)
intr = cptvf_read_vf_misc_intr_status(cptvf);
/*Check for MISC interrupt types*/
if (likely(intr & CPT_VF_INTR_MBOX_MASK)) {
dev_err(&pdev->dev, "Mailbox interrupt 0x%llx on CPT VF %d\n",
dev_dbg(&pdev->dev, "Mailbox interrupt 0x%llx on CPT VF %d\n",
intr, cptvf->vfid);
cptvf_handle_mbox_intr(cptvf);
cptvf_clear_mbox_intr(cptvf);
......
#
# Cavium NITROX Crypto Device configuration
#
config CRYPTO_DEV_NITROX
tristate
select CRYPTO_BLKCIPHER
select CRYPTO_AES
select CRYPTO_DES
select FW_LOADER
config CRYPTO_DEV_NITROX_CNN55XX
tristate "Support for Cavium CNN55XX driver"
depends on PCI_MSI && 64BIT
select CRYPTO_DEV_NITROX
default m
help
Support for Cavium NITROX family CNN55XX driver
for accelerating crypto workloads.
To compile this as a module, choose M here: the module
will be called n5pf.
obj-$(CONFIG_CRYPTO_DEV_NITROX_CNN55XX) += n5pf.o
n5pf-objs := nitrox_main.o \
nitrox_isr.o \
nitrox_lib.o \
nitrox_hal.o \
nitrox_reqmgr.o \
nitrox_algs.o
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <crypto/ctr.h>
#include <crypto/des.h>
#include <crypto/xts.h>
#include "nitrox_dev.h"
#include "nitrox_common.h"
#include "nitrox_req.h"
#define PRIO 4001
struct nitrox_cipher {
const char *name;
enum flexi_cipher value;
};
/**
* supported cipher list
*/
static const struct nitrox_cipher flexi_cipher_table[] = {
{ "null", CIPHER_NULL },
{ "cbc(des3_ede)", CIPHER_3DES_CBC },
{ "ecb(des3_ede)", CIPHER_3DES_ECB },
{ "cbc(aes)", CIPHER_AES_CBC },
{ "ecb(aes)", CIPHER_AES_ECB },
{ "cfb(aes)", CIPHER_AES_CFB },
{ "rfc3686(ctr(aes))", CIPHER_AES_CTR },
{ "xts(aes)", CIPHER_AES_XTS },
{ "cts(cbc(aes))", CIPHER_AES_CBC_CTS },
{ NULL, CIPHER_INVALID }
};
static enum flexi_cipher flexi_cipher_type(const char *name)
{
const struct nitrox_cipher *cipher = flexi_cipher_table;
while (cipher->name) {
if (!strcmp(cipher->name, name))
break;
cipher++;
}
return cipher->value;
}
static int flexi_aes_keylen(int keylen)
{
int aes_keylen;
switch (keylen) {
case AES_KEYSIZE_128:
aes_keylen = 1;
break;
case AES_KEYSIZE_192:
aes_keylen = 2;
break;
case AES_KEYSIZE_256:
aes_keylen = 3;
break;
default:
aes_keylen = -EINVAL;
break;
}
return aes_keylen;
}
static int nitrox_skcipher_init(struct crypto_skcipher *tfm)
{
struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm);
void *fctx;
/* get the first device */
nctx->ndev = nitrox_get_first_device();
if (!nctx->ndev)
return -ENODEV;
/* allocate nitrox crypto context */
fctx = crypto_alloc_context(nctx->ndev);
if (!fctx) {
nitrox_put_device(nctx->ndev);
return -ENOMEM;
}
nctx->u.ctx_handle = (uintptr_t)fctx;
crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(tfm) +
sizeof(struct nitrox_kcrypt_request));
return 0;
}
static void nitrox_skcipher_exit(struct crypto_skcipher *tfm)
{
struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm);
/* free the nitrox crypto context */
if (nctx->u.ctx_handle) {
struct flexi_crypto_context *fctx = nctx->u.fctx;
memset(&fctx->crypto, 0, sizeof(struct crypto_keys));
memset(&fctx->auth, 0, sizeof(struct auth_keys));
crypto_free_context((void *)fctx);
}
nitrox_put_device(nctx->ndev);
nctx->u.ctx_handle = 0;
nctx->ndev = NULL;
}
static inline int nitrox_skcipher_setkey(struct crypto_skcipher *cipher,
int aes_keylen, const u8 *key,
unsigned int keylen)
{
struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm);
struct flexi_crypto_context *fctx;
enum flexi_cipher cipher_type;
const char *name;
name = crypto_tfm_alg_name(tfm);
cipher_type = flexi_cipher_type(name);
if (unlikely(cipher_type == CIPHER_INVALID)) {
pr_err("unsupported cipher: %s\n", name);
return -EINVAL;
}
/* fill crypto context */
fctx = nctx->u.fctx;
fctx->flags = 0;
fctx->w0.cipher_type = cipher_type;
fctx->w0.aes_keylen = aes_keylen;
fctx->w0.iv_source = IV_FROM_DPTR;
fctx->flags = cpu_to_be64(*(u64 *)&fctx->w0);
/* copy the key to context */
memcpy(fctx->crypto.u.key, key, keylen);
return 0;
}
static int nitrox_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int keylen)
{
int aes_keylen;
aes_keylen = flexi_aes_keylen(keylen);
if (aes_keylen < 0) {
crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen);
}
static void nitrox_skcipher_callback(struct skcipher_request *skreq,
int err)
{
if (err) {
pr_err_ratelimited("request failed status 0x%0x\n", err);
err = -EINVAL;
}
skcipher_request_complete(skreq, err);
}
static int nitrox_skcipher_crypt(struct skcipher_request *skreq, bool enc)
{
struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(skreq);
struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(cipher);
struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq);
int ivsize = crypto_skcipher_ivsize(cipher);
struct se_crypto_request *creq;
creq = &nkreq->creq;
creq->flags = skreq->base.flags;
creq->gfp = (skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
/* fill the request */
creq->ctrl.value = 0;
creq->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC;
creq->ctrl.s.arg = (enc ? ENCRYPT : DECRYPT);
/* param0: length of the data to be encrypted */
creq->gph.param0 = cpu_to_be16(skreq->cryptlen);
creq->gph.param1 = 0;
/* param2: encryption data offset */
creq->gph.param2 = cpu_to_be16(ivsize);
creq->gph.param3 = 0;
creq->ctx_handle = nctx->u.ctx_handle;
creq->ctrl.s.ctxl = sizeof(struct flexi_crypto_context);
/* copy the iv */
memcpy(creq->iv, skreq->iv, ivsize);
creq->ivsize = ivsize;
creq->src = skreq->src;
creq->dst = skreq->dst;
nkreq->nctx = nctx;
nkreq->skreq = skreq;
/* send the crypto request */
return nitrox_process_se_request(nctx->ndev, creq,
nitrox_skcipher_callback, skreq);
}
static int nitrox_aes_encrypt(struct skcipher_request *skreq)
{
return nitrox_skcipher_crypt(skreq, true);
}
static int nitrox_aes_decrypt(struct skcipher_request *skreq)
{
return nitrox_skcipher_crypt(skreq, false);
}
static int nitrox_3des_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
if (keylen != DES3_EDE_KEY_SIZE) {
crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
return nitrox_skcipher_setkey(cipher, 0, key, keylen);
}
static int nitrox_3des_encrypt(struct skcipher_request *skreq)
{
return nitrox_skcipher_crypt(skreq, true);
}
static int nitrox_3des_decrypt(struct skcipher_request *skreq)
{
return nitrox_skcipher_crypt(skreq, false);
}
static int nitrox_aes_xts_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm);
struct flexi_crypto_context *fctx;
int aes_keylen, ret;
ret = xts_check_key(tfm, key, keylen);
if (ret)
return ret;
keylen /= 2;
aes_keylen = flexi_aes_keylen(keylen);
if (aes_keylen < 0) {
crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
fctx = nctx->u.fctx;
/* copy KEY2 */
memcpy(fctx->auth.u.key2, (key + keylen), keylen);
return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen);
}
static int nitrox_aes_ctr_rfc3686_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm);
struct flexi_crypto_context *fctx;
int aes_keylen;
if (keylen < CTR_RFC3686_NONCE_SIZE)
return -EINVAL;
fctx = nctx->u.fctx;
memcpy(fctx->crypto.iv, key + (keylen - CTR_RFC3686_NONCE_SIZE),
CTR_RFC3686_NONCE_SIZE);
keylen -= CTR_RFC3686_NONCE_SIZE;
aes_keylen = flexi_aes_keylen(keylen);
if (aes_keylen < 0) {
crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen);
}
static struct skcipher_alg nitrox_skciphers[] = { {
.base = {
.cra_name = "cbc(aes)",
.cra_driver_name = "n5_cbc(aes)",
.cra_priority = PRIO,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = nitrox_aes_setkey,
.encrypt = nitrox_aes_encrypt,
.decrypt = nitrox_aes_decrypt,
.init = nitrox_skcipher_init,
.exit = nitrox_skcipher_exit,
}, {
.base = {
.cra_name = "ecb(aes)",
.cra_driver_name = "n5_ecb(aes)",
.cra_priority = PRIO,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = nitrox_aes_setkey,
.encrypt = nitrox_aes_encrypt,
.decrypt = nitrox_aes_decrypt,
.init = nitrox_skcipher_init,
.exit = nitrox_skcipher_exit,
}, {
.base = {
.cra_name = "cfb(aes)",
.cra_driver_name = "n5_cfb(aes)",
.cra_priority = PRIO,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = nitrox_aes_setkey,
.encrypt = nitrox_aes_encrypt,
.decrypt = nitrox_aes_decrypt,
.init = nitrox_skcipher_init,
.exit = nitrox_skcipher_exit,
}, {
.base = {
.cra_name = "xts(aes)",
.cra_driver_name = "n5_xts(aes)",
.cra_priority = PRIO,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = nitrox_aes_xts_setkey,
.encrypt = nitrox_aes_encrypt,
.decrypt = nitrox_aes_decrypt,
.init = nitrox_skcipher_init,
.exit = nitrox_skcipher_exit,
}, {
.base = {
.cra_name = "rfc3686(ctr(aes))",
.cra_driver_name = "n5_rfc3686(ctr(aes))",
.cra_priority = PRIO,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
.max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
.ivsize = CTR_RFC3686_IV_SIZE,
.init = nitrox_skcipher_init,
.exit = nitrox_skcipher_exit,
.setkey = nitrox_aes_ctr_rfc3686_setkey,
.encrypt = nitrox_aes_encrypt,
.decrypt = nitrox_aes_decrypt,
}, {
.base = {
.cra_name = "cts(cbc(aes))",
.cra_driver_name = "n5_cts(cbc(aes))",
.cra_priority = PRIO,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
},
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = nitrox_aes_setkey,
.encrypt = nitrox_aes_encrypt,
.decrypt = nitrox_aes_decrypt,
.init = nitrox_skcipher_init,
.exit = nitrox_skcipher_exit,
}, {
.base = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "n5_cbc(des3_ede)",
.cra_priority = PRIO,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
.setkey = nitrox_3des_setkey,
.encrypt = nitrox_3des_encrypt,
.decrypt = nitrox_3des_decrypt,
.init = nitrox_skcipher_init,
.exit = nitrox_skcipher_exit,
}, {
.base = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "n5_ecb(des3_ede)",
.cra_priority = PRIO,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
.setkey = nitrox_3des_setkey,
.encrypt = nitrox_3des_encrypt,
.decrypt = nitrox_3des_decrypt,
.init = nitrox_skcipher_init,
.exit = nitrox_skcipher_exit,
}
};
int nitrox_crypto_register(void)
{
return crypto_register_skciphers(nitrox_skciphers,
ARRAY_SIZE(nitrox_skciphers));
}
void nitrox_crypto_unregister(void)
{
crypto_unregister_skciphers(nitrox_skciphers,
ARRAY_SIZE(nitrox_skciphers));
}
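The skciphers registered above are reached through the generic kernel crypto API rather than any NITROX-specific interface. Below is a minimal sketch (not part of the patch; the demo_* names and the choice of "cbc(aes)" are illustrative) of how a kernel caller might drive one of these algorithms and wait synchronously for the asynchronous completion:

    #include <linux/completion.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>
    #include <crypto/skcipher.h>

    struct demo_wait {
            struct completion done;
            int err;
    };

    static void demo_skcipher_done(struct crypto_async_request *areq, int err)
    {
            struct demo_wait *w = areq->data;

            if (err == -EINPROGRESS)        /* backlogged request has now started */
                    return;
            w->err = err;
            complete(&w->done);
    }

    /* buf: kmalloc'd, len a multiple of AES_BLOCK_SIZE; iv: AES_BLOCK_SIZE bytes */
    static int demo_cbc_aes_encrypt(u8 *buf, unsigned int len,
                                    const u8 *key, unsigned int keylen, u8 *iv)
    {
            struct crypto_skcipher *tfm;
            struct skcipher_request *req;
            struct scatterlist sg;
            struct demo_wait w;
            int ret;

            tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            ret = crypto_skcipher_setkey(tfm, key, keylen);
            if (ret)
                    goto free_tfm;

            req = skcipher_request_alloc(tfm, GFP_KERNEL);
            if (!req) {
                    ret = -ENOMEM;
                    goto free_tfm;
            }

            init_completion(&w.done);
            sg_init_one(&sg, buf, len);
            skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                          CRYPTO_TFM_REQ_MAY_SLEEP,
                                          demo_skcipher_done, &w);
            skcipher_request_set_crypt(req, &sg, &sg, len, iv);

            ret = crypto_skcipher_encrypt(req);
            if (ret == -EINPROGRESS || ret == -EBUSY) {
                    wait_for_completion(&w.done);
                    ret = w.err;
            }

            skcipher_request_free(req);
    free_tfm:
            crypto_free_skcipher(tfm);
            return ret;
    }

Whether the request completes inline or is queued on the driver's backlog, the caller above sees a single synchronous return code.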
#ifndef __NITROX_COMMON_H
#define __NITROX_COMMON_H
#include "nitrox_dev.h"
#include "nitrox_req.h"
int nitrox_crypto_register(void);
void nitrox_crypto_unregister(void);
void *crypto_alloc_context(struct nitrox_device *ndev);
void crypto_free_context(void *ctx);
struct nitrox_device *nitrox_get_first_device(void);
void nitrox_put_device(struct nitrox_device *ndev);
void nitrox_pf_cleanup_isr(struct nitrox_device *ndev);
int nitrox_pf_init_isr(struct nitrox_device *ndev);
int nitrox_common_sw_init(struct nitrox_device *ndev);
void nitrox_common_sw_cleanup(struct nitrox_device *ndev);
void pkt_slc_resp_handler(unsigned long data);
int nitrox_process_se_request(struct nitrox_device *ndev,
struct se_crypto_request *req,
completion_t cb,
struct skcipher_request *skreq);
void backlog_qflush_work(struct work_struct *work);
void nitrox_config_emu_unit(struct nitrox_device *ndev);
void nitrox_config_pkt_input_rings(struct nitrox_device *ndev);
void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev);
void nitrox_config_vfmode(struct nitrox_device *ndev, int mode);
void nitrox_config_nps_unit(struct nitrox_device *ndev);
void nitrox_config_pom_unit(struct nitrox_device *ndev);
void nitrox_config_rand_unit(struct nitrox_device *ndev);
void nitrox_config_efl_unit(struct nitrox_device *ndev);
void nitrox_config_bmi_unit(struct nitrox_device *ndev);
void nitrox_config_bmo_unit(struct nitrox_device *ndev);
void nitrox_config_lbc_unit(struct nitrox_device *ndev);
void invalidate_lbc(struct nitrox_device *ndev);
void enable_pkt_input_ring(struct nitrox_device *ndev, int ring);
void enable_pkt_solicit_port(struct nitrox_device *ndev, int port);
#endif /* __NITROX_COMMON_H */
This diff is collapsed.
#ifndef __NITROX_DEV_H
#define __NITROX_DEV_H
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#define VERSION_LEN 32
struct nitrox_cmdq {
/* command queue lock */
spinlock_t cmdq_lock;
/* response list lock */
spinlock_t response_lock;
/* backlog list lock */
spinlock_t backlog_lock;
/* request submitted to chip, in progress */
struct list_head response_head;
/* hw queue full, hold in backlog list */
struct list_head backlog_head;
/* doorbell address */
u8 __iomem *dbell_csr_addr;
/* base address of the queue */
u8 *head;
struct nitrox_device *ndev;
/* flush pending backlog commands */
struct work_struct backlog_qflush;
/* requests posted waiting for completion */
atomic_t pending_count;
/* requests in backlog queues */
atomic_t backlog_count;
/* command size 32B/64B */
u8 instr_size;
u8 qno;
u32 qsize;
/* unaligned addresses */
u8 *head_unaligned;
dma_addr_t dma_unaligned;
/* dma address of the base */
dma_addr_t dma;
};
struct nitrox_hw {
/* firmware version */
char fw_name[VERSION_LEN];
u16 vendor_id;
u16 device_id;
u8 revision_id;
/* CNN55XX cores */
u8 se_cores;
u8 ae_cores;
u8 zip_cores;
};
#define MAX_MSIX_VECTOR_NAME 20
/**
* vectors for queues (64 AE, 64 SE and 64 ZIP) and
* error condition/mailbox.
*/
#define MAX_MSIX_VECTORS 192
struct nitrox_msix {
struct msix_entry *entries;
char **names;
DECLARE_BITMAP(irqs, MAX_MSIX_VECTORS);
u32 nr_entries;
};
struct bh_data {
/* slc port completion count address */
u8 __iomem *completion_cnt_csr_addr;
struct nitrox_cmdq *cmdq;
struct tasklet_struct resp_handler;
};
struct nitrox_bh {
struct bh_data *slc;
};
/* NITROX-5 driver state */
#define NITROX_UCODE_LOADED 0
#define NITROX_READY 1
/* command queue size */
#define DEFAULT_CMD_QLEN 2048
/* command timeout in milliseconds */
#define CMD_TIMEOUT 2000
#define DEV(ndev) ((struct device *)(&(ndev)->pdev->dev))
#define PF_MODE 0
#define NITROX_CSR_ADDR(ndev, offset) \
((ndev)->bar_addr + (offset))
/**
* struct nitrox_device - NITROX Device Information.
* @list: pointer to linked list of devices
* @bar_addr: iomap address
* @pdev: PCI device information
* @status: NITROX status
* @timeout: Request timeout in jiffies
* @refcnt: Device usage count
* @idx: device index (0..N)
* @node: NUMA node id attached
* @qlen: Command queue length
* @nr_queues: Number of command queues
* @ctx_pool: DMA pool for crypto context
* @pkt_cmdqs: SE Command queues
* @msix: MSI-X information
* @bh: post processing work
* @hw: hardware information
* @debugfs_dir: debugfs directory
*/
struct nitrox_device {
struct list_head list;
u8 __iomem *bar_addr;
struct pci_dev *pdev;
unsigned long status;
unsigned long timeout;
refcount_t refcnt;
u8 idx;
int node;
u16 qlen;
u16 nr_queues;
struct dma_pool *ctx_pool;
struct nitrox_cmdq *pkt_cmdqs;
struct nitrox_msix msix;
struct nitrox_bh bh;
struct nitrox_hw hw;
#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *debugfs_dir;
#endif
};
/**
* nitrox_read_csr - Read from device register
* @ndev: NITROX device
* @offset: offset of the register to read
*
* Returns: value read
*/
static inline u64 nitrox_read_csr(struct nitrox_device *ndev, u64 offset)
{
return readq(ndev->bar_addr + offset);
}
/**
* nitrox_write_csr - Write to device register
* @ndev: NITROX device
* @offset: offset of the register to write
* @value: value to write
*/
static inline void nitrox_write_csr(struct nitrox_device *ndev, u64 offset,
u64 value)
{
writeq(value, (ndev->bar_addr + offset));
}
static inline int nitrox_ready(struct nitrox_device *ndev)
{
return test_bit(NITROX_READY, &ndev->status);
}
#endif /* __NITROX_DEV_H */
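A small usage sketch for the two CSR accessors above (not in the patch; the offset and bit arguments are placeholders for a real CNN55XX register):

    #include <linux/bitops.h>

    /* hypothetical read-modify-write of a device CSR via the helpers above */
    static void demo_set_csr_bit(struct nitrox_device *ndev, u64 offset, u64 bit)
    {
            u64 val;

            val = nitrox_read_csr(ndev, offset);
            val |= BIT_ULL(bit);
            nitrox_write_csr(ndev, offset, val);
    }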
2 diffs collapsed.
#include <linux/cpumask.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci_regs.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include "nitrox_dev.h"
#include "nitrox_common.h"
#include "nitrox_req.h"
#include "nitrox_csr.h"
#define CRYPTO_CTX_SIZE 256
/* command queue alignments */
#define PKT_IN_ALIGN 16
static int cmdq_common_init(struct nitrox_cmdq *cmdq)
{
struct nitrox_device *ndev = cmdq->ndev;
u32 qsize;
qsize = (ndev->qlen) * cmdq->instr_size;
cmdq->head_unaligned = dma_zalloc_coherent(DEV(ndev),
(qsize + PKT_IN_ALIGN),
&cmdq->dma_unaligned,
GFP_KERNEL);
if (!cmdq->head_unaligned)
return -ENOMEM;
cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN);
cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN);
cmdq->qsize = (qsize + PKT_IN_ALIGN);
spin_lock_init(&cmdq->response_lock);
spin_lock_init(&cmdq->cmdq_lock);
spin_lock_init(&cmdq->backlog_lock);
INIT_LIST_HEAD(&cmdq->response_head);
INIT_LIST_HEAD(&cmdq->backlog_head);
INIT_WORK(&cmdq->backlog_qflush, backlog_qflush_work);
atomic_set(&cmdq->pending_count, 0);
atomic_set(&cmdq->backlog_count, 0);
return 0;
}
static void cmdq_common_cleanup(struct nitrox_cmdq *cmdq)
{
struct nitrox_device *ndev = cmdq->ndev;
cancel_work_sync(&cmdq->backlog_qflush);
dma_free_coherent(DEV(ndev), cmdq->qsize,
cmdq->head_unaligned, cmdq->dma_unaligned);
atomic_set(&cmdq->pending_count, 0);
atomic_set(&cmdq->backlog_count, 0);
cmdq->dbell_csr_addr = NULL;
cmdq->head = NULL;
cmdq->dma = 0;
cmdq->qsize = 0;
cmdq->instr_size = 0;
}
static void nitrox_cleanup_pkt_cmdqs(struct nitrox_device *ndev)
{
int i;
for (i = 0; i < ndev->nr_queues; i++) {
struct nitrox_cmdq *cmdq = &ndev->pkt_cmdqs[i];
cmdq_common_cleanup(cmdq);
}
kfree(ndev->pkt_cmdqs);
ndev->pkt_cmdqs = NULL;
}
static int nitrox_init_pkt_cmdqs(struct nitrox_device *ndev)
{
int i, err, size;
size = ndev->nr_queues * sizeof(struct nitrox_cmdq);
ndev->pkt_cmdqs = kzalloc(size, GFP_KERNEL);
if (!ndev->pkt_cmdqs)
return -ENOMEM;
for (i = 0; i < ndev->nr_queues; i++) {
struct nitrox_cmdq *cmdq;
u64 offset;
cmdq = &ndev->pkt_cmdqs[i];
cmdq->ndev = ndev;
cmdq->qno = i;
cmdq->instr_size = sizeof(struct nps_pkt_instr);
offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i);
/* SE ring doorbell address for this queue */
cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset);
err = cmdq_common_init(cmdq);
if (err)
goto pkt_cmdq_fail;
}
return 0;
pkt_cmdq_fail:
nitrox_cleanup_pkt_cmdqs(ndev);
return err;
}
static int create_crypto_dma_pool(struct nitrox_device *ndev)
{
size_t size;
/* Crypto context pool, 16 byte aligned */
size = CRYPTO_CTX_SIZE + sizeof(struct ctx_hdr);
ndev->ctx_pool = dma_pool_create("crypto-context",
DEV(ndev), size, 16, 0);
if (!ndev->ctx_pool)
return -ENOMEM;
return 0;
}
static void destroy_crypto_dma_pool(struct nitrox_device *ndev)
{
if (!ndev->ctx_pool)
return;
dma_pool_destroy(ndev->ctx_pool);
ndev->ctx_pool = NULL;
}
/*
* crypto_alloc_context - Allocate crypto context from pool
* @ndev: NITROX Device
*/
void *crypto_alloc_context(struct nitrox_device *ndev)
{
struct ctx_hdr *ctx;
void *vaddr;
dma_addr_t dma;
vaddr = dma_pool_alloc(ndev->ctx_pool, (GFP_ATOMIC | __GFP_ZERO), &dma);
if (!vaddr)
return NULL;
/* fill meta data */
ctx = vaddr;
ctx->pool = ndev->ctx_pool;
ctx->dma = dma;
ctx->ctx_dma = dma + sizeof(struct ctx_hdr);
return ((u8 *)vaddr + sizeof(struct ctx_hdr));
}
/**
* crypto_free_context - Free crypto context to pool
* @ctx: context to free
*/
void crypto_free_context(void *ctx)
{
struct ctx_hdr *ctxp;
if (!ctx)
return;
ctxp = (struct ctx_hdr *)((u8 *)ctx - sizeof(struct ctx_hdr));
dma_pool_free(ctxp->pool, ctxp, ctxp->dma);
}
/**
* nitrox_common_sw_init - allocate software resources.
* @ndev: NITROX device
*
* Allocates crypto context pools and command queues etc.
*
* Return: 0 on success, or a negative error code on error.
*/
int nitrox_common_sw_init(struct nitrox_device *ndev)
{
int err = 0;
/* per device crypto context pool */
err = create_crypto_dma_pool(ndev);
if (err)
return err;
err = nitrox_init_pkt_cmdqs(ndev);
if (err)
destroy_crypto_dma_pool(ndev);
return err;
}
/**
* nitrox_common_sw_cleanup - free software resources.
* @ndev: NITROX device
*/
void nitrox_common_sw_cleanup(struct nitrox_device *ndev)
{
nitrox_cleanup_pkt_cmdqs(ndev);
destroy_crypto_dma_pool(ndev);
}
3 diffs collapsed.
......@@ -4,7 +4,8 @@ ccp-objs := ccp-dev.o \
ccp-dev-v3.o \
ccp-dev-v5.o \
ccp-platform.o \
ccp-dmaengine.o
ccp-dmaengine.o \
ccp-debugfs.o
ccp-$(CONFIG_PCI) += ccp-pci.o
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
......
......@@ -18,6 +18,7 @@
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/scatterwalk.h>
......@@ -308,8 +309,8 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
}
for (i = 0; i < block_size; i++) {
ctx->u.sha.ipad[i] = ctx->u.sha.key[i] ^ 0x36;
ctx->u.sha.opad[i] = ctx->u.sha.key[i] ^ 0x5c;
ctx->u.sha.ipad[i] = ctx->u.sha.key[i] ^ HMAC_IPAD_VALUE;
ctx->u.sha.opad[i] = ctx->u.sha.key[i] ^ HMAC_OPAD_VALUE;
}
sg_init_one(&ctx->u.sha.opad_sg, ctx->u.sha.opad, block_size);
......
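The two magic bytes being replaced in the hunk above are the standard HMAC padding constants, now provided by <crypto/hmac.h> as HMAC_IPAD_VALUE (0x36) and HMAC_OPAD_VALUE (0x5c). As a reminder of what they implement, here is a minimal standalone sketch (not driver code; demo_* name is illustrative) of the key-padding step, with the hash passes themselves elided:

    #define HMAC_IPAD_VALUE 0x36    /* matches include/crypto/hmac.h */
    #define HMAC_OPAD_VALUE 0x5c

    /*
     * kprime is the key zero-padded (or pre-hashed) to the digest block size.
     * The final MAC is H((kprime ^ opad) || H((kprime ^ ipad) || message)).
     */
    static void demo_hmac_pads(const unsigned char *kprime, unsigned int block_size,
                               unsigned char *ipad, unsigned char *opad)
    {
            unsigned int i;

            for (i = 0; i < block_size; i++) {
                    ipad[i] = kprime[i] ^ HMAC_IPAD_VALUE;
                    opad[i] = kprime[i] ^ HMAC_OPAD_VALUE;
            }
    }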
This diff is collapsed.
......@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
......@@ -231,6 +232,8 @@ static int ccp5_do_cmd(struct ccp5_desc *desc,
int i;
int ret = 0;
cmd_q->total_ops++;
if (CCP5_CMD_SOC(desc)) {
CCP5_CMD_IOC(desc) = 1;
CCP5_CMD_SOC(desc) = 0;
......@@ -282,6 +285,8 @@ static int ccp5_perform_aes(struct ccp_op *op)
union ccp_function function;
u32 key_addr = op->sb_key * LSB_ITEM_SIZE;
op->cmd_q->total_aes_ops++;
/* Zero out all the fields of the command desc */
memset(&desc, 0, Q_DESC_SIZE);
......@@ -325,6 +330,8 @@ static int ccp5_perform_xts_aes(struct ccp_op *op)
union ccp_function function;
u32 key_addr = op->sb_key * LSB_ITEM_SIZE;
op->cmd_q->total_xts_aes_ops++;
/* Zero out all the fields of the command desc */
memset(&desc, 0, Q_DESC_SIZE);
......@@ -364,6 +371,8 @@ static int ccp5_perform_sha(struct ccp_op *op)
struct ccp5_desc desc;
union ccp_function function;
op->cmd_q->total_sha_ops++;
/* Zero out all the fields of the command desc */
memset(&desc, 0, Q_DESC_SIZE);
......@@ -404,6 +413,8 @@ static int ccp5_perform_des3(struct ccp_op *op)
union ccp_function function;
u32 key_addr = op->sb_key * LSB_ITEM_SIZE;
op->cmd_q->total_3des_ops++;
/* Zero out all the fields of the command desc */
memset(&desc, 0, sizeof(struct ccp5_desc));
......@@ -444,6 +455,8 @@ static int ccp5_perform_rsa(struct ccp_op *op)
struct ccp5_desc desc;
union ccp_function function;
op->cmd_q->total_rsa_ops++;
/* Zero out all the fields of the command desc */
memset(&desc, 0, Q_DESC_SIZE);
......@@ -487,6 +500,8 @@ static int ccp5_perform_passthru(struct ccp_op *op)
struct ccp_dma_info *daddr = &op->dst.u.dma;
op->cmd_q->total_pt_ops++;
memset(&desc, 0, Q_DESC_SIZE);
CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_PASSTHRU;
......@@ -543,6 +558,8 @@ static int ccp5_perform_ecc(struct ccp_op *op)
struct ccp5_desc desc;
union ccp_function function;
op->cmd_q->total_ecc_ops++;
/* Zero out all the fields of the command desc */
memset(&desc, 0, Q_DESC_SIZE);
......@@ -592,7 +609,6 @@ static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status)
return queues ? 0 : -EINVAL;
}
static int ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
int lsb_cnt, int n_lsbs,
unsigned long *lsb_pub)
......@@ -757,6 +773,7 @@ static irqreturn_t ccp5_irq_handler(int irq, void *data)
struct ccp_device *ccp = dev_get_drvdata(dev);
ccp5_disable_queue_interrupts(ccp);
ccp->total_interrupts++;
if (ccp->use_tasklet)
tasklet_schedule(&ccp->irq_tasklet);
else
......@@ -956,6 +973,9 @@ static int ccp5_init(struct ccp_device *ccp)
if (ret)
goto e_hwrng;
/* Set up debugfs entries */
ccp5_debugfs_setup(ccp);
return 0;
e_hwrng:
......@@ -992,6 +1012,12 @@ static void ccp5_destroy(struct ccp_device *ccp)
/* Remove this device from the list of available units first */
ccp_del_device(ccp);
/* We're in the process of tearing down the entire driver;
* when all the devices are gone clean up debugfs
*/
if (ccp_present())
ccp5_debugfs_destroy();
/* Disable and clear interrupts */
ccp5_disable_queue_interrupts(ccp);
for (i = 0; i < ccp->cmd_q_count; i++) {
......
......@@ -31,8 +31,9 @@
#include "ccp-dev.h"
MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_AUTHOR("Gary R Hook <gary.hook@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_VERSION("1.1.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver");
struct ccp_tasklet_data {
......
......@@ -70,6 +70,7 @@
#define LSB_PUBLIC_MASK_HI_OFFSET 0x1C
#define LSB_PRIVATE_MASK_LO_OFFSET 0x20
#define LSB_PRIVATE_MASK_HI_OFFSET 0x24
#define CMD5_PSP_CCP_VERSION 0x100
#define CMD5_Q_CONTROL_BASE 0x0000
#define CMD5_Q_TAIL_LO_BASE 0x0004
......@@ -322,6 +323,16 @@ struct ccp_cmd_queue {
/* Interrupt wait queue */
wait_queue_head_t int_queue;
unsigned int int_rcvd;
/* Per-queue Statistics */
unsigned long total_ops;
unsigned long total_aes_ops;
unsigned long total_xts_aes_ops;
unsigned long total_3des_ops;
unsigned long total_sha_ops;
unsigned long total_rsa_ops;
unsigned long total_pt_ops;
unsigned long total_ecc_ops;
} ____cacheline_aligned;
struct ccp_device {
......@@ -419,6 +430,12 @@ struct ccp_device {
/* DMA caching attribute support */
unsigned int axcache;
/* Device Statistics */
unsigned long total_interrupts;
/* DebugFS info */
struct dentry *debugfs_instance;
};
enum ccp_memtype {
......@@ -632,6 +649,9 @@ void ccp_unregister_rng(struct ccp_device *ccp);
int ccp_dmaengine_register(struct ccp_device *ccp);
void ccp_dmaengine_unregister(struct ccp_device *ccp);
void ccp5_debugfs_setup(struct ccp_device *ccp);
void ccp5_debugfs_destroy(void);
/* Structure for computation functions that are device-specific */
struct ccp_actions {
int (*aes)(struct ccp_op *);
......
6 diffs collapsed.
obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += crypto_safexcel.o
crypto_safexcel-objs := safexcel.o safexcel_ring.o safexcel_cipher.o safexcel_hash.o
30 diffs collapsed.