Unverified commit e87dfde7, authored by openeuler-ci-bot, committed by Gitee

!441 [openEuler-1.0-LTS] Add support for Zhaoxin SM3 and SM4 instructions

Merge Pull Request from: @leoliu-oc 
 
Include 2 patches:
1. Driver for Zhaoxin GMI SM3 Secure Hash algorithm
This SM3 algorithm driver supports the SM3 instruction, enabling users to develop applications with both high performance and high security.

2. Driver for Zhaoxin GMI SM4 Block Cipher Algorithm
This SM4 algorithm driver supports the SM4 instruction, enabling users to develop applications with both high performance and high security.
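For illustration only (not part of the patch set), here is a minimal userspace sketch of how an application could use the accelerated hash through the kernel's AF_ALG interface once the SM3 module is loaded. Error handling is omitted, and whether "sm3" resolves to the GMI driver depends on its cra_priority winning over other registered sm3 implementations:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "sm3",	/* served by "zhaoxin-gmi-sm3" when it is the highest-priority sm3 */
	};
	unsigned char digest[32];	/* SM3 digest is 256 bits */
	int i, tfm, op;

	tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
	op = accept(tfm, NULL, 0);

	send(op, "abc", 3, 0);			/* message to hash */
	read(op, digest, sizeof(digest));	/* collect the digest */

	for (i = 0; i < 32; i++)
		printf("%02x", digest[i]);
	printf("\n");
	close(op);
	close(tfm);
	return 0;
}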

Issue
https://gitee.com/openeuler/kernel/issues/I6J50I

Test
N/A

Known Issue
N/A

Introduced config change
CONFIG_CRYPTO_DEV_ZHAOXIN_SM3=m
CONFIG_CRYPTO_DEV_ZHAOXIN_SM4=m 
 
Link: https://gitee.com/openeuler/kernel/pulls/441

Reviewed-by: Zheng Zengkai <zhengzengkai@huawei.com> 
Signed-off-by: Zhang Changzhong <zhangchangzhong@huawei.com> 
@@ -10,6 +10,32 @@ menuconfig CRYPTO_HW
if CRYPTO_HW
config CRYPTO_DEV_ZHAOXIN_SM3
	tristate "Zhaoxin GMI driver for SM3 algorithm"
	depends on CRYPTO && X86
	select CRYPTO_HASH
	help
	  Use Zhaoxin GMI for the SM3 algorithm.
	  Available in ZX-C+ and newer CPUs.

	  If unsure, say M. The compiled module will be
	  called zhaoxin_gmi_sm3.

config CRYPTO_DEV_ZHAOXIN_SM4
	tristate "Zhaoxin GMI driver for SM4 algorithm"
	depends on CRYPTO && X86
	select CRYPTO_SIMD
	select CRYPTO_SKCIPHER
	select CRYPTO_ALGAPI
	help
	  Use Zhaoxin GMI for the SM4 algorithm.
	  Available in ZX-C+ and newer CPUs.

	  If unsure, say M. The compiled module will be
	  called zhaoxin_gmi_sm4.

config CRYPTO_DEV_PADLOCK
	tristate "Support for VIA PadLock ACE"
	depends on X86 && !UML
@@ -48,4 +48,6 @@ obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/
obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
obj-$(CONFIG_CRYPTO_DEV_ZHAOXIN_SM3) += zhaoxin-gmi-sm3.o
obj-$(CONFIG_CRYPTO_DEV_ZHAOXIN_SM4) += zhaoxin-gmi-sm4.o
obj-y += hisilicon/
// SPDX-License-Identifier: GPL-2.0-only
/*
* zhaoxin-gmi-sm3.c - wrapper code for Zhaoxin GMI.
*
* Copyright (C) 2023 Shanghai Zhaoxin Semiconductor LTD.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <crypto/sm3.h>
#include <crypto/sm3_base.h>
#include <linux/bitops.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
static u8 use_ccs;
const u8 zx_sm3_zero_message_hash[SM3_DIGEST_SIZE] = {
0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F,
0x8E, 0x61, 0x19, 0x48, 0x31, 0xE8, 0x1A, 0x8F,
0x22, 0xBE, 0xC8, 0xC7, 0x28, 0xFE, 0xFB, 0x74,
0x7E, 0xD0, 0x35, 0xEB, 0x50, 0x82, 0xAA, 0x2B
};
EXPORT_SYMBOL_GPL(zx_sm3_zero_message_hash);
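/*
* zx_gmi_capability() - query GMI support via CPUID.
* Leaf 0 returns the vendor ID string in EBX/EDX/ECX; the two patterns
* checked below decode to "CentaurHauls" and "  Shanghai  ". On a
* matching CPU, the Centaur extended leaf 0xC0000001 is read and its
* EDX feature word returned; bits 4-5 (tested by the callers as
* 0x3 << 4) indicate that the SM3/SM4 (CCS) instructions are present
* and enabled.
*/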
u32 zx_gmi_capability(void)
{
u32 eax = 0;
u32 ebx, ecx, edx = 0;
// 1. check vendor ID string
__asm__ __volatile__ ("cpuid" : "=b"(ebx), "=c"(ecx), "=d"(edx) : "a"(eax) : );
if (((ebx == 0x746e6543) && (ecx == 0x736c7561) && (edx == 0x48727561)) ||
((ebx == 0x68532020) && (ecx == 0x20206961) && (edx == 0x68676e61))) {
// 2. check whether support SM3/SM4/SM2 Instructions
eax = 0xC0000001;
__asm__ __volatile__ ("cpuid":"=d"(edx):"a"(eax) : );
} else {
pr_warn("This is not a ZX CPU! Return!\n");
return 0;
}
return edx;
}
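/*
* Read CPUID leaf *leaf (leaf 1 here) and return EAX, which packs the
* family/model/stepping identification word; EBX/ECX/EDX are clobbered.
*/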
static u32 get_cpu_fms(u32 *eax, u32 *leaf)
{
u32 eax_tmp = *eax, leaf_tmp = *leaf;
__asm__ __volatile__ (
"cpuid"
: "=a"(eax_tmp)
: "0"(leaf_tmp)
: "ebx", "ecx", "edx");
*eax = eax_tmp;
return eax_tmp;
}
/*
* Load the supported CPU features to see whether SM3/SM4 are available.
*/
static int gmi_available(void)
{
u32 eax = 0;
u32 edx = 0;
u8 family, model;
/* Diff ZXC with ZXD */
u32 leaf = 0x1;
get_cpu_fms(&eax, &leaf);
family = (eax & 0xf00) >> 8; /* bit 11-08 */
model = (eax & 0xf0) >> 4; /* bit 7-4 */
edx = zx_gmi_capability();
if (((family == 7) && (model == 0xb))
|| ((family == 6) && (model == 0xf))
|| ((family == 6) && (model == 9)))
use_ccs = ((edx & (0x3 << 4)) == (0x3 << 4));
else
use_ccs = 0;
return use_ccs;
}
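/*
* Hash 'blockcnt' 64-byte blocks into sst->state with the GMI SM3
* instruction (the 0xf3 0x0f 0xa6 0xe8 byte sequence below). Per the
* asm: RSI holds the input pointer, RDI the state pointer, RCX the
* block count, and RAX/RBX (-1 and 0x20) are the fixed control inputs
* this driver passes to the instruction.
*/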
void sm3_generic_block_fn(struct sm3_state *sst, const u8 *inp, int blockcnt)
{
u64 in, out, cnt;
if (!inp) {
pr_warn("GMI-SM3: input is null\n");
return;
}
if (!sst) {
pr_warn("GMI-SM3: sst is null\n");
return;
}
if (!blockcnt) {
pr_warn("GMI-SM3: cnt is 0\n");
return;
}
in = (u64)inp;
out = (u64)(sst->state);
cnt = (u64)blockcnt;
__asm__ __volatile__(
"movq %2, %%rdi\n"
"movq %0, %%rsi\n"
"movq %1, %%rcx\n"
"movq $-1, %%rax\n"
"movq $0x20, %%rbx\n"
".byte 0xf3, 0x0f, 0xa6, 0xe8"
:
: "r"(in), "r"(cnt), "r"(out)
: "%rdi", "%rsi", "%rcx", "rbx", "%rax", "memory"
);
}
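/*
* The initial vector below is the standard SM3 IV (0x7380166f,
* 0x4914b2b9, ...) with each word byte-swapped, i.e. stored in digest
* byte order; this is why zx_sm3_base_finish() can copy the finished
* state straight into the output without a cpu_to_be32() pass.
*/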
static inline int zx_sm3_init(struct shash_desc *desc)
{
struct sm3_state *sctx;
if (!desc)
return -EINVAL;
sctx = shash_desc_ctx(desc);
sctx->state[0] = 0x6f168073UL;
sctx->state[1] = 0xb9b21449UL;
sctx->state[2] = 0xd7422417UL;
sctx->state[3] = 0x00068adaUL;
sctx->state[4] = 0xbc306fa9UL;
sctx->state[5] = 0xaa383116UL;
sctx->state[6] = 0x4dee8de3UL;
sctx->state[7] = 0x4e0efbb0UL;
sctx->count = 0;
return 0;
}
static inline int zx_sm3_base_finish(struct shash_desc *desc, u8 *out)
{
struct sm3_state *sctx = shash_desc_ctx(desc);
__be32 *digest = (__be32 *)out;
memcpy(digest, sctx->state, 32);
*sctx = (struct sm3_state){};
return 0;
}
int zx_sm3_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
if (!data || !len)
return -EINVAL;
return sm3_base_do_update(desc, data, len, sm3_generic_block_fn);
}
EXPORT_SYMBOL(zx_sm3_update);
static int zx_sm3_final(struct shash_desc *desc, u8 *out)
{
if (!desc || !out)
return -EINVAL;
sm3_base_do_finalize(desc, sm3_generic_block_fn);
return zx_sm3_base_finish(desc, out);
}
int zx_sm3_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *hash)
{
if (!desc || !data || !len || !hash)
return -EINVAL;
sm3_base_do_update(desc, data, len, sm3_generic_block_fn);
return zx_sm3_final(desc, hash);
}
EXPORT_SYMBOL(zx_sm3_finup);
static struct shash_alg zx_sm3_alg = {
.digestsize = SM3_DIGEST_SIZE,
.init = zx_sm3_init,
.update = zx_sm3_update,
.final = zx_sm3_final,
.finup = zx_sm3_finup,
.descsize = sizeof(struct sm3_state),
.base = {
.cra_name = "sm3",
.cra_driver_name = "zhaoxin-gmi-sm3",
.cra_priority = 300,
.cra_blocksize = SM3_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static int __init zx_sm3_generic_mod_init(void)
{
if (!gmi_available()) {
pr_warn("GMI is unavailable on this platform.");
return -ENODEV;
}
return crypto_register_shash(&zx_sm3_alg);
}
static void __exit zx_sm3_generic_mod_fini(void)
{
crypto_unregister_shash(&zx_sm3_alg);
}
module_init(zx_sm3_generic_mod_init);
module_exit(zx_sm3_generic_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SM3 Secure Hash Algorithm");
MODULE_ALIAS_CRYPTO("zx-sm3");
MODULE_ALIAS_CRYPTO("zhaoxin-gmi-sm3");
// SPDX-License-Identifier: GPL-2.0-only
/*
* zhaoxin-gmi-sm4.c - wrapper code for Zhaoxin GMI.
*
* Copyright (C) 2023 Shanghai Zhaoxin Semiconductor LTD.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/cryptd.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/workqueue.h>
#include <crypto/sm4.h>
#include <asm/unaligned.h>
#define SM4_ECB (1<<6)
#define SM4_CBC (1<<7)
#define SM4_CFB (1<<8)
#define SM4_OFB (1<<9)
#define SM4_CTR (1<<10)
#define ZX_GMI_ALIGNMENT 16
#define GETU16(p) ((u16)(p)[0]<<8 | (u16)(p)[1])
#define GETU32(p) ((u32)(p)[0]<<24|(u32)(p)[1]<<16|(u32)(p)[2]<<8|(u32)(p)[3])
/* Control word. */
struct sm4_cipher_data {
u8 iv[SM4_BLOCK_SIZE]; /* Initialization vector */
union {
u32 pad;
struct {
u32 encdec:1;
u32 func:5;
u32 mode:5;
u32 digest:1;
} b;
} cword; /* Control word */
struct crypto_sm4_ctx keys; /* Encryption key */
};
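/*
* One GMI SM4 call over 'count' consecutive blocks. Register contract
* (from the asm constraints below): RSI = input, RDI = output,
* RAX = control word, RDX = IV, RBX = key, RCX = block count. Bit 0 of
* the control word selects decryption (set) or encryption (cleared);
* the cipher mode comes from the SM4_* flag OR-ed into cword.pad by
* the callers.
*/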
static inline u8 *rep_xcrypt(const u8 *input, u8 *output, void *key, u8 *iv,
struct sm4_cipher_data *sm4_data, int count)
{
int eax = sm4_data->cword.pad;
// Set the flag for encryption or decryption
if (sm4_data->cword.b.encdec == 1)
eax &= ~0x01;
else
eax |= 0x01;
asm volatile (".byte 0xf3, 0x0f, 0xa7, 0xf0" /* rep xcryptcbc */
: "+S" (input), "+D" (output), "+a" (eax)
: "d" (iv), "b" (key), "c" (count));
return iv;
}
static inline u8 *rep_xcrypt_ctr(const u8 *input, u8 *output, void *key, u8 *iv,
struct sm4_cipher_data *sm4_data, int count)
{
int eax = sm4_data->cword.pad;
u8 oiv[SM4_BLOCK_SIZE] = {0};
u32 cnt_tmp;
u32 i;
//Backup the original IV if it is not NULL.
if (iv)
memcpy(oiv, iv, SM4_BLOCK_SIZE);
// Set the flag for encryption or decryption
if (sm4_data->cword.b.encdec == 1)
eax &= ~0x01;
else
eax |= 0x01;
// Get the current counter.
cnt_tmp = GETU16(&iv[14]);
// Get the available counter space before overflow.
cnt_tmp = 0x10000 - cnt_tmp;
// Check there is enough counter space for the required blocks.
if (cnt_tmp < count) {
// Process the first part of data blocks.
asm volatile (".byte 0xf3,0x0f,0xa7,0xf0" /* rep xcryptcbc */
: "+S" (input), "+D" (output), "+a" (eax)
: "d" (iv), "b" (key), "c" (cnt_tmp));
// At this point the IV's low 16 bits have wrapped around to 0x0000.
// Only increase the counter by SW when overflow occurs.
memcpy(iv, oiv, SM4_BLOCK_SIZE);
for (i = 0; i < cnt_tmp; i++)
crypto_inc(iv, SM4_BLOCK_SIZE);
// Get the number of data blocks that have not been encrypted.
cnt_tmp = count - cnt_tmp;
// Process the remaining part of data blocks.
asm volatile (".byte 0xf3,0x0f,0xa7,0xf0" /* rep xcryptcbc */
: "+S" (input), "+D" (output), "+a" (eax)
: "d" (iv), "b" (key), "c" (cnt_tmp));
} else {
// Counter space is big enough, the counter will not overflow.
asm volatile (".byte 0xf3,0x0f,0xa7,0xf0" /* rep xcryptcbc */
: "+S" (input), "+D" (output), "+a" (eax)
: "d" (iv), "b" (key), "c" (count));
}
// Restore the iv if not null
if (iv)
memcpy(iv, oiv, SM4_BLOCK_SIZE);
return iv;
}
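/*
* Encrypt exactly one block in ECB mode, bypassing the control word in
* sm4_data: the asm below hard-codes RCX = 1 and RAX = 0x60
* (0x20 | SM4_ECB). Used to generate a single keystream/feedback block
* for the per-block CTR/OFB/CFB paths.
*/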
static u8 *rep_xcrypt_ebc_ONE(const u8 *input, u8 *output, void *key,
u8 *iv, struct sm4_cipher_data *sm4_data, int count)
{
u64 in, out, enkey, ivec;
in = (u64)input;
out = (u64)(output);
enkey = (u64)key;
ivec = (u64)iv;
__asm__ __volatile__(
"movq %2, %%rdi\n"
"movq %0, %%rsi\n"
"movq $1, %%rcx\n"
"movq $0x60, %%rax\n"
"movq %1, %%rbx\n"
"movq %3, %%rdx\n"
".byte 0xf3, 0x0f, 0xa7, 0xf0"
:
: "r"(in), "r"(enkey), "r"(out), "r"(ivec)
: "%rdi", "%rsi", "%rdx", "%rcx", "rbx", "%rax", "memory"
);
return iv;
}
/**
* gmi_sm4_set_key - Set the sm4 key.
* @tfm: The %crypto_skcipher that is used in the context.
* @in_key: The input key.
* @key_len: The size of the key in bytes.
*/
int gmi_sm4_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
struct crypto_sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
if (key_len != SM4_KEY_SIZE) {
pr_err("The key_len must be 16 bytes. please check\n");
return -EINVAL;
}
memcpy(ctx->rkey_enc, in_key, key_len);
memcpy(ctx->rkey_dec, in_key, key_len);
return 0;
}
EXPORT_SYMBOL_GPL(gmi_sm4_set_key);
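/*
* Common walker for the modes the hardware handles in one shot: process
* all whole blocks of each walk step with a single rep_xcrypt() call;
* any trailing partial bytes are handed back to skcipher_walk_done().
*/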
static int sm4_cipher_common(struct skcipher_request *req, struct sm4_cipher_data *cw)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int blocks;
int err;
u8 *iv;
err = skcipher_walk_virt(&walk, req, true);
while ((blocks = (walk.nbytes / SM4_BLOCK_SIZE))) {
iv = rep_xcrypt(walk.src.virt.addr, walk.dst.virt.addr, ctx->rkey_enc,
walk.iv, cw, blocks);
err = skcipher_walk_done(&walk, walk.nbytes % SM4_BLOCK_SIZE);
}
return err;
}
static int ebc_encrypt(struct skcipher_request *req)
{
int err;
struct sm4_cipher_data cw;
cw.cword.pad = 0;
cw.cword.b.encdec = 1;
cw.cword.pad |= 0x20|SM4_ECB;
err = sm4_cipher_common(req, &cw);
return err;
}
static int ebc_decrypt(struct skcipher_request *req)
{
int err;
struct sm4_cipher_data cw;
cw.cword.pad = 0;
cw.cword.pad |= 0x20|SM4_ECB;
err = sm4_cipher_common(req, &cw);
return err;
}
static int cbc_encrypt(struct skcipher_request *req)
{
int err;
struct sm4_cipher_data cw;
cw.cword.pad = 0;
cw.cword.b.encdec = 1;
cw.cword.pad |= 0x20|SM4_CBC;
err = sm4_cipher_common(req, &cw);
return err;
}
static int cbc_decrypt(struct skcipher_request *req)
{
int err;
struct sm4_cipher_data cw;
cw.cword.pad = 0;
cw.cword.pad |= 0x20|SM4_CBC;
err = sm4_cipher_common(req, &cw);
return err;
}
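/*
* Handle the final partial block of a CTR request: encrypt the current
* counter block with a single-block ECB call, XOR the keystream into
* the remaining bytes, then advance the counter.
*/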
static void ctr_crypt_final(struct crypto_sm4_ctx *ctx,
struct skcipher_walk *walk)
{
u8 *ctrblk = walk->iv;
u8 keystream[SM4_BLOCK_SIZE];
u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
unsigned int nbytes = walk->nbytes;
struct sm4_cipher_data cw;
cw.cword.pad = 0;
cw.cword.b.encdec = 1;
cw.cword.pad |= 0x20|SM4_ECB;
rep_xcrypt_ebc_ONE(ctrblk, keystream, ctx->rkey_enc, walk->iv, &cw, 1);
crypto_xor_cpy(dst, keystream, src, nbytes);
crypto_inc(ctrblk, SM4_BLOCK_SIZE);
}
/*
* sm4_cipher_ctr is used for ZX-E or newer
*/
static int sm4_cipher_ctr(struct skcipher_request *req, struct sm4_cipher_data *cw)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int blocks;
int err;
u8 *iv;
u32 i;
err = skcipher_walk_virt(&walk, req, true);
while ((blocks = (walk.nbytes / SM4_BLOCK_SIZE))) {
iv = rep_xcrypt_ctr(walk.src.virt.addr, walk.dst.virt.addr,
ctx->rkey_enc, walk.iv, cw, blocks);
// Update the counter.
for (i = 0; i < blocks; i++)
crypto_inc(walk.iv, SM4_BLOCK_SIZE);
err = skcipher_walk_done(&walk, walk.nbytes % SM4_BLOCK_SIZE);
}
if (walk.nbytes) {
ctr_crypt_final(ctx, &walk);
err = skcipher_walk_done(&walk, 0);
}
return err;
}
/*
* ctr_encrypt is used for ZX-E or newer
*/
static int ctr_encrypt(struct skcipher_request *req)
{
int err;
struct sm4_cipher_data cw;
cw.cword.pad = 0;
cw.cword.b.encdec = 1;
cw.cword.pad |= 0x20|SM4_CTR;
err = sm4_cipher_ctr(req, &cw);
return err;
}
/*
* ctr_decrypt is used for ZX-E or newer
*/
static int ctr_decrypt(struct skcipher_request *req)
{
int err;
struct sm4_cipher_data cw;
cw.cword.pad = 0;
cw.cword.pad |= 0x20|SM4_CTR;
err = sm4_cipher_ctr(req, &cw);
return err;
}
/*
* sm4_ctr_zxc is used for ZX-C+
*/
static int sm4_ctr_zxc(struct skcipher_request *req, struct sm4_cipher_data *cw)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int blocks;
int err;
u8 *iv = NULL;
u32 n;
u8 en_iv[SM4_BLOCK_SIZE] = {0};
err = skcipher_walk_virt(&walk, req, true);
while ((blocks = (walk.nbytes / SM4_BLOCK_SIZE))) {
while (blocks--) {
iv = rep_xcrypt_ebc_ONE(walk.iv, en_iv, ctx->rkey_enc, walk.iv, cw, 1);
crypto_inc(walk.iv, SM4_BLOCK_SIZE);
for (n = 0; n < 16; n += sizeof(size_t))
*(size_t *)(walk.dst.virt.addr + n) = *(size_t *)(en_iv + n)
^ *(size_t *)(walk.src.virt.addr + n);
walk.src.virt.addr += SM4_BLOCK_SIZE;
walk.dst.virt.addr += SM4_BLOCK_SIZE;
}
err = skcipher_walk_done(&walk, walk.nbytes % SM4_BLOCK_SIZE);
}
if (walk.nbytes) {
ctr_crypt_final(ctx, &walk);
err = skcipher_walk_done(&walk, 0);
}
return err;
}
/*
* ctr_encrypt_zxc is used for ZX-C+
*/
static int ctr_encrypt_zxc(struct skcipher_request *req)
{
int err;
struct sm4_cipher_data cw;
cw.cword.pad = 0;
cw.cword.b.encdec = 1;
cw.cword.pad |= 0x20|SM4_CTR;
err = sm4_ctr_zxc(req, &cw);
return err;
}
/*
* ctr_decrypt_zxc is used for ZX-C+
*/
static int ctr_decrypt_zxc(struct skcipher_request *req)
{
int err;
struct sm4_cipher_data cw;
cw.cword.pad = 0;
cw.cword.b.encdec = 0;
cw.cword.pad |= 0x20|SM4_CTR;
err = sm4_ctr_zxc(req, &cw);
return err;
}
/*
* ofb_encrypt is used for ZX-E or newer
*/
static int ofb_encrypt(struct skcipher_request *req)
{
int err;
struct sm4_cipher_data cw;
cw.cword.pad = 0;
cw.cword.b.encdec = 1;
cw.cword.pad |= 0x20|SM4_OFB;
err = sm4_cipher_common(req, &cw);
return err;
}
/*
* ofb_decrypt is used for ZX-E or newer
*/
static int ofb_decrypt(struct skcipher_request *req)
{
int err;
struct sm4_cipher_data cw;
cw.cword.pad = 0;
cw.cword.pad |= 0x20|SM4_OFB;
err = sm4_cipher_common(req, &cw);
return err;
}
/*
* sm4_ofb_zxc is used for ZX-C+
*/
static int sm4_ofb_zxc(struct skcipher_request *req, struct sm4_cipher_data *cw)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int blocks;
int err;
u8 *iv = NULL;
u32 n;
err = skcipher_walk_virt(&walk, req, true);
while ((blocks = (walk.nbytes / SM4_BLOCK_SIZE))) {
while (blocks--) {
iv = rep_xcrypt_ebc_ONE(walk.iv, walk.iv, ctx->rkey_enc, walk.iv, cw, 1);
for (n = 0; n < 16; n += sizeof(size_t))
*(size_t *)(walk.dst.virt.addr + n) = *(size_t *)(walk.iv + n)
^ *(size_t *)(walk.src.virt.addr + n);
walk.src.virt.addr += SM4_BLOCK_SIZE;
walk.dst.virt.addr += SM4_BLOCK_SIZE;
}
err = skcipher_walk_done(&walk, walk.nbytes % SM4_BLOCK_SIZE);
}
return err;
}
/*
* ofb_encrypt_zxc is used for ZX-C+
*/
static int ofb_encrypt_zxc(struct skcipher_request *req)
{
int err;
struct sm4_cipher_data cw;
cw.cword.pad = 0;
cw.cword.b.encdec = 1;
cw.cword.pad |= 0x20|SM4_OFB;
err = sm4_ofb_zxc(req, &cw);
return err;
}
/*
* ofb_decrypt_zxc is used for ZX-C+
*/
static int ofb_decrypt_zxc(struct skcipher_request *req)
{
int err;
struct sm4_cipher_data cw;
cw.cword.pad = 0;
cw.cword.b.encdec = 0;
cw.cword.pad |= 0x20|SM4_OFB;
err = sm4_ofb_zxc(req, &cw);
return err;
}
/*
* cfb_encrypt is used for ZX-E or newer.
*/
static int cfb_encrypt(struct skcipher_request *req)
{
int err;
struct sm4_cipher_data cw;
cw.cword.pad = 0;
cw.cword.b.encdec = 1;
cw.cword.pad |= 0x20|SM4_CFB;
err = sm4_cipher_common(req, &cw);
return err;
}
/*
* cfb_decrypt is used for ZX-E or newer.
*/
static int cfb_decrypt(struct skcipher_request *req)
{
int err;
struct sm4_cipher_data cw;
cw.cword.pad = 0;
cw.cword.pad |= 0x20|SM4_CFB;
err = sm4_cipher_common(req, &cw);
return err;
}
/*
* sm4_cfb_zxc is used for ZX-C+
*/
static int sm4_cfb_zxc(struct skcipher_request *req, struct sm4_cipher_data *cw)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int blocks;
int err;
u8 *iv = NULL;
u32 n;
size_t t;
err = skcipher_walk_virt(&walk, req, true);
while ((blocks = (walk.nbytes / SM4_BLOCK_SIZE))) {
while (blocks--) {
iv = rep_xcrypt_ebc_ONE(walk.iv, walk.iv, ctx->rkey_enc, walk.iv, cw, 1);
if (cw->cword.b.encdec)
for (n = 0; n < 16; n += sizeof(size_t)) {
*(size_t *)(walk.iv + n) ^= *(size_t *)(walk.src.virt.addr + n);
*(size_t *)(walk.dst.virt.addr + n) = *(size_t *)(walk.iv + n);
}
else
for (n = 0; n < 16; n += sizeof(size_t)) {
t = *(size_t *)(walk.src.virt.addr + n);
*(size_t *)(walk.dst.virt.addr + n) =
*(size_t *)(walk.iv + n) ^ t;
*(size_t *)(walk.iv + n) = t;
}
walk.src.virt.addr += SM4_BLOCK_SIZE;
walk.dst.virt.addr += SM4_BLOCK_SIZE;
}
err = skcipher_walk_done(&walk, walk.nbytes % SM4_BLOCK_SIZE);
}
return err;
}
/*
* cfb_encrypt_zxc is used for ZX-C+
*/
static int cfb_encrypt_zxc(struct skcipher_request *req)
{
int err;
struct sm4_cipher_data cw;
cw.cword.pad = 0;
cw.cword.b.encdec = 1;
cw.cword.pad |= 0x20|SM4_CFB;
err = sm4_cfb_zxc(req, &cw);
return err;
}
/*
* cfb_decrypt_zxc is used for ZX-C+
*/
static int cfb_decrypt_zxc(struct skcipher_request *req)
{
int err;
struct sm4_cipher_data cw;
cw.cword.pad = 0;
cw.cword.b.encdec = 0;
cw.cword.pad |= 0x20|SM4_CFB;
err = sm4_cfb_zxc(req, &cw);
return err;
}
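/*
* Internal implementations (CRYPTO_ALG_INTERNAL); the array retains a
* legacy aes_algs name but registers SM4 modes. gmi_sm4_init() wraps
* each entry in a simd skcipher to expose the public
* ecb/cbc/ctr/ofb/cfb(sm4) algorithms, and swaps in the *_zxc handlers
* on ZX-C class CPUs.
*/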
static struct skcipher_alg aes_algs[] = {
{
.base = {
.cra_name = "__ecb(sm4)",
.cra_driver_name = "__ecb-sm4-gmi",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto_sm4_ctx),
.cra_module = THIS_MODULE,
},
.min_keysize = SM4_KEY_SIZE,
.max_keysize = SM4_KEY_SIZE,
.ivsize = SM4_BLOCK_SIZE,
.setkey = gmi_sm4_set_key,
.encrypt = ebc_encrypt,
.decrypt = ebc_decrypt,
},
{
.base = {
.cra_name = "__cbc(sm4)",
.cra_driver_name = "__cbc-sm4-gmi",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto_sm4_ctx),
.cra_module = THIS_MODULE,
},
.min_keysize = SM4_KEY_SIZE,
.max_keysize = SM4_KEY_SIZE,
.ivsize = SM4_BLOCK_SIZE,
.setkey = gmi_sm4_set_key,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
},
{
.base = {
.cra_name = "__ctr(sm4)",
.cra_driver_name = "__ctr-sm4-gmi",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1, /* CTR operates as a stream cipher */
.cra_ctxsize = sizeof(struct crypto_sm4_ctx),
.cra_module = THIS_MODULE,
},
.min_keysize = SM4_KEY_SIZE,
.max_keysize = SM4_KEY_SIZE,
.ivsize = SM4_BLOCK_SIZE,
.setkey = gmi_sm4_set_key,
.encrypt = ctr_encrypt,
.decrypt = ctr_decrypt,
},
{
.base = {
.cra_name = "__ofb(sm4)",
.cra_driver_name = "__ofb-sm4-gmi",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto_sm4_ctx),
.cra_module = THIS_MODULE,
},
.min_keysize = SM4_KEY_SIZE,
.max_keysize = SM4_KEY_SIZE,
.ivsize = SM4_BLOCK_SIZE,
.setkey = gmi_sm4_set_key,
.encrypt = ofb_encrypt,
.decrypt = ofb_decrypt,
},
{
.base = {
.cra_name = "__cfb(sm4)",
.cra_driver_name = "__cfb-sm4-gmi",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto_sm4_ctx),
.cra_module = THIS_MODULE,
},
.min_keysize = SM4_KEY_SIZE,
.max_keysize = SM4_KEY_SIZE,
.ivsize = SM4_BLOCK_SIZE,
.setkey = gmi_sm4_set_key,
.encrypt = cfb_encrypt,
.decrypt = cfb_decrypt,
}
};
static struct simd_skcipher_alg *sm4_simd_algs[ARRAY_SIZE(aes_algs)];
static int zx_gmi_capability(void)
{
int eax = 0;
int ebx, ecx, edx = 0;
// 1. check vendor ID string
asm volatile ("cpuid":"=b"(ebx), "=c"(ecx), "=d"(edx):"a"(eax) : );
if (((ebx == 0x746e6543) && (ecx == 0x736c7561) && (edx == 0x48727561)) ||
((ebx == 0x68532020) && (ecx == 0x20206961) && (edx == 0x68676e61))) {
// 2. check whether support SM3/SM4/SM2 Instructions
eax = 0xC0000001;
__asm__ __volatile__ ("cpuid":"=d"(edx):"a"(eax) : );
} else {
pr_warn("This is not a ZX CPU!\n");
}
return edx;
}
static u32 get_cpu_fms(u32 *eax, u32 *leaf)
{
u32 eax_tmp = *eax, leaf_tmp = *leaf;
__asm__ __volatile__ (
"cpuid"
: "=a"(eax_tmp)
: "0"(leaf_tmp)
: "ebx", "ecx", "edx");
*eax = eax_tmp;
return eax_tmp;
}
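/*
* gmi_zxc_check() - return 1 on ZX-C class parts (family 6, model 0x9
* or 0xf), which need the per-block *_zxc fallbacks for CTR/OFB/CFB,
* and 0 on family 7 model 0xb cores, which run those modes natively.
*/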
static int gmi_zxc_check(void)
{
u32 eax = 0;
char family, model;
u32 leaf = 0x1;
int f_zxc = 0;
get_cpu_fms(&eax, &leaf);
family = (eax & 0xf00) >> 8; /* bit 11-08 */
model = (eax & 0xf0) >> 4; /* bit 7-4 */
if ((family == 7) && (model == 0xb))
f_zxc = 0;
else if (((family == 6) && (model == 0xf)) ||
((family == 6) && (model == 9)))
f_zxc = 1;
return f_zxc;
}
/*
* Load the supported CPU features to see whether SM3/SM4 are available.
*/
static int gmi_ccs_available(void)
{
unsigned int zx_gmi_use_ccs = 0; /* Chinese Cipher Standard SM3 and SM4 Support */
zx_gmi_use_ccs = ((zx_gmi_capability() & (0x3 << 4)) == (0x3 << 4));
return zx_gmi_use_ccs;
}
static void gmi_sm4_exit(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(sm4_simd_algs) && sm4_simd_algs[i]; i++)
simd_skcipher_free(sm4_simd_algs[i]);
crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}
static int __init gmi_sm4_init(void)
{
struct simd_skcipher_alg *simd;
const char *basename;
const char *algname;
const char *drvname;
int err;
int i;
if (!gmi_ccs_available())
return -ENODEV;
if (gmi_zxc_check()) {
for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
if (!strcmp(aes_algs[i].base.cra_name, "__ctr(sm4)")) {
pr_info("GRX: zxc gmi sm4 ctr FOUND\n");
aes_algs[i].encrypt = ctr_encrypt_zxc;
aes_algs[i].decrypt = ctr_decrypt_zxc;
} else if (!strcmp(aes_algs[i].base.cra_name, "__cfb(sm4)")) {
pr_info("GRX: zxc gmi sm4 cfb FOUND\n");
aes_algs[i].encrypt = cfb_encrypt_zxc;
aes_algs[i].decrypt = cfb_decrypt_zxc;
} else if (!strcmp(aes_algs[i].base.cra_name, "__ofb(sm4)")) {
pr_info("GRX: zxc gmi sm4 ofb FOUND\n");
aes_algs[i].encrypt = ofb_encrypt_zxc;
aes_algs[i].decrypt = ofb_decrypt_zxc;
}
}
}
err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
if (err)
return err;
for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
algname = aes_algs[i].base.cra_name + 2;
drvname = aes_algs[i].base.cra_driver_name + 2;
basename = aes_algs[i].base.cra_driver_name;
simd = simd_skcipher_create_compat(algname, drvname, basename);
err = PTR_ERR(simd);
if (IS_ERR(simd))
goto unregister_simds;
sm4_simd_algs[i] = simd;
}
return 0;
unregister_simds:
gmi_sm4_exit();
return err;
}
late_initcall(gmi_sm4_init);
module_exit(gmi_sm4_exit);
MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using Zhaoxin GMI");
MODULE_AUTHOR("GRX");
MODULE_LICENSE("GPL");