提交 f6631e38 编写于 作者: C Christoph Müllner 提交者: Hugo Landau

riscv: AES: Provide a Zvkned-based implementation

The upcoming RISC-V vector crypto extensions include
the Zvkned extension, which provides AES-specific instructions.
This patch provides an implementation that utilizes this
extension if available.

Tested on QEMU and no regressions observed.
Signed-off-by: Christoph Müllner <christoph.muellner@vrull.eu>
Reviewed-by: Tomas Mraz <tomas@openssl.org>
Reviewed-by: Paul Dale <pauli@openssl.org>
Reviewed-by: Hugo Landau <hlandau@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/21923)
上级 5191bcc8
#! /usr/bin/env perl
# This file is dual-licensed, meaning that you can use it under your
# choice of either of the following two licenses:
#
# Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You can obtain
# a copy in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# or
#
# Copyright (c) 2023, Christoph Müllner <christoph.muellner@vrull.eu>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The generated code of this script requires the following extensions:
# - RV64I
# - RISC-V vector ('V') with VLEN >= 128
# - RISC-V vector crypto AES extension ('Zvkned')
use strict;
use warnings;
use FindBin qw($Bin);
use lib "$Bin";
use lib "$Bin/../../perlasm";
use riscv;
# $output is the last argument if it looks like a file (it has an extension)
# $flavour is the first argument if it doesn't look like a file
my $output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
my $flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;
# Redirect STDOUT to the requested output file.  Use 3-arg open so the
# filename cannot inject an open mode, and fail loudly instead of
# silently writing the generated assembly to the terminal.
if (defined $output) {
    open STDOUT, '>', $output
        or die "can't open $output for writing: $!";
}
# $code accumulates the generated assembly; emitted at end of script.
my $code=<<___;
.text
___
################################################################################
# int rv64i_zvkned_set_encrypt_key(const unsigned char *userKey, const int bits,
# AES_KEY *key)
# int rv64i_zvkned_set_decrypt_key(const unsigned char *userKey, const int bits,
# AES_KEY *key)
{
# Argument registers per the RISC-V calling convention: a0 = user key,
# a1 = key size in bits, a2 = AES_KEY output; t-registers are scratch.
my ($UKEY,$BITS,$KEYP) = ("a0", "a1", "a2");
my ($T0,$T1,$T4) = ("t1", "t2", "t4");
# Symbolic vector-register names; v10..v24 hold the expanded round keys.
my ($v0, $v1, $v2, $v3, $v4, $v5, $v6,
$v7, $v8, $v9, $v10, $v11, $v12,
$v13, $v14, $v15, $v16, $v17, $v18,
$v19, $v20, $v21, $v22, $v23, $v24,
) = map("v$_",(0..24));
# Encrypt-key entry: reject NULL pointers (-> L_fail_m1, returns -1),
# then dispatch on key size; 192-bit keys are unsupported (-> L_fail_m2).
$code .= <<___;
.p2align 3
.globl rv64i_zvkned_set_encrypt_key
.type rv64i_zvkned_set_encrypt_key,\@function
rv64i_zvkned_set_encrypt_key:
beqz $UKEY, L_fail_m1
beqz $KEYP, L_fail_m1
# Get proper routine for key size
li $T0, 256
beq $BITS, $T0, L_set_key_256
li $T0, 128
beq $BITS, $T0, L_set_key_128
j L_fail_m2
.size rv64i_zvkned_set_encrypt_key,.-rv64i_zvkned_set_encrypt_key
___
# Decrypt-key entry: identical body to the encrypt path, because the
# Zvkned decryption instructions consume the same (forward) key schedule.
$code .= <<___;
.p2align 3
.globl rv64i_zvkned_set_decrypt_key
.type rv64i_zvkned_set_decrypt_key,\@function
rv64i_zvkned_set_decrypt_key:
beqz $UKEY, L_fail_m1
beqz $KEYP, L_fail_m1
# Get proper routine for key size
li $T0, 256
beq $BITS, $T0, L_set_key_256
li $T0, 128
beq $BITS, $T0, L_set_key_128
j L_fail_m2
.size rv64i_zvkned_set_decrypt_key,.-rv64i_zvkned_set_decrypt_key
___
# AES-128 key expansion: vaeskf1.vi derives each round key from the
# previous one; the 11 round keys (v10-v20) are stored contiguously at
# 16-byte offsets, then a0=1 signals success.
$code .= <<___;
.p2align 3
L_set_key_128:
# Store the number of rounds
li $T1, 10
sw $T1, 240($KEYP)
@{[vsetivli__x0_4_e32_m1_tu_mu]}
# Load the key
@{[vle32_v $v10, ($UKEY)]}
# Generate keys for round 2-11 into registers v11-v20.
@{[vaeskf1_vi $v11, $v10, 1]} # v11 <- rk2 (w[ 4, 7])
@{[vaeskf1_vi $v12, $v11, 2]} # v12 <- rk3 (w[ 8,11])
@{[vaeskf1_vi $v13, $v12, 3]} # v13 <- rk4 (w[12,15])
@{[vaeskf1_vi $v14, $v13, 4]} # v14 <- rk5 (w[16,19])
@{[vaeskf1_vi $v15, $v14, 5]} # v15 <- rk6 (w[20,23])
@{[vaeskf1_vi $v16, $v15, 6]} # v16 <- rk7 (w[24,27])
@{[vaeskf1_vi $v17, $v16, 7]} # v17 <- rk8 (w[28,31])
@{[vaeskf1_vi $v18, $v17, 8]} # v18 <- rk9 (w[32,35])
@{[vaeskf1_vi $v19, $v18, 9]} # v19 <- rk10 (w[36,39])
@{[vaeskf1_vi $v20, $v19, 10]} # v20 <- rk11 (w[40,43])
# Store the round keys
@{[vse32_v $v10, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vse32_v $v11, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vse32_v $v12, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vse32_v $v13, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vse32_v $v14, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vse32_v $v15, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vse32_v $v16, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vse32_v $v17, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vse32_v $v18, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vse32_v $v19, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vse32_v $v20, ($KEYP)]}
li a0, 1
ret
.size L_set_key_128,.-L_set_key_128
___
# AES-256 key expansion: vaeskf2.vi alternates with vmv.v.v because each
# new round key also depends on the round key from two steps back; the
# 15 round keys (v10-v24) are stored at 16-byte offsets.
$code .= <<___;
.p2align 3
L_set_key_256:
# Store the number of rounds
li $T1, 14
sw $T1, 240($KEYP)
@{[vsetivli__x0_4_e32_m1_tu_mu]}
# Load the key
@{[vle32_v $v10, ($UKEY)]}
addi $UKEY, $UKEY, 16
@{[vle32_v $v11, ($UKEY)]}
@{[vmv_v_v $v12, $v10]}
@{[vaeskf2_vi $v12, $v11, 2]}
@{[vmv_v_v $v13, $v11]}
@{[vaeskf2_vi $v13, $v12, 3]}
@{[vmv_v_v $v14, $v12]}
@{[vaeskf2_vi $v14, $v13, 4]}
@{[vmv_v_v $v15, $v13]}
@{[vaeskf2_vi $v15, $v14, 5]}
@{[vmv_v_v $v16, $v14]}
@{[vaeskf2_vi $v16, $v15, 6]}
@{[vmv_v_v $v17, $v15]}
@{[vaeskf2_vi $v17, $v16, 7]}
@{[vmv_v_v $v18, $v16]}
@{[vaeskf2_vi $v18, $v17, 8]}
@{[vmv_v_v $v19, $v17]}
@{[vaeskf2_vi $v19, $v18, 9]}
@{[vmv_v_v $v20, $v18]}
@{[vaeskf2_vi $v20, $v19, 10]}
@{[vmv_v_v $v21, $v19]}
@{[vaeskf2_vi $v21, $v20, 11]}
@{[vmv_v_v $v22, $v20]}
@{[vaeskf2_vi $v22, $v21, 12]}
@{[vmv_v_v $v23, $v21]}
@{[vaeskf2_vi $v23, $v22, 13]}
@{[vmv_v_v $v24, $v22]}
@{[vaeskf2_vi $v24, $v23, 14]}
@{[vse32_v $v10, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vse32_v $v11, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vse32_v $v12, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vse32_v $v13, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vse32_v $v14, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vse32_v $v15, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vse32_v $v16, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vse32_v $v17, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vse32_v $v18, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vse32_v $v19, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vse32_v $v20, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vse32_v $v21, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vse32_v $v22, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vse32_v $v23, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vse32_v $v24, ($KEYP)]}
li a0, 1
ret
.size L_set_key_256,.-L_set_key_256
___
}
################################################################################
# void rv64i_zvkned_encrypt(const unsigned char *in, unsigned char *out,
# const AES_KEY *key);
{
# a0 = plaintext in, a1 = ciphertext out, a2 = AES_KEY; t5 holds the
# round count read from the key schedule, t6 is scratch.
my ($INP,$OUTP,$KEYP) = ("a0", "a1", "a2");
my ($T0,$T1, $rounds, $T6) = ("a3", "a4", "t5", "t6");
# Symbolic vector-register names; v10..v24 receive the round keys,
# v1 carries the block state.
my ($v0, $v1, $v2, $v3, $v4, $v5, $v6,
$v7, $v8, $v9, $v10, $v11, $v12,
$v13, $v14, $v15, $v16, $v17, $v18,
$v19, $v20, $v21, $v22, $v23, $v24,
) = map("v$_",(0..24));
# Entry point: dispatch on rounds (AES_KEY.rounds at offset 240);
# 10 -> AES-128, 14 -> AES-256, anything else is unsupported.
$code .= <<___;
.p2align 3
.globl rv64i_zvkned_encrypt
.type rv64i_zvkned_encrypt,\@function
rv64i_zvkned_encrypt:
# Load number of rounds
lwu $rounds, 240($KEYP)
# Get proper routine for key size
li $T6, 14
beq $rounds, $T6, L_enc_256
li $T6, 10
beq $rounds, $T6, L_enc_128
j L_fail_m2
.size rv64i_zvkned_encrypt,.-rv64i_zvkned_encrypt
___
# AES-128 single-block encryption: load the 11 round keys, then run
# vaesz (round 0) + 9x vaesem (middle) + vaesef (final) on the block.
$code .= <<___;
.p2align 3
L_enc_128:
@{[vsetivli__x0_4_e32_m1_tu_mu]}
@{[vle32_v $v10, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v11, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v12, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v13, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v14, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v15, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v16, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v17, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v18, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v19, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v20, ($KEYP)]}
@{[vle32_v $v1, ($INP)]}
@{[vaesz_vs $v1, $v10]} # with round key w[ 0, 3]
@{[vaesem_vs $v1, $v11]} # with round key w[ 4, 7]
@{[vaesem_vs $v1, $v12]} # with round key w[ 8,11]
@{[vaesem_vs $v1, $v13]} # with round key w[12,15]
@{[vaesem_vs $v1, $v14]} # with round key w[16,19]
@{[vaesem_vs $v1, $v15]} # with round key w[20,23]
@{[vaesem_vs $v1, $v16]} # with round key w[24,27]
@{[vaesem_vs $v1, $v17]} # with round key w[28,31]
@{[vaesem_vs $v1, $v18]} # with round key w[32,35]
@{[vaesem_vs $v1, $v19]} # with round key w[36,39]
@{[vaesef_vs $v1, $v20]} # with round key w[40,43]
@{[vse32_v $v1, ($OUTP)]}
ret
.size L_enc_128,.-L_enc_128
___
# AES-256 single-block encryption: same structure with 15 round keys,
# i.e. vaesz + 13x vaesem + vaesef.
$code .= <<___;
.p2align 3
L_enc_256:
@{[vsetivli__x0_4_e32_m1_tu_mu]}
@{[vle32_v $v10, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v11, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v12, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v13, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v14, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v15, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v16, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v17, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v18, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v19, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v20, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v21, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v22, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v23, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v24, ($KEYP)]}
@{[vle32_v $v1, ($INP)]}
@{[vaesz_vs $v1, $v10]} # with round key w[ 0, 3]
@{[vaesem_vs $v1, $v11]}
@{[vaesem_vs $v1, $v12]}
@{[vaesem_vs $v1, $v13]}
@{[vaesem_vs $v1, $v14]}
@{[vaesem_vs $v1, $v15]}
@{[vaesem_vs $v1, $v16]}
@{[vaesem_vs $v1, $v17]}
@{[vaesem_vs $v1, $v18]}
@{[vaesem_vs $v1, $v19]}
@{[vaesem_vs $v1, $v20]}
@{[vaesem_vs $v1, $v21]}
@{[vaesem_vs $v1, $v22]}
@{[vaesem_vs $v1, $v23]}
@{[vaesef_vs $v1, $v24]}
@{[vse32_v $v1, ($OUTP)]}
ret
.size L_enc_256,.-L_enc_256
___
}
################################################################################
# void rv64i_zvkned_decrypt(const unsigned char *in, unsigned char *out,
# const AES_KEY *key);
{
# a0 = ciphertext in, a1 = plaintext out, a2 = AES_KEY; t5 holds the
# round count read from the key schedule, t6 is scratch.
my ($INP,$OUTP,$KEYP) = ("a0", "a1", "a2");
my ($T0,$T1, $rounds, $T6) = ("a3", "a4", "t5", "t6");
# Symbolic vector-register names; v10..v24 receive the round keys,
# v1 carries the block state.
my ($v0, $v1, $v2, $v3, $v4, $v5, $v6,
$v7, $v8, $v9, $v10, $v11, $v12,
$v13, $v14, $v15, $v16, $v17, $v18,
$v19, $v20, $v21, $v22, $v23, $v24,
) = map("v$_",(0..24));
# Entry point: dispatch on rounds (AES_KEY.rounds at offset 240);
# 10 -> AES-128, 14 -> AES-256, anything else is unsupported.
$code .= <<___;
.p2align 3
.globl rv64i_zvkned_decrypt
.type rv64i_zvkned_decrypt,\@function
rv64i_zvkned_decrypt:
# Load number of rounds
lwu $rounds, 240($KEYP)
# Get proper routine for key size
li $T6, 14
beq $rounds, $T6, L_dec_256
li $T6, 10
beq $rounds, $T6, L_dec_128
j L_fail_m2
.size rv64i_zvkned_decrypt,.-rv64i_zvkned_decrypt
___
# AES-128 single-block decryption: consume the forward key schedule in
# reverse order -- vaesz with the last round key, 9x vaesdm (middle),
# vaesdf (final).
# NOTE(review): the 'w[43,47]' asm comment below looks like a typo for
# 'w[40,43]' -- comment only, the code is correct.
$code .= <<___;
.p2align 3
L_dec_128:
@{[vsetivli__x0_4_e32_m1_tu_mu]}
@{[vle32_v $v10, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v11, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v12, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v13, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v14, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v15, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v16, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v17, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v18, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v19, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v20, ($KEYP)]}
@{[vle32_v $v1, ($INP)]}
@{[vaesz_vs $v1, $v20]} # with round key w[43,47]
@{[vaesdm_vs $v1, $v19]} # with round key w[36,39]
@{[vaesdm_vs $v1, $v18]} # with round key w[32,35]
@{[vaesdm_vs $v1, $v17]} # with round key w[28,31]
@{[vaesdm_vs $v1, $v16]} # with round key w[24,27]
@{[vaesdm_vs $v1, $v15]} # with round key w[20,23]
@{[vaesdm_vs $v1, $v14]} # with round key w[16,19]
@{[vaesdm_vs $v1, $v13]} # with round key w[12,15]
@{[vaesdm_vs $v1, $v12]} # with round key w[ 8,11]
@{[vaesdm_vs $v1, $v11]} # with round key w[ 4, 7]
@{[vaesdf_vs $v1, $v10]} # with round key w[ 0, 3]
@{[vse32_v $v1, ($OUTP)]}
ret
.size L_dec_128,.-L_dec_128
___
# AES-256 single-block decryption: same structure with 15 round keys
# consumed in reverse -- vaesz + 13x vaesdm + vaesdf.
$code .= <<___;
.p2align 3
L_dec_256:
@{[vsetivli__x0_4_e32_m1_tu_mu]}
@{[vle32_v $v10, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v11, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v12, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v13, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v14, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v15, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v16, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v17, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v18, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v19, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v20, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v21, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v22, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v23, ($KEYP)]}
addi $KEYP, $KEYP, 16
@{[vle32_v $v24, ($KEYP)]}
@{[vle32_v $v1, ($INP)]}
@{[vaesz_vs $v1, $v24]} # with round key w[56,59]
@{[vaesdm_vs $v1, $v23]} # with round key w[52,55]
@{[vaesdm_vs $v1, $v22]} # with round key w[48,51]
@{[vaesdm_vs $v1, $v21]} # with round key w[44,47]
@{[vaesdm_vs $v1, $v20]} # with round key w[40,43]
@{[vaesdm_vs $v1, $v19]} # with round key w[36,39]
@{[vaesdm_vs $v1, $v18]} # with round key w[32,35]
@{[vaesdm_vs $v1, $v17]} # with round key w[28,31]
@{[vaesdm_vs $v1, $v16]} # with round key w[24,27]
@{[vaesdm_vs $v1, $v15]} # with round key w[20,23]
@{[vaesdm_vs $v1, $v14]} # with round key w[16,19]
@{[vaesdm_vs $v1, $v13]} # with round key w[12,15]
@{[vaesdm_vs $v1, $v12]} # with round key w[ 8,11]
@{[vaesdm_vs $v1, $v11]} # with round key w[ 4, 7]
@{[vaesdf_vs $v1, $v10]} # with round key w[ 0, 3]
@{[vse32_v $v1, ($OUTP)]}
ret
.size L_dec_256,.-L_dec_256
___
}
# Shared failure handlers: L_fail_m1 returns -1 (NULL pointer argument),
# L_fail_m2 returns -2 (unsupported key size / round count).
$code .= <<___;
L_fail_m1:
li a0, -1
ret
.size L_fail_m1,.-L_fail_m1
L_fail_m2:
li a0, -2
ret
.size L_fail_m2,.-L_fail_m2
___
# Emit the accumulated assembly and flush; close failure would mean a
# truncated output file, so it must be fatal.
print $code;
close STDOUT or die "error closing STDOUT: $!";
......@@ -47,7 +47,7 @@ IF[{- !$disabled{asm} -}]
# aes-c64xplus.s implements AES_ctr32_encrypt
$AESDEF_c64xplus=AES_ASM AES_CTR_ASM
$AESASM_riscv64=aes_cbc.c aes-riscv64.s aes-riscv64-zkn.s
$AESASM_riscv64=aes_cbc.c aes-riscv64.s aes-riscv64-zkn.s aes-riscv64-zvkned.s
$AESDEF_riscv64=AES_ASM
$AESASM_riscv32=aes_core.c aes_cbc.c aes-riscv32-zkn.s
......@@ -124,6 +124,7 @@ INCLUDE[aes-mips.o]=..
GENERATE[aes-riscv64.s]=asm/aes-riscv64.pl
GENERATE[aes-riscv64-zkn.s]=asm/aes-riscv64-zkn.pl
GENERATE[aes-riscv32-zkn.s]=asm/aes-riscv32-zkn.pl
GENERATE[aes-riscv64-zvkned.s]=asm/aes-riscv64-zvkned.pl
GENERATE[aesv8-armx.S]=asm/aesv8-armx.pl
INCLUDE[aesv8-armx.o]=..
......
......@@ -498,4 +498,64 @@ sub vgmul_vv {
return ".word ".($template | ($vs2 << 20) | ($vd << 7));
}
## Zvkned instructions
sub vaesdf_vs {
    # vaesdf.vs vd, vs2 -- AES final-round decryption, scalar round key.
    # Emitted as a raw .word so assemblers without Zvkned support can
    # still process the generated source.
    my $rd  = read_vreg(shift);
    my $rs2 = read_vreg(shift);
    my $opcode = 0b101001_1_00000_00001_010_00000_1110111;
    return ".word " . ($opcode | ($rs2 << 20) | ($rd << 7));
}
sub vaesdm_vs {
    # vaesdm.vs vd, vs2 -- AES middle-round decryption, scalar round key.
    # Emitted as a raw .word so assemblers without Zvkned support can
    # still process the generated source.
    my $rd  = read_vreg(shift);
    my $rs2 = read_vreg(shift);
    my $opcode = 0b101001_1_00000_00000_010_00000_1110111;
    return ".word " . ($opcode | ($rs2 << 20) | ($rd << 7));
}
sub vaesef_vs {
    # vaesef.vs vd, vs2 -- AES final-round encryption, scalar round key.
    # Emitted as a raw .word so assemblers without Zvkned support can
    # still process the generated source.
    my $rd  = read_vreg(shift);
    my $rs2 = read_vreg(shift);
    my $opcode = 0b101001_1_00000_00011_010_00000_1110111;
    return ".word " . ($opcode | ($rs2 << 20) | ($rd << 7));
}
sub vaesem_vs {
    # vaesem.vs vd, vs2 -- AES middle-round encryption, scalar round key.
    # Emitted as a raw .word so assemblers without Zvkned support can
    # still process the generated source.
    my $rd  = read_vreg(shift);
    my $rs2 = read_vreg(shift);
    my $opcode = 0b101001_1_00000_00010_010_00000_1110111;
    return ".word " . ($opcode | ($rs2 << 20) | ($rd << 7));
}
sub vaeskf1_vi {
    # vaeskf1.vi vd, vs2, uimm -- AES-128 forward key-schedule round.
    # The 5-bit round number goes in the rs1/uimm field (bits 15-19).
    my $rd   = read_vreg(shift);
    my $rs2  = read_vreg(shift);
    my $round = shift;
    my $opcode = 0b100010_1_00000_00000_010_00000_1110111;
    return ".word " . ($opcode | ($rd << 7) | ($round << 15) | ($rs2 << 20));
}
sub vaeskf2_vi {
    # vaeskf2.vi vd, vs2, uimm -- AES-256 forward key-schedule round.
    # The 5-bit round number goes in the rs1/uimm field (bits 15-19).
    my $rd   = read_vreg(shift);
    my $rs2  = read_vreg(shift);
    my $round = shift;
    my $opcode = 0b101010_1_00000_00000_010_00000_1110111;
    return ".word " . ($opcode | ($rd << 7) | ($round << 15) | ($rs2 << 20));
}
sub vaesz_vs {
    # vaesz.vs vd, vs2 -- AES round-zero add (XOR with scalar round key).
    # Emitted as a raw .word so assemblers without Zvkned support can
    # still process the generated source.
    my $rd  = read_vreg(shift);
    my $rs2 = read_vreg(shift);
    my $opcode = 0b101001_1_00000_00111_010_00000_1110111;
    return ".word " . ($opcode | ($rs2 << 20) | ($rd << 7));
}
1;
......@@ -435,6 +435,7 @@ void aes256_t4_xts_decrypt(const unsigned char *in, unsigned char *out,
/* RISC-V 64 support */
# include "riscv_arch.h"
/* Zkne and Zknd extensions (scalar crypto AES). */
int rv64i_zkne_set_encrypt_key(const unsigned char *userKey, const int bits,
AES_KEY *key);
int rv64i_zknd_set_decrypt_key(const unsigned char *userKey, const int bits,
......@@ -443,6 +444,16 @@ void rv64i_zkne_encrypt(const unsigned char *in, unsigned char *out,
const AES_KEY *key);
void rv64i_zknd_decrypt(const unsigned char *in, unsigned char *out,
const AES_KEY *key);
/* Zvkned extension (vector crypto AES). */
int rv64i_zvkned_set_encrypt_key(const unsigned char *userKey, const int bits,
AES_KEY *key);
int rv64i_zvkned_set_decrypt_key(const unsigned char *userKey, const int bits,
AES_KEY *key);
void rv64i_zvkned_encrypt(const unsigned char *in, unsigned char *out,
const AES_KEY *key);
void rv64i_zvkned_decrypt(const unsigned char *in, unsigned char *out,
const AES_KEY *key);
# elif defined(OPENSSL_CPUID_OBJ) && defined(__riscv) && __riscv_xlen == 32
/* RISC-V 32 support */
# include "riscv_arch.h"
......
......@@ -36,6 +36,7 @@ RISCV_DEFINE_CAP(V, 0, 14)
RISCV_DEFINE_CAP(ZVBB, 0, 15)
RISCV_DEFINE_CAP(ZVBC, 0, 16)
RISCV_DEFINE_CAP(ZVKG, 0, 17)
RISCV_DEFINE_CAP(ZVKNED, 0, 18)
/*
* In the future ...
......
......@@ -31,7 +31,41 @@ static const PROV_CCM_HW rv64i_zknd_zkne_ccm = {
ossl_ccm_generic_gettag
};
/*-
* RISC-V RV64 ZVKNED support for AES CCM.
* This file is included by cipher_aes_ccm_hw.c
*/
/*
 * Key-setup hook for the AES-CCM provider on RV64 with the Zvkned
 * vector-crypto extension.  Falls back to the portable C AES
 * implementation for key sizes Zvkned cannot handle (192 bits).
 * Always returns 1 (key-expansion errors surface later via the HW ops).
 */
static int ccm_rv64i_zvkned_initkey(PROV_CCM_CTX *ctx, const unsigned char *key,
                                    size_t keylen)
{
    PROV_AES_CCM_CTX *actx = (PROV_AES_CCM_CTX *)ctx;

    /* Zvkned only supports 128 and 256 bit keys. */
    if (keylen * 8 == 128 || keylen * 8 == 256) {
        AES_HW_CCM_SET_KEY_FN(rv64i_zvkned_set_encrypt_key, rv64i_zvkned_encrypt,
                              NULL, NULL);
    } else {
        /* Fixed: the original was missing the terminating semicolon. */
        AES_HW_CCM_SET_KEY_FN(AES_set_encrypt_key, AES_encrypt, NULL, NULL);
    }
    return 1;
}
static const PROV_CCM_HW rv64i_zvkned_ccm = {
ccm_rv64i_zvkned_initkey,
ossl_ccm_generic_setiv,
ossl_ccm_generic_setaad,
ossl_ccm_generic_auth_encrypt,
ossl_ccm_generic_auth_decrypt,
ossl_ccm_generic_gettag
};
/*
 * Select the best available AES-CCM backend for this CPU: prefer the
 * Zvkned vector implementation (requires VLEN >= 128), then the
 * Zknd+Zkne scalar-crypto one, otherwise the generic C implementation.
 * Fixed: a stale unconditional return made the Zvkned dispatch
 * unreachable.
 */
const PROV_CCM_HW *ossl_prov_aes_hw_ccm(size_t keybits)
{
    if (RISCV_HAS_ZVKNED() && riscv_vlen() >= 128)
        return &rv64i_zvkned_ccm;
    else if (RISCV_HAS_ZKND_AND_ZKNE())
        return &rv64i_zknd_zkne_ccm;
    else
        return &aes_ccm;
}
......@@ -31,9 +31,40 @@ static const PROV_GCM_HW rv64i_zknd_zkne_gcm = {
ossl_gcm_one_shot
};
/*-
* RISC-V RV64 ZVKNED support for AES GCM.
* This file is included by cipher_aes_gcm_hw.c
*/
/*
 * Key-setup hook for the AES-GCM provider on RV64 with the Zvkned
 * vector-crypto extension.  Installs the vector AES key expansion and
 * block function for 128/256-bit keys and falls back to the portable C
 * implementation otherwise (192 bits).  Always returns 1.
 * NOTE(review): GCM_HW_SET_KEY_CTR_FN presumably also references the
 * surrounding locals (e.g. actx) by name -- confirm in the including
 * file before renaming anything here.
 */
static int rv64i_zvkned_gcm_initkey(PROV_GCM_CTX *ctx, const unsigned char *key,
size_t keylen)
{
PROV_AES_GCM_CTX *actx = (PROV_AES_GCM_CTX *)ctx;
AES_KEY *ks = &actx->ks.ks;
/* Zvkned only supports 128 and 256 bit keys. */
if (keylen * 8 == 128 || keylen * 8 == 256) {
GCM_HW_SET_KEY_CTR_FN(ks, rv64i_zvkned_set_encrypt_key,
rv64i_zvkned_encrypt, NULL);
} else {
GCM_HW_SET_KEY_CTR_FN(ks, AES_set_encrypt_key, AES_encrypt, NULL);
}
return 1;
}
static const PROV_GCM_HW rv64i_zvkned_gcm = {
rv64i_zvkned_gcm_initkey,
ossl_gcm_setiv,
ossl_gcm_aad_update,
generic_aes_gcm_cipher_update,
ossl_gcm_cipher_final,
ossl_gcm_one_shot
};
const PROV_GCM_HW *ossl_prov_aes_hw_gcm(size_t keybits)
{
if (RISCV_HAS_ZKND_AND_ZKNE())
if (RISCV_HAS_ZVKNED() && riscv_vlen() >= 128)
return &rv64i_zvkned_gcm;
else if (RISCV_HAS_ZKND_AND_ZKNE())
return &rv64i_zknd_zkne_gcm;
else
return &aes_gcm;
......
......@@ -48,12 +48,77 @@ static int cipher_hw_rv64i_zknd_zkne_initkey(PROV_CIPHER_CTX *dat,
return 1;
}
/*-
* RISC-V RV64 ZVKNED support for AES modes ecb, cbc, ofb, cfb, ctr.
* This file is included by cipher_aes_hw.c
*/
#define cipher_hw_rv64i_zvkned_cbc ossl_cipher_hw_generic_cbc
#define cipher_hw_rv64i_zvkned_ecb ossl_cipher_hw_generic_ecb
#define cipher_hw_rv64i_zvkned_ofb128 ossl_cipher_hw_generic_ofb128
#define cipher_hw_rv64i_zvkned_cfb128 ossl_cipher_hw_generic_cfb128
#define cipher_hw_rv64i_zvkned_cfb8 ossl_cipher_hw_generic_cfb8
#define cipher_hw_rv64i_zvkned_cfb1 ossl_cipher_hw_generic_cfb1
#define cipher_hw_rv64i_zvkned_ctr ossl_cipher_hw_generic_ctr
/*
 * Key-setup for the generic AES modes (ecb, cbc, ofb, cfb, ctr) on RV64
 * with the Zvkned vector-crypto extension.  For 128/256-bit keys the
 * vector AES primitives are installed (no CBC stream function exists
 * for them); other sizes (192 bits) fall back to the portable C AES,
 * which does provide an accelerated CBC stream routine.
 * Returns 1 on success, 0 (with PROV_R_KEY_SETUP_FAILED raised) if the
 * key expansion reports an error.
 */
static int cipher_hw_rv64i_zvkned_initkey(PROV_CIPHER_CTX *dat,
                                          const unsigned char *key,
                                          size_t keylen)
{
    PROV_AES_CTX *actx = (PROV_AES_CTX *)dat;
    AES_KEY *round_keys = &actx->ks.ks;
    /* ECB/CBC decryption needs the inverse key schedule; the streaming
     * modes (ofb/cfb/ctr) always encrypt the keystream. */
    const int use_dec_key = !dat->enc
        && (dat->mode == EVP_CIPH_ECB_MODE || dat->mode == EVP_CIPH_CBC_MODE);
    int rc;

    dat->ks = round_keys;

    if (keylen * 8 == 128 || keylen * 8 == 256) {
        /* Zvkned only supports 128 and 256 bit keys. */
        if (use_dec_key) {
            rc = rv64i_zvkned_set_decrypt_key(key, keylen * 8, round_keys);
            dat->block = (block128_f) rv64i_zvkned_decrypt;
        } else {
            rc = rv64i_zvkned_set_encrypt_key(key, keylen * 8, round_keys);
            dat->block = (block128_f) rv64i_zvkned_encrypt;
        }
        dat->stream.cbc = NULL;
    } else {
        cbc128_f cbc_stream = (dat->mode == EVP_CIPH_CBC_MODE)
            ? (cbc128_f)AES_cbc_encrypt : NULL;

        if (use_dec_key) {
            rc = AES_set_decrypt_key(key, keylen * 8, round_keys);
            dat->block = (block128_f)AES_decrypt;
        } else {
            rc = AES_set_encrypt_key(key, keylen * 8, round_keys);
            dat->block = (block128_f)AES_encrypt;
        }
        dat->stream.cbc = cbc_stream;
    }

    if (rc < 0) {
        ERR_raise(ERR_LIB_PROV, PROV_R_KEY_SETUP_FAILED);
        return 0;
    }
    return 1;
}
/*
 * Declares per-mode PROV_CIPHER_HW tables for both RISC-V AES backends
 * (scalar Zknd+Zkne and vector Zvkned); selection between them happens
 * in PROV_CIPHER_HW_select(mode).
 */
#define PROV_CIPHER_HW_declare(mode) \
static const PROV_CIPHER_HW rv64i_zknd_zkne_##mode = { \
cipher_hw_rv64i_zknd_zkne_initkey, \
cipher_hw_rv64i_zknd_zkne_##mode, \
cipher_hw_aes_copyctx \
}; \
static const PROV_CIPHER_HW rv64i_zvkned_##mode = { \
cipher_hw_rv64i_zvkned_initkey, \
cipher_hw_rv64i_zvkned_##mode, \
cipher_hw_aes_copyctx \
};
/*
 * Dispatch to the best available implementation: vector Zvkned first
 * (requires VLEN >= 128), then scalar Zknd+Zkne; falls through to the
 * caller's generic default when neither is present.
 * Fixed: a stale duplicated "if (RISCV_HAS_ZKND_AND_ZKNE())" line
 * wrongly nested the conditionals.
 */
#define PROV_CIPHER_HW_select(mode)                                            \
if (RISCV_HAS_ZVKNED() && riscv_vlen() >= 128)                                 \
    return &rv64i_zvkned_##mode;                                               \
else if (RISCV_HAS_ZKND_AND_ZKNE())                                            \
    return &rv64i_zknd_zkne_##mode;
......@@ -117,13 +117,38 @@ static int cipher_hw_aes_ocb_rv64i_zknd_zkne_initkey(PROV_CIPHER_CTX *vctx,
return 1;
}
/*
 * Key-setup hook for AES-OCB on RV64 with the Zvkned vector-crypto
 * extension.  Installs the vector AES key expansion and block functions
 * for 128/256-bit keys, falling back to the portable C implementation
 * otherwise (192 bits).  Always returns 1.
 * NOTE(review): OCB_SET_KEY_FN presumably references the local 'ctx'
 * by name -- confirm in the including file before renaming it.
 */
static int cipher_hw_aes_ocb_rv64i_zvkned_initkey(PROV_CIPHER_CTX *vctx,
const unsigned char *key,
size_t keylen)
{
PROV_AES_OCB_CTX *ctx = (PROV_AES_OCB_CTX *)vctx;
/* Zvkned only supports 128 and 256 bit keys. */
if (keylen * 8 == 128 || keylen * 8 == 256) {
OCB_SET_KEY_FN(rv64i_zvkned_set_encrypt_key,
rv64i_zvkned_set_decrypt_key,
rv64i_zvkned_encrypt, rv64i_zvkned_decrypt,
NULL, NULL);
} else {
OCB_SET_KEY_FN(AES_set_encrypt_key, AES_set_decrypt_key,
AES_encrypt, AES_decrypt, NULL, NULL);
}
return 1;
}
/*
 * Declares the OCB PROV_CIPHER_HW tables for both RISC-V AES backends
 * (scalar Zknd+Zkne and vector Zvkned); PROV_CIPHER_HW_select() picks
 * between them at runtime.
 */
# define PROV_CIPHER_HW_declare() \
static const PROV_CIPHER_HW aes_rv64i_zknd_zkne_ocb = { \
cipher_hw_aes_ocb_rv64i_zknd_zkne_initkey, \
NULL \
}; \
static const PROV_CIPHER_HW aes_rv64i_zvkned_ocb = { \
cipher_hw_aes_ocb_rv64i_zvkned_initkey, \
NULL \
};
/*
 * Dispatch to the best available OCB implementation: vector Zvkned
 * first (requires VLEN >= 128), then scalar Zknd+Zkne; falls through
 * to the caller's generic default otherwise.
 * Fixed: a stale duplicated "if (RISCV_HAS_ZKND_AND_ZKNE())" line
 * wrongly nested the conditionals.
 */
# define PROV_CIPHER_HW_select()                                               \
if (RISCV_HAS_ZVKNED() && riscv_vlen() >= 128)                                 \
    return &aes_rv64i_zvkned_ocb;                                              \
else if (RISCV_HAS_ZKND_AND_ZKNE())                                            \
    return &aes_rv64i_zknd_zkne_ocb;
#elif defined(__riscv) && __riscv_xlen == 32
......
......@@ -175,15 +175,45 @@ static int cipher_hw_aes_xts_rv64i_zknd_zkne_initkey(PROV_CIPHER_CTX *ctx,
return 1;
}
/*
 * Key-setup hook for AES-XTS on RV64 with the Zvkned vector-crypto
 * extension.  Installs the vector AES key expansion and block functions
 * for 128/256-bit keys (no dedicated XTS stream routines exist, hence
 * both stream pointers stay NULL), falling back to the portable C
 * implementation otherwise.  Always returns 1.
 * NOTE(review): XTS_SET_KEY_FN presumably references the local 'xctx'
 * by name -- confirm in the including file before renaming it.
 */
static int cipher_hw_aes_xts_rv64i_zvkned_initkey(PROV_CIPHER_CTX *ctx,
const unsigned char *key,
size_t keylen)
{
PROV_AES_XTS_CTX *xctx = (PROV_AES_XTS_CTX *)ctx;
OSSL_xts_stream_fn stream_enc = NULL;
OSSL_xts_stream_fn stream_dec = NULL;
/* Zvkned only supports 128 and 256 bit keys. */
if (keylen * 8 == 128 || keylen * 8 == 256) {
XTS_SET_KEY_FN(rv64i_zvkned_set_encrypt_key,
rv64i_zvkned_set_decrypt_key,
rv64i_zvkned_encrypt, rv64i_zvkned_decrypt,
stream_enc, stream_dec);
} else {
XTS_SET_KEY_FN(AES_set_encrypt_key, AES_set_decrypt_key,
AES_encrypt, AES_decrypt,
stream_enc, stream_dec);
}
return 1;
}
/*
 * Declares the XTS PROV_CIPHER_HW tables for both RISC-V AES backends.
 * Fixed: the first "};" was missing its backslash continuation, which
 * ended the macro early and left the second table outside of it.
 */
# define PROV_CIPHER_HW_declare_xts()                                          \
static const PROV_CIPHER_HW aes_xts_rv64i_zknd_zkne = {                        \
    cipher_hw_aes_xts_rv64i_zknd_zkne_initkey,                                 \
    NULL,                                                                      \
    cipher_hw_aes_xts_copyctx                                                  \
};                                                                             \
static const PROV_CIPHER_HW aes_xts_rv64i_zvkned = {                           \
    cipher_hw_aes_xts_rv64i_zvkned_initkey,                                    \
    NULL,                                                                      \
    cipher_hw_aes_xts_copyctx                                                  \
};
/*
 * Dispatch to the best available XTS implementation: vector Zvkned
 * first (requires VLEN >= 128), then scalar Zknd+Zkne; falls through
 * to the caller's generic default otherwise.
 * Fixed: stale duplicated branch lines from the old version, and a
 * trailing backslash on the final line that would have spliced the
 * following preprocessor directive into the macro body.
 */
# define PROV_CIPHER_HW_select_xts()                                           \
if (RISCV_HAS_ZVKNED() && riscv_vlen() >= 128)                                 \
    return &aes_xts_rv64i_zvkned;                                              \
else if (RISCV_HAS_ZKND_AND_ZKNE())                                           \
    return &aes_xts_rv64i_zknd_zkne;
#elif defined(__riscv) && __riscv_xlen == 32
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册