riscv: Support SM3 on platforms with VLEN >= 128.

This patch updates ossl_hwsm3_block_data_order_zvksh to use LMUL=2 (e32/m2)
register groups and the Zvkb extension in place of Zvbb, which enables SM3
on platforms with VLEN >= 128.

Signed-off-by: Jerry Shih <jerry.shih@sifive.com>
Signed-off-by: Phoebe Chen <phoebe.chen@sifive.com>

Reviewed-by: Tomas Mraz <tomas@openssl.org>
Reviewed-by: Paul Dale <pauli@openssl.org>
Reviewed-by: Hugo Landau <hlandau@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/21923)
Jerry Shih authored on 2023-09-11 11:30:52 +08:00, committed by Hugo Landau
parent fbe6348363
commit 1c25bc2e3f
2 changed files with 90 additions and 85 deletions
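
At the C level the commit boils down to the dispatch condition changed in the
second file below: the Zvksh routine is taken only when the CPU reports the
Zvkb and Zvksh extensions and a VLEN of at least 128 bits, otherwise the
generic SM3 block function is used. The standalone sketch below restates that
pattern for readers skimming the diff; the probe function, the VLEN helper and
the two block routines are hypothetical stand-ins rather than OpenSSL's actual
RISCV_HAS_ZVKB_AND_ZVKSH(), riscv_vlen(), ossl_hwsm3_block_data_order_zvksh()
and ossl_sm3_block_data_order(), and only the >= 128 threshold and the
fallback structure mirror the patch.

    /* Minimal sketch of the runtime dispatch this commit relaxes.
     * All names below are illustrative stand-ins, not OpenSSL API. */
    #include <stddef.h>
    #include <stdio.h>

    static int cpu_has_zvkb_and_zvksh(void) { return 0; }   /* pretend probe */
    static size_t riscv_vlen_bits(void)     { return 128; } /* pretend VLEN  */

    static void sm3_block_vector(const void *p, size_t num)
    {
        (void)p;
        printf("Zvksh path: %zu block(s)\n", num);
    }

    static void sm3_block_scalar(const void *p, size_t num)
    {
        (void)p;
        printf("scalar fallback: %zu block(s)\n", num);
    }

    static void sm3_block_dispatch(const void *p, size_t num)
    {
        /* After this commit the vector path needs only VLEN >= 128;
         * it previously required VLEN >= 256 and Zvbb. */
        if (cpu_has_zvkb_and_zvksh() && riscv_vlen_bits() >= 128)
            sm3_block_vector(p, num);
        else
            sm3_block_scalar(p, num);
    }

    int main(void)
    {
        unsigned char block[64] = {0};
        sm3_block_dispatch(block, 1);
        return 0;
    }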


@@ -11,6 +11,7 @@
 # or
 #
 # Copyright (c) 2023, Christoph Müllner <christoph.muellner@vrull.eu>
+# Copyright (c) 2023, Jerry Shih <jerry.shih@sifive.com>
 # All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
@@ -36,9 +37,9 @@
 # The generated code of this file depends on the following RISC-V extensions:
 # - RV64I
-# - RISC-V vector ('V') with VLEN >= 256
-# - Vector Bit-manipulation used in Cryptography ('Zvbb')
-# - ShangMi Suite: SM3 Secure Hash ('Zvksh')
+# - RISC-V Vector ('V') with VLEN >= 128
+# - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')
+# - RISC-V Vector SM3 Secure Hash extension ('Zvksh')
 
 use strict;
 use warnings;
@@ -63,7 +64,11 @@ ___
 # ossl_hwsm3_block_data_order_zvksh(SM3_CTX *c, const void *p, size_t num);
 {
 my ($CTX, $INPUT, $NUM) = ("a0", "a1", "a2");
-my ($V0, $V1, $V2, $V3, $V4) = ("v0", "v1", "v2", "v3", "v4");
+my ($V0, $V1, $V2, $V3, $V4, $V5, $V6, $V7,
+    $V8, $V9, $V10, $V11, $V12, $V13, $V14, $V15,
+    $V16, $V17, $V18, $V19, $V20, $V21, $V22, $V23,
+    $V24, $V25, $V26, $V27, $V28, $V29, $V30, $V31,
+) = map("v$_",(0..31));
 
 $code .= <<___;
 .text
@@ -71,142 +76,142 @@ $code .= <<___;
 .globl ossl_hwsm3_block_data_order_zvksh
 .type ossl_hwsm3_block_data_order_zvksh,\@function
 ossl_hwsm3_block_data_order_zvksh:
-@{[vsetivli__x0_8_e32_m1_tu_mu]}
+@{[vsetivli "zero", 8, "e32", "m2", "ta", "ma"]}
 # Load initial state of hash context (c->A-H).
 @{[vle32_v $V0, $CTX]}
 @{[vrev8_v $V0, $V0]}
 L_sm3_loop:
-# Copy the previous state to v1.
+# Copy the previous state to v2.
 # It will be XOR'ed with the current state at the end of the round.
-@{[vmv_v_v $V1, $V0]}
+@{[vmv_v_v $V2, $V0]}
 # Load the 64B block in 2x32B chunks.
-@{[vle32_v $V3, $INPUT]} # v3 := {w7, ..., w0}
+@{[vle32_v $V6, $INPUT]} # v6 := {w7, ..., w0}
-add $INPUT, $INPUT, 32
+addi $INPUT, $INPUT, 32
-@{[vle32_v $V4, $INPUT]} # v4 := {w15, ..., w8}
+@{[vle32_v $V8, $INPUT]} # v8 := {w15, ..., w8}
-add $INPUT, $INPUT, 32
+addi $INPUT, $INPUT, 32
-add $NUM, $NUM, -1
+addi $NUM, $NUM, -1
 # As vsm3c consumes only w0, w1, w4, w5 we need to slide the input
 # 2 elements down so we process elements w2, w3, w6, w7
 # This will be repeated for each odd round.
-@{[vslidedown_vi $V2, $V3, 2]} # v2 := {X, X, w7, ..., w2}
+@{[vslidedown_vi $V4, $V6, 2]} # v4 := {X, X, w7, ..., w2}
-@{[vsm3c_vi $V0, $V3, 0]}
+@{[vsm3c_vi $V0, $V6, 0]}
-@{[vsm3c_vi $V0, $V2, 1]}
+@{[vsm3c_vi $V0, $V4, 1]}
 # Prepare a vector with {w11, ..., w4}
-@{[vslidedown_vi $V2, $V2, 2]} # v2 := {X, X, X, X, w7, ..., w4}
+@{[vslidedown_vi $V4, $V4, 2]} # v4 := {X, X, X, X, w7, ..., w4}
-@{[vslideup_vi $V2, $V4, 4]} # v2 := {w11, w10, w9, w8, w7, w6, w5, w4}
+@{[vslideup_vi $V4, $V8, 4]} # v4 := {w11, w10, w9, w8, w7, w6, w5, w4}
-@{[vsm3c_vi $V0, $V2, 2]}
+@{[vsm3c_vi $V0, $V4, 2]}
-@{[vslidedown_vi $V2, $V2, 2]} # v2 := {X, X, w11, w10, w9, w8, w7, w6}
+@{[vslidedown_vi $V4, $V4, 2]} # v4 := {X, X, w11, w10, w9, w8, w7, w6}
-@{[vsm3c_vi $V0, $V2, 3]}
+@{[vsm3c_vi $V0, $V4, 3]}
-@{[vsm3c_vi $V0, $V4, 4]}
+@{[vsm3c_vi $V0, $V8, 4]}
-@{[vslidedown_vi $V2, $V4, 2]} # v2 := {X, X, w15, w14, w13, w12, w11, w10}
+@{[vslidedown_vi $V4, $V8, 2]} # v4 := {X, X, w15, w14, w13, w12, w11, w10}
-@{[vsm3c_vi $V0, $V2, 5]}
+@{[vsm3c_vi $V0, $V4, 5]}
-@{[vsm3me_vv $V3, $V4, $V3]} # v3 := {w23, w22, w21, w20, w19, w18, w17, w16}
+@{[vsm3me_vv $V6, $V8, $V6]} # v6 := {w23, w22, w21, w20, w19, w18, w17, w16}
 # Prepare a register with {w19, w18, w17, w16, w15, w14, w13, w12}
-@{[vslidedown_vi $V2, $V2, 2]} # v2 := {X, X, X, X, w15, w14, w13, w12}
+@{[vslidedown_vi $V4, $V4, 2]} # v4 := {X, X, X, X, w15, w14, w13, w12}
-@{[vslideup_vi $V2, $V3, 4]} # v2 := {w19, w18, w17, w16, w15, w14, w13, w12}
+@{[vslideup_vi $V4, $V6, 4]} # v4 := {w19, w18, w17, w16, w15, w14, w13, w12}
-@{[vsm3c_vi $V0, $V2, 6]}
+@{[vsm3c_vi $V0, $V4, 6]}
-@{[vslidedown_vi $V2, $V2, 2]} # v2 := {X, X, w19, w18, w17, w16, w15, w14}
+@{[vslidedown_vi $V4, $V4, 2]} # v4 := {X, X, w19, w18, w17, w16, w15, w14}
-@{[vsm3c_vi $V0, $V2, 7]}
+@{[vsm3c_vi $V0, $V4, 7]}
-@{[vsm3c_vi $V0, $V3, 8]}
+@{[vsm3c_vi $V0, $V6, 8]}
-@{[vslidedown_vi $V2, $V3, 2]} # v2 := {X, X, w23, w22, w21, w20, w19, w18}
+@{[vslidedown_vi $V4, $V6, 2]} # v4 := {X, X, w23, w22, w21, w20, w19, w18}
-@{[vsm3c_vi $V0, $V2, 9]}
+@{[vsm3c_vi $V0, $V4, 9]}
-@{[vsm3me_vv $V4, $V3, $V4]} # v4 := {w31, w30, w29, w28, w27, w26, w25, w24}
+@{[vsm3me_vv $V8, $V6, $V8]} # v8 := {w31, w30, w29, w28, w27, w26, w25, w24}
 # Prepare a register with {w27, w26, w25, w24, w23, w22, w21, w20}
-@{[vslidedown_vi $V2, $V2, 2]} # v2 := {X, X, X, X, w23, w22, w21, w20}
+@{[vslidedown_vi $V4, $V4, 2]} # v4 := {X, X, X, X, w23, w22, w21, w20}
-@{[vslideup_vi $V2, $V4, 4]} # v2 := {w27, w26, w25, w24, w23, w22, w21, w20}
+@{[vslideup_vi $V4, $V8, 4]} # v4 := {w27, w26, w25, w24, w23, w22, w21, w20}
-@{[vsm3c_vi $V0, $V2, 10]}
+@{[vsm3c_vi $V0, $V4, 10]}
-@{[vslidedown_vi $V2, $V2, 2]} # v2 := {X, X, w27, w26, w25, w24, w23, w22}
+@{[vslidedown_vi $V4, $V4, 2]} # v4 := {X, X, w27, w26, w25, w24, w23, w22}
-@{[vsm3c_vi $V0, $V2, 11]}
+@{[vsm3c_vi $V0, $V4, 11]}
-@{[vsm3c_vi $V0, $V4, 12]}
+@{[vsm3c_vi $V0, $V8, 12]}
-@{[vslidedown_vi $V2, $V4, 2]} # v2 := {x, X, w31, w30, w29, w28, w27, w26}
+@{[vslidedown_vi $V4, $V8, 2]} # v4 := {x, X, w31, w30, w29, w28, w27, w26}
-@{[vsm3c_vi $V0, $V2, 13]}
+@{[vsm3c_vi $V0, $V4, 13]}
-@{[vsm3me_vv $V3, $V4, $V3]} # v3 := {w32, w33, w34, w35, w36, w37, w38, w39}
+@{[vsm3me_vv $V6, $V8, $V6]} # v6 := {w32, w33, w34, w35, w36, w37, w38, w39}
 # Prepare a register with {w35, w34, w33, w32, w31, w30, w29, w28}
-@{[vslidedown_vi $V2, $V2, 2]} # v2 := {X, X, X, X, w31, w30, w29, w28}
+@{[vslidedown_vi $V4, $V4, 2]} # v4 := {X, X, X, X, w31, w30, w29, w28}
-@{[vslideup_vi $V2, $V3, 4]} # v2 := {w35, w34, w33, w32, w31, w30, w29, w28}
+@{[vslideup_vi $V4, $V6, 4]} # v4 := {w35, w34, w33, w32, w31, w30, w29, w28}
-@{[vsm3c_vi $V0, $V2, 14]}
+@{[vsm3c_vi $V0, $V4, 14]}
-@{[vslidedown_vi $V2, $V2, 2]} # v2 := {X, X, w35, w34, w33, w32, w31, w30}
+@{[vslidedown_vi $V4, $V4, 2]} # v4 := {X, X, w35, w34, w33, w32, w31, w30}
-@{[vsm3c_vi $V0, $V2, 15]}
+@{[vsm3c_vi $V0, $V4, 15]}
-@{[vsm3c_vi $V0, $V3, 16]}
+@{[vsm3c_vi $V0, $V6, 16]}
-@{[vslidedown_vi $V2, $V3, 2]} # v2 := {X, X, w39, w38, w37, w36, w35, w34}
+@{[vslidedown_vi $V4, $V6, 2]} # v4 := {X, X, w39, w38, w37, w36, w35, w34}
-@{[vsm3c_vi $V0, $V2, 17]}
+@{[vsm3c_vi $V0, $V4, 17]}
-@{[vsm3me_vv $V4, $V3, $V4]} # v4 := {w47, w46, w45, w44, w43, w42, w41, w40}
+@{[vsm3me_vv $V8, $V6, $V8]} # v8 := {w47, w46, w45, w44, w43, w42, w41, w40}
 # Prepare a register with {w43, w42, w41, w40, w39, w38, w37, w36}
-@{[vslidedown_vi $V2, $V2, 2]} # v2 := {X, X, X, X, w39, w38, w37, w36}
+@{[vslidedown_vi $V4, $V4, 2]} # v4 := {X, X, X, X, w39, w38, w37, w36}
-@{[vslideup_vi $V2, $V4, 4]} # v2 := {w43, w42, w41, w40, w39, w38, w37, w36}
+@{[vslideup_vi $V4, $V8, 4]} # v4 := {w43, w42, w41, w40, w39, w38, w37, w36}
-@{[vsm3c_vi $V0, $V2, 18]}
+@{[vsm3c_vi $V0, $V4, 18]}
-@{[vslidedown_vi $V2, $V2, 2]} # v2 := {X, X, w43, w42, w41, w40, w39, w38}
+@{[vslidedown_vi $V4, $V4, 2]} # v4 := {X, X, w43, w42, w41, w40, w39, w38}
-@{[vsm3c_vi $V0, $V2, 19]}
+@{[vsm3c_vi $V0, $V4, 19]}
-@{[vsm3c_vi $V0, $V4, 20]}
+@{[vsm3c_vi $V0, $V8, 20]}
-@{[vslidedown_vi $V2, $V4, 2]} # v2 := {X, X, w47, w46, w45, w44, w43, w42}
+@{[vslidedown_vi $V4, $V8, 2]} # v4 := {X, X, w47, w46, w45, w44, w43, w42}
-@{[vsm3c_vi $V0, $V2, 21]}
+@{[vsm3c_vi $V0, $V4, 21]}
-@{[vsm3me_vv $V3, $V4, $V3]} # v3 := {w55, w54, w53, w52, w51, w50, w49, w48}
+@{[vsm3me_vv $V6, $V8, $V6]} # v6 := {w55, w54, w53, w52, w51, w50, w49, w48}
 # Prepare a register with {w51, w50, w49, w48, w47, w46, w45, w44}
-@{[vslidedown_vi $V2, $V2, 2]} # v2 := {X, X, X, X, w47, w46, w45, w44}
+@{[vslidedown_vi $V4, $V4, 2]} # v4 := {X, X, X, X, w47, w46, w45, w44}
-@{[vslideup_vi $V2, $V3, 4]} # v2 := {w51, w50, w49, w48, w47, w46, w45, w44}
+@{[vslideup_vi $V4, $V6, 4]} # v4 := {w51, w50, w49, w48, w47, w46, w45, w44}
-@{[vsm3c_vi $V0, $V2, 22]}
+@{[vsm3c_vi $V0, $V4, 22]}
-@{[vslidedown_vi $V2, $V2, 2]} # v2 := {X, X, w51, w50, w49, w48, w47, w46}
+@{[vslidedown_vi $V4, $V4, 2]} # v4 := {X, X, w51, w50, w49, w48, w47, w46}
-@{[vsm3c_vi $V0, $V2, 23]}
+@{[vsm3c_vi $V0, $V4, 23]}
-@{[vsm3c_vi $V0, $V3, 24]}
+@{[vsm3c_vi $V0, $V6, 24]}
-@{[vslidedown_vi $V2, $V3, 2]} # v2 := {X, X, w55, w54, w53, w52, w51, w50}
+@{[vslidedown_vi $V4, $V6, 2]} # v4 := {X, X, w55, w54, w53, w52, w51, w50}
-@{[vsm3c_vi $V0, $V2, 25]}
+@{[vsm3c_vi $V0, $V4, 25]}
-@{[vsm3me_vv $V4, $V3, $V4]} # v4 := {w63, w62, w61, w60, w59, w58, w57, w56}
+@{[vsm3me_vv $V8, $V6, $V8]} # v8 := {w63, w62, w61, w60, w59, w58, w57, w56}
 # Prepare a register with {w59, w58, w57, w56, w55, w54, w53, w52}
-@{[vslidedown_vi $V2, $V2, 2]} # v2 := {X, X, X, X, w55, w54, w53, w52}
+@{[vslidedown_vi $V4, $V4, 2]} # v4 := {X, X, X, X, w55, w54, w53, w52}
-@{[vslideup_vi $V2, $V4, 4]} # v2 := {w59, w58, w57, w56, w55, w54, w53, w52}
+@{[vslideup_vi $V4, $V8, 4]} # v4 := {w59, w58, w57, w56, w55, w54, w53, w52}
-@{[vsm3c_vi $V0, $V2, 26]}
+@{[vsm3c_vi $V0, $V4, 26]}
-@{[vslidedown_vi $V2, $V2, 2]} # v2 := {X, X, w59, w58, w57, w56, w55, w54}
+@{[vslidedown_vi $V4, $V4, 2]} # v4 := {X, X, w59, w58, w57, w56, w55, w54}
-@{[vsm3c_vi $V0, $V2, 27]}
+@{[vsm3c_vi $V0, $V4, 27]}
-@{[vsm3c_vi $V0, $V4, 28]}
+@{[vsm3c_vi $V0, $V8, 28]}
-@{[vslidedown_vi $V2, $V4, 2]} # v2 := {X, X, w63, w62, w61, w60, w59, w58}
+@{[vslidedown_vi $V4, $V8, 2]} # v4 := {X, X, w63, w62, w61, w60, w59, w58}
-@{[vsm3c_vi $V0, $V2, 29]}
+@{[vsm3c_vi $V0, $V4, 29]}
-@{[vsm3me_vv $V3, $V4, $V3]} # v3 := {w71, w70, w69, w68, w67, w66, w65, w64}
+@{[vsm3me_vv $V6, $V8, $V6]} # v6 := {w71, w70, w69, w68, w67, w66, w65, w64}
 # Prepare a register with {w67, w66, w65, w64, w63, w62, w61, w60}
-@{[vslidedown_vi $V2, $V2, 2]} # v2 := {X, X, X, X, w63, w62, w61, w60}
+@{[vslidedown_vi $V4, $V4, 2]} # v4 := {X, X, X, X, w63, w62, w61, w60}
-@{[vslideup_vi $V2, $V3, 4]} # v2 := {w67, w66, w65, w64, w63, w62, w61, w60}
+@{[vslideup_vi $V4, $V6, 4]} # v4 := {w67, w66, w65, w64, w63, w62, w61, w60}
-@{[vsm3c_vi $V0, $V2, 30]}
+@{[vsm3c_vi $V0, $V4, 30]}
-@{[vslidedown_vi $V2, $V2, 2]} # v2 := {X, X, w67, w66, w65, w64, w63, w62}
+@{[vslidedown_vi $V4, $V4, 2]} # v4 := {X, X, w67, w66, w65, w64, w63, w62}
-@{[vsm3c_vi $V0, $V2, 31]}
+@{[vsm3c_vi $V0, $V4, 31]}
 # XOR in the previous state.
-@{[vxor_vv $V0, $V0, $V1]}
+@{[vxor_vv $V0, $V0, $V2]}
 bnez $NUM, L_sm3_loop # Check if there are any more block to process
 L_sm3_end:
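
The heart of the change above is the vsetivli switch from e32/m1 to e32/m2,
together with the move to even-numbered vector operands. A register group
holds VLEN * LMUL / SEW elements and the SM3 rounds need eight 32-bit words
per group, so m1 forces VLEN >= 256 while m2 gets by with VLEN >= 128. The
short C check below just evaluates that formula; it is an illustrative sketch,
not part of the patch.

    /* Worked check: how many 32-bit elements fit in one vector register
     * group, elements = VLEN * LMUL / SEW (RISC-V V spec). SM3 needs 8. */
    #include <stdio.h>

    static unsigned elements_per_group(unsigned vlen, unsigned lmul, unsigned sew)
    {
        return vlen * lmul / sew;
    }

    int main(void)
    {
        /* Old code: e32/m1 -> needs VLEN >= 256 to hold 8 words per register. */
        printf("VLEN=256, m1: %u elements\n", elements_per_group(256, 1, 32));
        /* New code: e32/m2 -> a two-register group holds 8 words at VLEN=128. */
        printf("VLEN=128, m2: %u elements\n", elements_per_group(128, 2, 32));
        /* This is also why the operands were renumbered to v0, v2, v4, v6, v8:
         * with LMUL=2 each operand must be an even-aligned register pair. */
        return 0;
    }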


@@ -21,7 +21,7 @@ void ossl_hwsm3_block_data_order(SM3_CTX *c, const void *p, size_t num);
 void ossl_hwsm3_block_data_order(SM3_CTX *c, const void *p, size_t num)
 {
-    if (RISCV_HAS_ZVBB_AND_ZVKSH() && riscv_vlen() >= 256) {
+    if (RISCV_HAS_ZVKB_AND_ZVKSH() && riscv_vlen() >= 128) {
         ossl_hwsm3_block_data_order_zvksh(c, p, num);
     } else {
         ossl_sm3_block_data_order(c, p, num);