C64x+ assembler pack. The linux-c64xplus build is *not* tested, nor can it
be tested, because the kernel is not in shape to handle it *yet*. The code
is committed mostly to stimulate kernel development.
Andy Polyakov 2012-04-18 13:01:36 +00:00
parent d3ddf0228e
commit 3e181369dd
10 changed files with 3399 additions and 0 deletions

Configure

@@ -399,6 +399,9 @@ my %table=(
"linux-alpha+bwx-gcc","gcc:-O3 -DL_ENDIAN -DTERMIO::-D_REENTRANT::-ldl:SIXTY_FOUR_BIT_LONG RC4_CHAR RC4_CHUNK DES_RISC1 DES_UNROLL:${alpha_asm}:dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",
"linux-alpha-ccc","ccc:-fast -readonly_strings -DL_ENDIAN -DTERMIO::-D_REENTRANT:::SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_PTR DES_RISC1 DES_UNROLL:${alpha_asm}",
"linux-alpha+bwx-ccc","ccc:-fast -readonly_strings -DL_ENDIAN -DTERMIO::-D_REENTRANT:::SIXTY_FOUR_BIT_LONG RC4_CHAR RC4_CHUNK DES_INT DES_PTR DES_RISC1 DES_UNROLL:${alpha_asm}",
#
# TI_CGT_C6000_7.3.x is a requirement
"linux-c64xplus","cl6x:--linux --strip_coff_underscore -ea=.s -eo=.o -mv6400+ -o2 -ox -ms -pden -DOPENSSL_SMALL_FOOTPRINT::-D_REENTRANT:::BN_LLONG:c64xpluscpuid.o:bn-c64xplus.o c64xplus-gf2m.o::aes-c64xplus.o aes_cbc.o aes_ctr.o:::sha1-c64xplus.o sha256-c64xplus.o sha512-c64xplus.o:::::::ghash-c64xplus.o::void:dlfcn:linux-shared:--pic:-z --sysv --shared:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR):true",
# Android: linux-* but without -DTERMIO and pointers to headers and libs.
"android","gcc:-mandroid -I\$(ANDROID_DEV)/include -B\$(ANDROID_DEV)/lib -O3 -fomit-frame-pointer -Wall::-D_REENTRANT::-ldl:BN_LLONG RC4_CHAR RC4_CHUNK DES_INT DES_UNROLL BF_PTR:${no_asm}:dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",

TABLE

@@ -3927,6 +3927,39 @@ $ranlib =
$arflags =
$multilib =
*** linux-c64xplus
$cc = cl6x
$cflags = --linux --strip_coff_underscore -ea=.s -eo=.o -mv6400+ -o2 -ox -ms -pden -DOPENSSL_SMALL_FOOTPRINT
$unistd =
$thread_cflag = -D_REENTRANT
$sys_id =
$lflags =
$bn_ops = BN_LLONG
$cpuid_obj = c64xpluscpuid.o
$bn_obj = bn-c64xplus.o c64xplus-gf2m.o
$des_obj =
$aes_obj = aes-c64xplus.o aes_cbc.o aes_ctr.o
$bf_obj =
$md5_obj =
$sha1_obj = sha1-c64xplus.o sha256-c64xplus.o sha512-c64xplus.o
$cast_obj =
$rc4_obj =
$rmd160_obj =
$rc5_obj =
$wp_obj =
$cmll_obj =
$modes_obj = ghash-c64xplus.o
$engines_obj =
$perlasm_scheme = void
$dso_scheme = dlfcn
$shared_target= linux-shared
$shared_cflag = --pic
$shared_ldflag = -z --sysv --shared
$shared_extension = .so.$(SHLIB_MAJOR).$(SHLIB_MINOR)
$ranlib = true
$arflags =
$multilib =
*** linux-elf
$cc = gcc
$cflags = -DL_ENDIAN -DTERMIO -O3 -fomit-frame-pointer -Wall

aes-c64xplus.pl (file diff suppressed because it is too large)

bn-c64xplus.asm

@@ -0,0 +1,333 @@
;;====================================================================
;; Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
;; project.
;;
;; Rights for redistribution and usage in source and binary forms are
;; granted according to the OpenSSL license. Warranty of any kind is
;; disclaimed.
;;====================================================================
;; A compiler-generated multiply-and-add SPLOOP runs at 12*n cycles, n
;; being the number of 32-bit words; addition runs at 8*n. The
;; corresponding 4x-unrolled SPLOOP-free loops run at ~8*n and ~5*n.
;; The assembler SPLOOPs below spin at ... 2*n cycles [plus epilogue].
;;====================================================================
.text
.asg B3,RA
.asg A4,ARG0
.asg B4,ARG1
.asg A6,ARG2
.asg B6,ARG3
.asg A8,ARG4
.asg B8,ARG5
.asg A4,RET
.asg A15,FP
.asg B14,DP
.asg B15,SP
.global _bn_mul_add_words
_bn_mul_add_words:
.asmfunc
MV ARG2,B0
[!B0] BNOP RA
||[!B0] MVK 0,RET
[B0] MVC B0,ILC
[B0] ZERO A19 ; high part of accumulator
|| [B0] MV ARG0,A2
|| [B0] MV ARG3,A3
NOP 3
SPLOOP 2 ; 2*n+10
;;====================================================================
LDW *ARG1++,B7 ; ap[i]
NOP 3
LDW *ARG0++,A7 ; rp[i]
MPY32U B7,A3,A17:A16
NOP 3 ; [2,0] in epilogue
ADDU A16,A7,A21:A20
ADDU A19,A21:A20,A19:A18
|| MV.S A17,A23
SPKERNEL 2,1 ; leave slot for "return value"
|| STW A18,*A2++ ; rp[i]
|| ADD A19,A23,A19
;;====================================================================
BNOP RA,4
MV A19,RET ; return value
.endasmfunc
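
Aside (not part of the committed file): a minimal C sketch of the semantics the SPLOOP above streams, assuming OpenSSL's usual bn_mul_add_words prototype with a 32-bit BN_ULONG; the _ref name is ours.

#include <stdint.h>

/* rp[i] += ap[i]*w with a 64-bit accumulator; returns the final carry */
uint32_t bn_mul_add_words_ref(uint32_t *rp, const uint32_t *ap,
                              int num, uint32_t w)
{
    uint64_t acc = 0;                 /* high part lives in A19 above */
    for (int i = 0; i < num; i++) {
        acc += (uint64_t)ap[i] * w + rp[i];
        rp[i] = (uint32_t)acc;        /* STW A18,*A2++ */
        acc >>= 32;                   /* carry into the next iteration */
    }
    return (uint32_t)acc;             /* MV A19,RET */
}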
.global _bn_mul_words
_bn_mul_words:
.asmfunc
MV ARG2,B0
[!B0] BNOP RA
||[!B0] MVK 0,RET
[B0] MVC B0,ILC
[B0] ZERO A19 ; high part of accumulator
NOP 3
SPLOOP 2 ; 2*n+10
;;====================================================================
LDW *ARG1++,A7 ; ap[i]
NOP 4
MPY32U A7,ARG3,A17:A16
NOP 4 ; [2,0] in epilogue
ADDU A19,A16,A19:A18
|| MV.S A17,A21
SPKERNEL 2,1 ; leave slot for "return value"
|| STW A18,*ARG0++ ; rp[i]
|| ADD.L A19,A21,A19
;;====================================================================
BNOP RA,4
MV A19,RET ; return value
.endasmfunc
.global _bn_sqr_words
_bn_sqr_words:
.asmfunc
MV ARG2,B0
[!B0] BNOP RA
||[!B0] MVK 0,RET
[B0] MVC B0,ILC
[B0] MV ARG0,B2
|| [B0] ADD 4,ARG0,ARG0
NOP 3
SPLOOP 2 ; 2*n+10
;;====================================================================
LDW *ARG1++,B7 ; ap[i]
NOP 4
MPY32U B7,B7,B1:B0
NOP 3 ; [2,0] in epilogue
STW B0,*B2++(8) ; rp[2*i]
MV B1,A1
SPKERNEL 2,0 ; fully overlap BNOP RA,5
|| STW A1,*ARG0++(8) ; rp[2*i+1]
;;====================================================================
BNOP RA,5
.endasmfunc
.global _bn_add_words
_bn_add_words:
.asmfunc
MV ARG3,B0
[!B0] BNOP RA
||[!B0] MVK 0,RET
[B0] MVC B0,ILC
[B0] ZERO A1 ; carry flag
|| [B0] MV ARG0,A3
NOP 3
SPLOOP 2 ; 2*n+6
;;====================================================================
LDW *ARG2++,A7 ; bp[i]
|| LDW *ARG1++,B7 ; ap[i]
NOP 4
ADDU A7,B7,A9:A8
ADDU A1,A9:A8,A1:A0
SPKERNEL 0,0 ; fully overlap BNOP RA,5
|| STW A0,*A3++ ; write result
|| MV A1,RET ; keep carry flag in RET
;;====================================================================
BNOP RA,5
.endasmfunc
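
Aside (not part of the committed file): the carry-chain semantics of bn_add_words as a hedged C sketch; bn_sub_words below is the same shape with subtraction and a borrow flag. The _ref name is ours.

#include <stdint.h>

uint32_t bn_add_words_ref(uint32_t *rp, const uint32_t *ap,
                          const uint32_t *bp, int num)
{
    uint32_t carry = 0;                    /* A1 in the code above */
    for (int i = 0; i < num; i++) {
        uint64_t s = (uint64_t)ap[i] + bp[i] + carry;
        rp[i] = (uint32_t)s;               /* STW A0,*A3++ */
        carry = (uint32_t)(s >> 32);       /* kept in RET every spin */
    }
    return carry;                          /* final carry flag */
}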
.global _bn_sub_words
_bn_sub_words:
.asmfunc
MV ARG3,B0
[!B0] BNOP RA
||[!B0] MVK 0,RET
[B0] MVC B0,ILC
[B0] ZERO A2 ; borrow flag
|| [B0] MV ARG0,A3
NOP 3
SPLOOP 2 ; 2*n+6
;;====================================================================
LDW *ARG2++,A7 ; bp[i]
|| LDW *ARG1++,B7 ; ap[i]
NOP 4
SUBU B7,A7,A1:A0
[A2] SUB A1:A0,1,A1:A0
SPKERNEL 0,1 ; leave slot for "return borrow flag"
|| STW A0,*A3++ ; write result
|| AND 1,A1,A2 ; pass on borrow flag
;;====================================================================
BNOP RA,4
AND 1,A1,RET ; return borrow flag
.endasmfunc
.global _bn_div_words
.global __divull
_bn_div_words:
.asmfunc
CALLP __divull,A3 ; jump to rts64plus.lib
|| MV ARG0,A5
|| MV ARG1,ARG0
|| MV ARG2,ARG1
|| ZERO B5
.endasmfunc
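
Aside (not part of the committed file): the tail call above merely marshals the arguments and lets the run-time library perform the divide; a C sketch of the intended semantics, assuming the usual bn_div_words contract (h < d), with a name of our own choosing.

#include <stdint.h>

uint32_t bn_div_words_ref(uint32_t h, uint32_t l, uint32_t d)
{
    uint64_t n = ((uint64_t)h << 32) | l;  /* A5:A4 after the MVs above */
    return (uint32_t)(n / d);              /* roughly what __divull does */
}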
;;====================================================================
;; Not really the Comba algorithm, just straightforward NxM... Dedicated
;; fully unrolled real Comba implementations are asymptotically 2x
;; faster, but a naturally larger undertaking. The purpose of this
;; exercise was rather to learn to master nested SPLOOPs...
;;====================================================================
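
Aside (not part of the committed file): a C sketch of the straightforward NxM schoolbook product the nested SPLOOPs implement; rp is zeroed up front here for clarity, whereas the code instead suppresses the rp[] load on the first outer pass. The _ref name is ours.

#include <stdint.h>

void bn_mul_nxm_ref(uint32_t *rp, const uint32_t *ap, const uint32_t *bp,
                    int m /* words in ap */, int n /* words in bp */)
{
    for (int i = 0; i < n + m; i++) rp[i] = 0;
    for (int j = 0; j < m; j++) {          /* outer? loop, counter A0 */
        uint64_t acc = 0;                  /* B19 in the inner SPLOOP */
        for (int i = 0; i < n; i++) {      /* bn_mul_add_words-style pass */
            acc += (uint64_t)bp[i] * ap[j] + rp[i + j];
            rp[i + j] = (uint32_t)acc;
            acc >>= 32;
        }
        rp[n + j] = (uint32_t)acc;         /* STW B19 as the pass drains */
    }
}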
.global _bn_sqr_comba8
.global _bn_mul_comba8
_bn_sqr_comba8:
MV ARG1,ARG2
_bn_mul_comba8:
.asmfunc
MVK 8,B0 ; N, RILC
|| MVK 8,A0 ; M, outer loop counter
|| MV ARG1,A5 ; copy ap
|| MV ARG0,B4 ; copy rp
|| ZERO B19 ; high part of accumulator
MVC B0,RILC
|| SUB B0,2,B1 ; N-2, initial ILC
|| SUB B0,1,B2 ; const B2=N-1
|| LDW *A5++,B6 ; ap[0]
|| MV A0,A3 ; const A3=M
sploopNxM?: ; for best performance arrange M<=N
[A0] SPLOOPD 2 ; 2*n+10
|| MVC B1,ILC
|| ADDAW B4,B0,B5
|| ZERO B7
|| LDW *A5++,A9 ; pre-fetch ap[1]
|| ZERO A1
|| SUB A0,1,A0
;;====================================================================
;; SPLOOP from bn_mul_add_words, but with flipped A<>B register files.
;; This is because of Advisory 15 from TI publication SPRZ247I.
LDW *ARG2++,A7 ; bp[i]
NOP 3
[A1] LDW *B5++,B7 ; rp[i]
MPY32U A7,B6,B17:B16
NOP 3
ADDU B16,B7,B21:B20
ADDU B19,B21:B20,B19:B18
|| MV.S B17,B23
SPKERNEL
|| STW B18,*B4++ ; rp[i]
|| ADD.S B19,B23,B19
;;====================================================================
outer?: ; m*2*(n+1)+10
SUBAW ARG2,A3,ARG2 ; rewind bp to bp[0]
SPMASKR
|| CMPGT A0,1,A2 ; done pre-fetching ap[i+1]?
MVD A9,B6 ; move through .M unit(*)
[A2] LDW *A5++,A9 ; pre-fetch ap[i+1]
SUBAW B5,B2,B5 ; rewind rp to rp[1]
MVK 1,A1
[A0] BNOP.S1 outer?,4
|| [A0] SUB.L A0,1,A0
STW B19,*B4--[B2] ; rewind rp to rp[1]
|| ZERO.S B19 ; high part of accumulator
;; end of outer?
BNOP RA,5 ; return
.endasmfunc
;; (*) It should be noted that B6 is used as input to MPY32U in the
;; chronologically next cycle in the *preceding* SPLOOP iteration.
;; Normally such an arrangement would require DINT, but at this
;; point the SPLOOP is draining and interrupts are disabled
;; implicitly.
.global _bn_sqr_comba4
.global _bn_mul_comba4
_bn_sqr_comba4:
MV ARG1,ARG2
_bn_mul_comba4:
.asmfunc
.if 0
BNOP sploopNxM?,3
;; The above-mentioned m*2*(n+1)+10 does not apply in the n=m=4 case;
;; because of read-after-write penalties it's rather
;; n*2*(n+3)+10, or 66 cycles [plus various overheads]...
MVK 4,B0 ; N, RILC
|| MVK 4,A0 ; M, outer loop counter
|| MV ARG1,A5 ; copy ap
|| MV ARG0,B4 ; copy rp
|| ZERO B19 ; high part of accumulator
MVC B0,RILC
|| SUB B0,2,B1 ; first ILC
|| SUB B0,1,B2 ; const B2=N-1
|| LDW *A5++,B6 ; ap[0]
|| MV A0,A3 ; const A3=M
.else
;; This alternative is an exercise in a fully unrolled Comba
;; algorithm implementation that operates at n*(n+1)+12, or
;; as little as 32 cycles...
LDW *ARG1[0],B16 ; a[0]
|| LDW *ARG2[0],A16 ; b[0]
LDW *ARG1[1],B17 ; a[1]
|| LDW *ARG2[1],A17 ; b[1]
LDW *ARG1[2],B18 ; a[2]
|| LDW *ARG2[2],A18 ; b[2]
LDW *ARG1[3],B19 ; a[3]
|| LDW *ARG2[3],A19 ; b[3]
NOP
MPY32U A16,B16,A1:A0 ; a[0]*b[0]
MPY32U A17,B16,A23:A22 ; a[0]*b[1]
MPY32U A16,B17,A25:A24 ; a[1]*b[0]
MPY32U A16,B18,A27:A26 ; a[2]*b[0]
STW A0,*ARG0[0]
|| MPY32U A17,B17,A29:A28 ; a[1]*b[1]
MPY32U A18,B16,A31:A30 ; a[0]*b[2]
|| ADDU A22,A1,A1:A0
MV A23,B0
|| MPY32U A19,B16,A21:A20 ; a[3]*b[0]
|| ADDU A24,A1:A0,A1:A0
ADDU A25,B0,B1:B0
|| STW A0,*ARG0[1]
|| MPY32U A18,B17,A23:A22 ; a[2]*b[1]
|| ADDU A26,A1,A9:A8
ADDU A27,B1,B9:B8
|| MPY32U A17,B18,A25:A24 ; a[1]*b[2]
|| ADDU A28,A9:A8,A9:A8
ADDU A29,B9:B8,B9:B8
|| MPY32U A16,B19,A27:A26 ; a[0]*b[3]
|| ADDU A30,A9:A8,A9:A8
ADDU A31,B9:B8,B9:B8
|| ADDU B0,A9:A8,A9:A8
STW A8,*ARG0[2]
|| ADDU A20,A9,A1:A0
ADDU A21,B9,B1:B0
|| MPY32U A19,B17,A21:A20 ; a[3]*b[1]
|| ADDU A22,A1:A0,A1:A0
ADDU A23,B1:B0,B1:B0
|| MPY32U A18,B18,A23:A22 ; a[2]*b[2]
|| ADDU A24,A1:A0,A1:A0
ADDU A25,B1:B0,B1:B0
|| MPY32U A17,B19,A25:A24 ; a[1]*b[3]
|| ADDU A26,A1:A0,A1:A0
ADDU A27,B1:B0,B1:B0
|| ADDU B8,A1:A0,A1:A0
STW A0,*ARG0[3]
|| MPY32U A19,B18,A27:A26 ; a[3]*b[2]
|| ADDU A20,A1,A9:A8
ADDU A21,B1,B9:B8
|| MPY32U A18,B19,A29:A28 ; a[2]*b[3]
|| ADDU A22,A9:A8,A9:A8
ADDU A23,B9:B8,B9:B8
|| MPY32U A19,B19,A31:A30 ; a[3]*b[3]
|| ADDU A24,A9:A8,A9:A8
ADDU A25,B9:B8,B9:B8
|| ADDU B0,A9:A8,A9:A8
STW A8,*ARG0[4]
|| ADDU A26,A9,A1:A0
ADDU A27,B9,B1:B0
|| ADDU A28,A1:A0,A1:A0
ADDU A29,B1:B0,B1:B0
|| BNOP RA
|| ADDU B8,A1:A0,A1:A0
STW A0,*ARG0[5]
|| ADDU A30,A1,A9:A8
ADD A31,B1,B8
ADDU B0,A9:A8,A9:A8 ; removed || to avoid cross-path stall below
ADD B8,A9,A9
|| STW A8,*ARG0[6]
STW A9,*ARG0[7]
.endif
.endasmfunc

c64xplus-gf2m.pl

@@ -0,0 +1,146 @@
#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# February 2012
#
# The module implements bn_GF2m_mul_2x2 polynomial multiplication
# used in bn_gf2m.c. It's a kind of low-hanging mechanical port from
# C for the time being... The subroutine runs in 37 cycles, which is
# 4.5x faster than compiler-generated code, though the comparison is
# totally unfair, because this module utilizes the Galois Field
# Multiply instruction.
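
Aside (not part of the committed file): a hedged C sketch of what the module computes, i.e. a 64x64-bit carry-less (GF(2)[x]) product built from three 32x32 products and a Karatsuba combine. clmul32 is our software stand-in for the hardware Galois Field Multiply path (the code actually assembles each 32x32 product out of 16x8-bit XORMPYs); the argument order follows the register assignment below, and the _ref name is ours.

#include <stdint.h>

static uint64_t clmul32(uint32_t a, uint32_t b)  /* carry-less 32x32 */
{
    uint64_t r = 0;
    for (int i = 0; i < 32; i++)
        if ((b >> i) & 1)
            r ^= (uint64_t)a << i;
    return r;
}

void bn_GF2m_mul_2x2_ref(uint32_t r[4], uint32_t a1, uint32_t a0,
                         uint32_t b1, uint32_t b0)
{
    uint64_t lo  = clmul32(a0, b0);                     /* a0·b0 */
    uint64_t hi  = clmul32(a1, b1);                     /* a1·b1 */
    uint64_t mid = clmul32(a0 ^ a1, b0 ^ b1) ^ lo ^ hi; /* Karatsuba middle */
    r[0] = (uint32_t)lo;
    r[1] = (uint32_t)(lo >> 32) ^ (uint32_t)mid;
    r[2] = (uint32_t)(mid >> 32) ^ (uint32_t)hi;
    r[3] = (uint32_t)(hi >> 32);
}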
while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";
($rp,$a1,$a0,$b1,$b0)=("A4","B4","A6","B6","A8"); # argument vector
($Alo,$Alox0,$Alox1,$Alox2,$Alox3)=map("A$_",(16..20));
($Ahi,$Ahix0,$Ahix1,$Ahix2,$Ahix3)=map("B$_",(16..20));
($B_0,$B_1,$B_2,$B_3)=("B5","A5","A7","B7");
($A,$B)=($Alo,$B_1);
$xFF="B1";
sub mul_1x1_upper {
my ($A,$B)=@_;
$code.=<<___;
EXTU $B,8,24,$B_2 ; smash $B to 4 bytes
|| AND $B,$xFF,$B_0
|| SHRU $B,24,$B_3
SHRU $A,16, $Ahi ; smash $A to two halfwords
|| EXTU $A,16,16,$Alo
XORMPY $Alo,$B_2,$Alox2 ; 16x8-bit multiplication
|| XORMPY $Ahi,$B_2,$Ahix2
|| EXTU $B,16,24,$B_1
XORMPY $Alo,$B_0,$Alox0
|| XORMPY $Ahi,$B_0,$Ahix0
XORMPY $Alo,$B_3,$Alox3
|| XORMPY $Ahi,$B_3,$Ahix3
XORMPY $Alo,$B_1,$Alox1
|| XORMPY $Ahi,$B_1,$Ahix1
___
}
sub mul_1x1_merged {
my ($OUTlo,$OUThi,$A,$B)=@_;
$code.=<<___;
EXTU $B,8,24,$B_2 ; smash $B to 4 bytes
|| AND $B,$xFF,$B_0
|| SHRU $B,24,$B_3
SHRU $A,16, $Ahi ; smash $A to two halfwords
|| EXTU $A,16,16,$Alo
XOR $Ahix0,$Alox2,$Ahix0
|| MV $Ahix2,$OUThi
|| XORMPY $Alo,$B_2,$Alox2
XORMPY $Ahi,$B_2,$Ahix2
|| EXTU $B,16,24,$B_1
|| XORMPY $Alo,$B_0,A1 ; $Alox0
XOR $Ahix1,$Alox3,$Ahix1
|| SHL $Ahix0,16,$OUTlo
|| SHRU $Ahix0,16,$Ahix0
XOR $Alox0,$OUTlo,$OUTlo
|| XOR $Ahix0,$OUThi,$OUThi
|| XORMPY $Ahi,$B_0,$Ahix0
|| XORMPY $Alo,$B_3,$Alox3
|| SHL $Alox1,8,$Alox1
|| SHL $Ahix3,8,$Ahix3
XOR $Alox1,$OUTlo,$OUTlo
|| XOR $Ahix3,$OUThi,$OUThi
|| XORMPY $Ahi,$B_3,$Ahix3
|| SHL $Ahix1,24,$Alox1
|| SHRU $Ahix1,8, $Ahix1
XOR $Alox1,$OUTlo,$OUTlo
|| XOR $Ahix1,$OUThi,$OUThi
|| XORMPY $Alo,$B_1,$Alox1
|| XORMPY $Ahi,$B_1,$Ahix1
|| MV A1,$Alox0
___
}
sub mul_1x1_lower {
my ($OUTlo,$OUThi)=@_;
$code.=<<___;
;NOP
XOR $Ahix0,$Alox2,$Ahix0
|| MV $Ahix2,$OUThi
NOP
XOR $Ahix1,$Alox3,$Ahix1
|| SHL $Ahix0,16,$OUTlo
|| SHRU $Ahix0,16,$Ahix0
XOR $Alox0,$OUTlo,$OUTlo
|| XOR $Ahix0,$OUThi,$OUThi
|| SHL $Alox1,8,$Alox1
|| SHL $Ahix3,8,$Ahix3
XOR $Alox1,$OUTlo,$OUTlo
|| XOR $Ahix3,$OUThi,$OUThi
|| SHL $Ahix1,24,$Alox1
|| SHRU $Ahix1,8, $Ahix1
XOR $Alox1,$OUTlo,$OUTlo
|| XOR $Ahix1,$OUThi,$OUThi
___
}
$code.=<<___;
.text
.global _bn_GF2m_mul_2x2
_bn_GF2m_mul_2x2:
.asmfunc
MVK 0xFF,$xFF
___
&mul_1x1_upper($a0,$b0); # a0·b0
$code.=<<___;
|| MV $b1,$B
MV $a1,$A
___
&mul_1x1_merged("A28","B28",$A,$B); # a0·b0/a1·b1
$code.=<<___;
|| XOR $b0,$b1,$B
XOR $a0,$a1,$A
___
&mul_1x1_merged("A31","B31",$A,$B); # a1·b1/(a0+a1)·(b0+b1)
$code.=<<___;
XOR A28,A31,A29
|| XOR B28,B31,B29 ; a0·b0+a1·b1
___
&mul_1x1_lower("A30","B30"); # (a0+a1)·(b0+b1)
$code.=<<___;
|| BNOP B3
XOR A29,A30,A30
|| XOR B29,B30,B30 ; (a0+a1)·(b0+b1)-a0·b0-a1·b1
XOR B28,A30,A30
|| STW A28,*${rp}[0]
XOR B30,A31,A31
|| STW A30,*${rp}[1]
STW A31,*${rp}[2]
STW B31,*${rp}[3]
.endasmfunc
___
print $code;
close STDOUT;

crypto/c64xpluscpuid.pl

@@ -0,0 +1,246 @@
#!/usr/bin/env perl
#
while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";
$code.=<<___;
.text
.asg B3,RA
.global _OPENSSL_rdtsc
_OPENSSL_rdtsc:
.asmfunc
B RA
MVC TSCL,B0
MVC TSCH,B1
[!B0] MVC B0,TSCL ; start TSC
MV B0,A4
MV B1,A5
.endasmfunc
.global _OPENSSL_cleanse
_OPENSSL_cleanse:
.asmfunc
ZERO A3:A2
|| ZERO B2
|| SHRU B4,3,B0 ; is length >= 8
|| ADD 1,A4,B6
[!B0] BNOP RA
|| ZERO A1
|| ZERO B1
[B0] MVC B0,ILC
||[!B0] CMPLT 0,B4,A1
||[!B0] CMPLT 1,B4,B1
[A1] STB A2,*A4++[2]
|| [B1] STB B2,*B6++[2]
||[!B0] CMPLT 2,B4,A1
||[!B0] CMPLT 3,B4,B1
[A1] STB A2,*A4++[2]
|| [B1] STB B2,*B6++[2]
||[!B0] CMPLT 4,B4,A1
||[!B0] CMPLT 5,B4,B1
[A1] STB A2,*A4++[2]
|| [B1] STB B2,*B6++[2]
||[!B0] CMPLT 6,B4,A1
[A1] STB A2,*A4++[2]
SPLOOP 1
STNDW A3:A2,*A4++
|| SUB B4,8,B4
SPKERNEL
MV B4,B0 ; remaining bytes
|| ADD 1,A4,B6
|| BNOP RA
[B0] CMPLT 0,B0,A1
|| [B0] CMPLT 1,B0,B1
[A1] STB A2,*A4++[2]
|| [B1] STB B2,*B6++[2]
|| [B0] CMPLT 2,B0,A1
|| [B0] CMPLT 3,B0,B1
[A1] STB A2,*A4++[2]
|| [B1] STB B2,*B6++[2]
|| [B0] CMPLT 4,B0,A1
|| [B0] CMPLT 5,B0,B1
[A1] STB A2,*A4++[2]
|| [B1] STB B2,*B6++[2]
|| [B0] CMPLT 6,B0,A1
[A1] STB A2,*A4++[2]
.endasmfunc
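
Aside (not part of the committed file): functionally the routine above is just a memset-to-zero that the optimizer may not elide; the predicated STBs mop up the sub-8-byte head and tail while the SPLOOP streams 8-byte STNDWs. A trivial C model, with a name of our own choosing:

#include <stddef.h>

void OPENSSL_cleanse_ref(void *p, size_t len)
{
    volatile unsigned char *q = p;   /* volatile keeps the stores alive */
    while (len--)
        *q++ = 0;
}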
.global _OPENSSL_atomic_add
_OPENSSL_atomic_add:
.asmfunc
MV A4,B0
atomic_add?:
LL *B0,B5
NOP 4
ADD B4,B5,B5
SL B5,*B0
CMTL *B0,B1
NOP 4
[!B1] B atomic_add?
[B1] BNOP RA,4
MV B5,A4
.endasmfunc
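
Aside (not part of the committed file): LL/SL/CMTL above form a load-linked/store-conditional sequence that is retried until the commit succeeds. A C model of the retry loop, using a GCC builtin as a stand-in for the conditional commit; the _ref name is ours.

int OPENSSL_atomic_add_ref(volatile int *p, int v)
{
    int old, updated;
    do {
        old = *p;                    /* LL  *B0,B5 */
        updated = old + v;           /* ADD B4,B5,B5 */
        /* SL + CMTL: B1 is non-zero when the linked store committed */
    } while (!__sync_bool_compare_and_swap(p, old, updated));
    return updated;                  /* MV B5,A4 */
}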
.global _OPENSSL_wipe_cpu
_OPENSSL_wipe_cpu:
.asmfunc
ZERO A0
|| ZERO B0
|| ZERO A1
|| ZERO B1
ZERO A3:A2
|| MVD B0,B2
|| ZERO A4
|| ZERO B4
|| ZERO A5
|| ZERO B5
|| BNOP RA
ZERO A7:A6
|| ZERO B7:B6
|| ZERO A8
|| ZERO B8
|| ZERO A9
|| ZERO B9
ZERO A17:A16
|| ZERO B17:B16
|| ZERO A18
|| ZERO B18
|| ZERO A19
|| ZERO B19
ZERO A21:A20
|| ZERO B21:B20
|| ZERO A22
|| ZERO B22
|| ZERO A23
|| ZERO B23
ZERO A25:A24
|| ZERO B25:B24
|| ZERO A26
|| ZERO B26
|| ZERO A27
|| ZERO B27
ZERO A29:A28
|| ZERO B29:B28
|| ZERO A30
|| ZERO B30
|| ZERO A31
|| ZERO B31
.endasmfunc
CLFLUSH .macro CONTROL,ADDR,LEN
B passthrough?
|| STW ADDR,*CONTROL[0]
STW LEN,*CONTROL[1]
spinlock?:
LDW *CONTROL[1],A0
NOP 3
passthrough?:
NOP
[A0] BNOP spinlock?,5
.endm
.global _OPENSSL_instrument_bus
_OPENSSL_instrument_bus:
.asmfunc
MV B4,B0 ; reassign sizeof(output)
|| MV A4,B4 ; reassign output
|| MVK 0x00004030,A3
MV B0,A4 ; return value
|| MVK 1,A1
|| MVKH 0x01840000,A3 ; L1DWIBAR
MVC TSCL,B8 ; collect 1st tick
|| MVK 0x00004010,A5
MV B8,B9 ; lasttick = tick
|| MVK 0,B7 ; lastdiff = 0
|| MVKH 0x01840000,A5 ; L2WIBAR
CLFLUSH A3,B4,A1 ; write-back and invalidate L1D line
CLFLUSH A5,B4,A1 ; write-back and invalidate L2 line
LL *B4,B5
NOP 4
ADD B7,B5,B5
SL B5,*B4
CMTL *B4,B1
NOP 4
STW B5,*B4
bus_loop1?:
MVC TSCL,B8
|| [B0] SUB B0,1,B0
SUB B8,B9,B7 ; lastdiff = tick - lasttick
|| MV B8,B9 ; lasttick = tick
CLFLUSH A3,B4,A1 ; write-back and invalidate L1D line
CLFLUSH A5,B4,A1 ; write-back and invalidate L2 line
LL *B4,B5
NOP 4
ADD B7,B5,B5
SL B5,*B4
CMTL *B4,B1
STW B5,*B4 ; [!B1] is removed to flatten samples
|| ADDK 4,B4
|| [B0] BNOP bus_loop1?,5
BNOP RA,5
.endasmfunc
.global _OPENSSL_instrument_bus2
_OPENSSL_instrument_bus2:
.asmfunc
MV A6,B0 ; reassign max
|| MV B4,A6 ; reassign sizeof(output)
|| MVK 0x00004030,A3
MV A4,B4 ; reassign output
|| MVK 0,A4 ; return value
|| MVK 1,A1
|| MVKH 0x01840000,A3 ; L1DWIBAR
MVC TSCL,B8 ; collect 1st tick
|| MVK 0x00004010,A5
MV B8,B9 ; lasttick = tick
|| MVK 0,B7 ; lastdiff = 0
|| MVKH 0x01840000,A5 ; L2WIBAR
CLFLUSH A3,B4,A1 ; write-back and invalidate L1D line
CLFLUSH A5,B4,A1 ; write-back and invalidate L2 line
LL *B4,B5
NOP 4
ADD B7,B5,B5
SL B5,*B4
CMTL *B4,B1
NOP 4
STW B5,*B4
MVC TSCL,B8 ; collect 1st diff
SUB B8,B9,B7 ; lastdiff = tick - lasttick
|| MV B8,B9 ; lasttick = tick
|| SUB B0,1,B0
bus_loop2?:
CLFLUSH A3,B4,A1 ; write-back and invalidate L1D line
CLFLUSH A5,B4,A1 ; write-back and invalidate L2 line
LL *B4,B5
NOP 4
ADD B7,B5,B5
SL B5,*B4
CMTL *B4,B1
STW B5,*B4 ; [!B1] is removed to flatten samples
||[!B0] BNOP bus_loop2_done?,2
|| SUB B0,1,B0
MVC TSCL,B8
SUB B8,B9,B8
|| MV B8,B9
CMPEQ B8,B7,B2
|| MV B8,B7
[!B2] ADDAW B4,1,B4
||[!B2] ADDK 1,A4
CMPEQ A4,A6,A2
[!A2] BNOP bus_loop2?,5
bus_loop2_done?:
BNOP RA,5
.endasmfunc
___
print $code;
close STDOUT;

ghash-c64xplus.pl

@@ -0,0 +1,231 @@
#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# December 2011
#
# The module implements the GCM GHASH function and the underlying
# single multiplication operation in GF(2^128). Even though the
# subroutines have a _4bit suffix, they use no tables, but rely on
# hardware Galois Field Multiply support. Streamed GHASH processes
# one byte in ~7 cycles, which is >6x faster than "4-bit" table-driven
# code compiled with TI's cl6x 6.0 with -mv6400+ -o2 flags. We are
# comparing apples to oranges, but the compiler surely could have done
# better, because the theoretical [though not necessarily achievable]
# estimate for a "4-bit" table-driven implementation is ~12 cycles.
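
Aside (not part of the committed file): for orientation, here is the textbook bit-level GF(2^128) multiply that gcm_gmult is specified to perform, per the GCM spec and using the same 0xE1 reduction constant the code pre-shifts below. It is only a reference model; the code instead works byte-by-byte with hardware Galois multiplies. The _ref name is ours.

#include <stdint.h>
#include <string.h>

void gcm_gmult_ref(uint8_t Xi[16], const uint8_t H[16])
{
    uint8_t Z[16] = {0}, V[16];
    memcpy(V, H, 16);
    for (int i = 0; i < 128; i++) {
        if ((Xi[i >> 3] >> (7 - (i & 7))) & 1)   /* bit i of Xi, MSB first */
            for (int j = 0; j < 16; j++)
                Z[j] ^= V[j];
        int lsb = V[15] & 1;                     /* V >>= 1 ... */
        for (int j = 15; j > 0; j--)
            V[j] = (uint8_t)((V[j] >> 1) | (V[j - 1] << 7));
        V[0] >>= 1;
        if (lsb)
            V[0] ^= 0xE1;                        /* ... mod the polynomial */
    }
    memcpy(Xi, Z, 16);
}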
while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";
($Xip,$Htable,$inp,$len)=("A4","B4","A6","B6"); # arguments
($Z0,$Z1,$Z2,$Z3, $H0, $H1, $H2, $H3,
$H0x,$H1x,$H2x,$H3x)=map("A$_",(16..27));
($H01u,$H01y,$H2u,$H3u, $H0y,$H1y,$H2y,$H3y,
$H0z,$H1z,$H2z,$H3z)=map("B$_",(16..27));
($FF000000,$E10000)=("B30","B31");
($xip,$x0,$x1,$xib)=map("B$_",(6..9)); # $xip zaps $len
$xia="A9";
($rem,$res)=("B4","B5"); # $rem zaps $Htable
$code.=<<___;
.text
.asg B3,RA
.if 0
.global _gcm_gmult_1bit
_gcm_gmult_1bit:
ADDAD $Htable,2,$Htable
.endif
.global _gcm_gmult_4bit
_gcm_gmult_4bit:
.asmfunc
LDDW *${Htable}[-1],$H1:$H0 ; H.lo
LDDW *${Htable}[-2],$H3:$H2 ; H.hi
|| MV $Xip,${xip} ; reassign Xi
|| MVK 15,B1 ; SPLOOPD constant
MVK 0xE1,$E10000
|| LDBU *++${xip}[15],$x1 ; Xi[15]
MVK 0xFF,$FF000000
|| LDBU *--${xip},$x0 ; Xi[14]
SHL $E10000,16,$E10000 ; [pre-shifted] reduction polynomial
SHL $FF000000,24,$FF000000 ; upper byte mask
|| BNOP ghash_loop?
|| MVK 1,B0 ; take a single spin
PACKH2 $H0,$H1,$xia ; pack H0' and H1's upper bytes
AND $H2,$FF000000,$H2u ; H2's upper byte
AND $H3,$FF000000,$H3u ; H3's upper byte
|| SHRU $H2u,8,$H2u
SHRU $H3u,8,$H3u
|| ZERO $Z1:$Z0
SHRU2 $xia,8,$H01u
|| ZERO $Z3:$Z2
.endasmfunc
.global _gcm_ghash_4bit
_gcm_ghash_4bit:
.asmfunc
LDDW *${Htable}[-1],$H1:$H0 ; H.lo
|| SHRU $len,4,B0 ; reassign len
LDDW *${Htable}[-2],$H3:$H2 ; H.hi
|| MV $Xip,${xip} ; reassign Xi
|| MVK 15,B1 ; SPLOOPD constant
MVK 0xE1,$E10000
|| [B0] LDNDW *${inp}[1],$H1x:$H0x
MVK 0xFF,$FF000000
|| [B0] LDNDW *${inp}++[2],$H3x:$H2x
SHL $E10000,16,$E10000 ; [pre-shifted] reduction polynomial
|| LDDW *${xip}[1],$Z1:$Z0
SHL $FF000000,24,$FF000000 ; upper byte mask
|| LDDW *${xip}[0],$Z3:$Z2
PACKH2 $H0,$H1,$xia ; pack H0' and H1's upper bytes
AND $H2,$FF000000,$H2u ; H2's upper byte
AND $H3,$FF000000,$H3u ; H3's upper byte
|| SHRU $H2u,8,$H2u
SHRU $H3u,8,$H3u
SHRU2 $xia,8,$H01u
|| [B0] XOR $H0x,$Z0,$Z0 ; Xi^=inp
|| [B0] XOR $H1x,$Z1,$Z1
.if .LITTLE_ENDIAN
[B0] XOR $H2x,$Z2,$Z2
|| [B0] XOR $H3x,$Z3,$Z3
|| [B0] SHRU $Z1,24,$xia ; Xi[15], avoid cross-path stall
STDW $Z1:$Z0,*${xip}[1]
|| [B0] SHRU $Z1,16,$x0 ; Xi[14]
|| [B0] ZERO $Z1:$Z0
.else
[B0] XOR $H2x,$Z2,$Z2
|| [B0] XOR $H3x,$Z3,$Z3
|| [B0] MV $Z0,$xia ; Xi[15], avoid cross-path stall
STDW $Z1:$Z0,*${xip}[1]
|| [B0] SHRU $Z0,8,$x0 ; Xi[14]
|| [B0] ZERO $Z1:$Z0
.endif
STDW $Z3:$Z2,*${xip}[0]
|| [B0] ZERO $Z3:$Z2
|| [B0] MV $xia,$x1
[B0] ADDK 14,${xip}
ghash_loop?:
SPLOOPD 6 ; 6*16+7
|| MVC B1,ILC
|| [B0] SUB B0,1,B0
|| ZERO A0
|| ADD $x1,$x1,$xib ; SHL $x1,1,$xib
|| SHL $x1,1,$xia
___
########____________________________
# 0 D2. M1 M2 |
# 1 M1 |
# 2 M1 M2 |
# 3 D1. M1 M2 |
# 4 S1. L1 |
# 5 S2 S1x L1 D2 L2 |____________________________
# 6/0 L1 S1 L2 S2x |D2. M1 M2 |
# 7/1 L1 S1 D1x S2 M2 | M1 |
# 8/2 S1 L1x S2 | M1 M2 |
# 9/3 S1 L1x | D1. M1 M2 |
# 10/4 D1x | S1. L1 |
# 11/5 |S2 S1x L1 D2 L2 |____________
# 12/6/0 D1x __| L1 S1 L2 S2x |D2. ....
# 7/1 L1 S1 D1x S2 M2 | ....
# 8/2 S1 L1x S2 | ....
#####... ................|............
$code.=<<___;
XORMPY $H0,$xia,$H0x ; 0 ; H·Xi[i]
|| XORMPY $H01u,$xib,$H01y
|| [A0] LDBU *--${xip},$x0
XORMPY $H1,$xia,$H1x ; 1
XORMPY $H2,$xia,$H2x ; 2
|| XORMPY $H2u,$xib,$H2y
XORMPY $H3,$xia,$H3x ; 3
|| XORMPY $H3u,$xib,$H3y
||[!A0] MVK.D 15,A0 ; *--${xip} counter
XOR.L $H0x,$Z0,$Z0 ; 4 ; Z^=H·Xi[i]
|| [A0] SUB.S A0,1,A0
XOR.L $H1x,$Z1,$Z1 ; 5
|| AND.D $H01y,$FF000000,$H0z
|| SWAP2.L $H01y,$H1y ; ; SHL $H01y,16,$H1y
|| SHL $x0,1,$xib
|| SHL $x0,1,$xia
XOR.L $H2x,$Z2,$Z2 ; 6/0 ; [0,0] in epilogue
|| SHL $Z0,1,$rem ; ; rem=Z<<1
|| SHRMB.S $Z1,$Z0,$Z0 ; ; Z>>=8
|| AND.L $H1y,$FF000000,$H1z
XOR.L $H3x,$Z3,$Z3 ; 7/1
|| SHRMB.S $Z2,$Z1,$Z1
|| XOR.D $H0z,$Z0,$Z0 ; merge upper byte products
|| AND.S $H2y,$FF000000,$H2z
|| XORMPY $E10000,$rem,$res ; ; implicit rem&0x1FE
XOR.L $H1z,$Z1,$Z1 ; 8/2
|| SHRMB.S $Z3,$Z2,$Z2
|| AND.S $H3y,$FF000000,$H3z
XOR.L $H2z,$Z2,$Z2 ; 9/3
|| SHRU $Z3,8,$Z3
XOR.D $H3z,$Z3,$Z3 ; 10/4
NOP ; 11/5
SPKERNEL 0,2
|| XOR.D $res,$Z3,$Z3 ; 12/6/0; Z^=res
; input pre-fetch is possible where D1 slot is available...
[B0] LDNDW *${inp}[1],$H1x:$H0x ; 8/-
[B0] LDNDW *${inp}++[2],$H3x:$H2x ; 9/-
NOP ; 10/-
.if .LITTLE_ENDIAN
SWAP2 $Z0,$Z1 ; 11/-
|| SWAP4 $Z1,$Z0
SWAP4 $Z1,$Z1 ; 12/-
|| SWAP2 $Z0,$Z0
SWAP2 $Z2,$Z3
|| SWAP4 $Z3,$Z2
||[!B0] BNOP RA
SWAP4 $Z3,$Z3
|| SWAP2 $Z2,$Z2
|| [B0] BNOP ghash_loop?
[B0] XOR $H0x,$Z0,$Z0 ; Xi^=inp
|| [B0] XOR $H1x,$Z1,$Z1
[B0] XOR $H2x,$Z2,$Z2
|| [B0] XOR $H3x,$Z3,$Z3
|| [B0] SHRU $Z1,24,$xia ; Xi[15], avoid cross-path stall
STDW $Z1:$Z0,*${xip}[1]
|| [B0] SHRU $Z1,16,$x0 ; Xi[14]
|| [B0] ZERO $Z1:$Z0
.else
[!B0] BNOP RA ; 11/-
[B0] BNOP ghash_loop? ; 12/-
[B0] XOR $H0x,$Z0,$Z0 ; Xi^=inp
|| [B0] XOR $H1x,$Z1,$Z1
[B0] XOR $H2x,$Z2,$Z2
|| [B0] XOR $H3x,$Z3,$Z3
|| [B0] MV $Z0,$xia ; Xi[15], avoid cross-path stall
STDW $Z1:$Z0,*${xip}[1]
|| [B0] SHRU $Z0,8,$x0 ; Xi[14]
|| [B0] ZERO $Z1:$Z0
.endif
STDW $Z3:$Z2,*${xip}[0]
|| [B0] ZERO $Z3:$Z2
|| [B0] MV $xia,$x1
[B0] ADDK 14,${xip}
.endasmfunc
.sect .const
.cstring "GHASH for C64x+, CRYPTOGAMS by <appro\@openssl.org>"
.align 4
___
print $code;
close STDOUT;

sha1-c64xplus.pl

@@ -0,0 +1,323 @@
#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# SHA1 for C64x+.
#
# November 2011
#
# If compared to compiler-generated code with similar characteristics,
# i.e. compiled with OPENSSL_SMALL_FOOTPRINT and utilizing SPLOOPs,
# this implementation is 25% smaller and >2x faster. In absolute terms
# performance is (quite impressive) ~6.5 cycles per processed byte.
# Fully unrolled assembler would be ~5x larger and is likely to be
# ~15% faster. It would be free from references to the intermediate
# ring buffer, but would put more pressure on L1P [both because the
# code would be larger and because it wouldn't be using the SPLOOP
# buffer]. There are no plans to realize a fully unrolled variant
# though...
#
# !!! Note that this module uses AMR, which means that all interrupt
# service routines are expected to preserve it and, for their own
# well-being, zero it upon entry.
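
Aside (not part of the committed file): each SPLOOP iteration below performs one textbook SHA-1 round; a C sketch of that round with names of our own choosing. F is F_00_19 = (b&c)|(~b&d) (computed via AND/ANDN/XOR), F_20_39 = b^c^d, or F_40_59 = (b&c)|(b&d)|(c&d), depending on the round group.

#include <stdint.h>

static uint32_t rol(uint32_t x, int n) { return (x << n) | (x >> (32 - n)); }

/* s[0..4] = a,b,c,d,e; f is the round function value, k the round
   constant, xi the message word for this round */
static void sha1_round(uint32_t s[5], uint32_t f, uint32_t k, uint32_t xi)
{
    uint32_t t = rol(s[0], 5) + f + s[4] + k + xi; /* T=E+K, +F, +ROL(A,5), +Xi */
    s[4] = s[3];                                   /* e = d */
    s[3] = s[2];                                   /* d = c */
    s[2] = rol(s[1], 30);                          /* c = ROL(b,30) */
    s[1] = s[0];                                   /* b = a */
    s[0] = t;                                      /* a = T */
}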
while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";
($CTX,$INP,$NUM) = ("A4","B4","A6"); # arguments
($A,$B,$C,$D,$E, $Arot,$F,$F0,$T,$K) = map("A$_",(16..20, 21..25));
($X0,$X2,$X8,$X13) = ("A26","B26","A27","B27");
($TX0,$TX1,$TX2,$TX3) = map("B$_",(28..31));
($XPA,$XPB) = ("A5","B5"); # X circular buffer
($Actx,$Bctx,$Cctx,$Dctx,$Ectx) = map("A$_",(3,6..9)); # zaps $NUM
$code=<<___;
.text
.asg B3,RA
.asg A15,FP
.asg B15,SP
.if .BIG_ENDIAN
.asg MV,SWAP2
.asg MV,SWAP4
.endif
.global _sha1_block_data_order
_sha1_block_data_order:
.asmfunc stack_usage(64)
MV $NUM,A0 ; reassign $NUM
|| MVK -64,B0
[!A0] BNOP RA ; if ($NUM==0) return;
|| [A0] STW FP,*SP--[16] ; save frame pointer and alloca(64)
|| [A0] MV SP,FP
[A0] LDW *${CTX}[0],$A ; load A-E...
|| [A0] AND B0,SP,SP ; align stack at 64 bytes
[A0] LDW *${CTX}[1],$B
|| [A0] SUBAW SP,2,SP ; reserve two words above buffer
[A0] LDW *${CTX}[2],$C
|| [A0] MVK 0x00404,B0
[A0] LDW *${CTX}[3],$D
|| [A0] MVKH 0x50000,B0 ; 0x050404, 64 bytes for $XP[AB]
[A0] LDW *${CTX}[4],$E
|| [A0] MVC B0,AMR ; setup circular addressing
LDNW *${INP}++,$TX1 ; pre-fetch input
NOP 1
loop?:
MVK 0x00007999,$K
|| ADDAW SP,2,$XPA
|| SUB A0,1,A0
|| MVK 13,B0
MVKH 0x5a820000,$K ; K_00_19
|| ADDAW SP,2,$XPB
|| MV $A,$Actx
|| MV $B,$Bctx
;;==================================================
SPLOOPD 5 ; BODY_00_13
|| MV $C,$Cctx
|| MV $D,$Dctx
|| MV $E,$Ectx
|| MVC B0,ILC
ROTL $A,5,$Arot
|| AND $C,$B,$F
|| ANDN $D,$B,$F0
|| ADD $K,$E,$T ; T=E+K
XOR $F0,$F,$F ; F_00_19(B,C,D)
|| MV $D,$E ; E=D
|| MV $C,$D ; D=C
|| SWAP2 $TX1,$TX2
|| LDNW *${INP}++,$TX1
ADD $F,$T,$T ; T+=F_00_19(B,C,D)
|| ROTL $B,30,$C ; C=ROL(B,30)
|| SWAP4 $TX2,$TX3 ; byte swap
ADD $Arot,$T,$T ; T+=ROL(A,5)
|| MV $A,$B ; B=A
ADD $TX3,$T,$A ; A=T+Xi
|| STW $TX3,*${XPB}++
SPKERNEL
;;==================================================
ROTL $A,5,$Arot ; BODY_14
|| AND $C,$B,$F
|| ANDN $D,$B,$F0
|| ADD $K,$E,$T ; T=E+K
XOR $F0,$F,$F ; F_00_19(B,C,D)
|| MV $D,$E ; E=D
|| MV $C,$D ; D=C
|| SWAP2 $TX1,$TX2
|| LDNW *${INP}++,$TX1
ADD $F,$T,$T ; T+=F_00_19(B,C,D)
|| ROTL $B,30,$C ; C=ROL(B,30)
|| SWAP4 $TX2,$TX2 ; byte swap
|| LDW *${XPA}++,$X0 ; fetches from X ring buffer are
|| LDW *${XPB}[4],$X2 ; 2 iterations ahead
ADD $Arot,$T,$T ; T+=ROL(A,5)
|| MV $A,$B ; B=A
|| LDW *${XPA}[7],$X8
|| MV $TX3,$X13 ; || LDW *${XPB}[15],$X13
|| MV $TX2,$TX3
ADD $TX2,$T,$A ; A=T+Xi
|| STW $TX2,*${XPB}++
;;==================================================
ROTL $A,5,$Arot ; BODY_15
|| AND $C,$B,$F
|| ANDN $D,$B,$F0
|| ADD $K,$E,$T ; T=E+K
XOR $F0,$F,$F ; F_00_19(B,C,D)
|| MV $D,$E ; E=D
|| MV $C,$D ; D=C
|| SWAP2 $TX1,$TX2
ADD $F,$T,$T ; T+=F_00_19(B,C,D)
|| ROTL $B,30,$C ; C=ROL(B,30)
|| SWAP4 $TX2,$TX2 ; byte swap
|| XOR $X0,$X2,$TX0 ; Xupdate XORs are 1 iteration ahead
|| LDW *${XPA}++,$X0
|| LDW *${XPB}[4],$X2
ADD $Arot,$T,$T ; T+=ROL(A,5)
|| MV $A,$B ; B=A
|| XOR $X8,$X13,$TX1
|| LDW *${XPA}[7],$X8
|| MV $TX3,$X13 ; || LDW *${XPB}[15],$X13
|| MV $TX2,$TX3
ADD $TX2,$T,$A ; A=T+Xi
|| STW $TX2,*${XPB}++
|| XOR $TX0,$TX1,$TX1
|| MVK 3,B0
;;==================================================
SPLOOPD 5 ; BODY_16_19
|| MVC B0,ILC
ROTL $A,5,$Arot
|| AND $C,$B,$F
|| ANDN $D,$B,$F0
|| ADD $K,$E,$T ; T=E+K
|| ROTL $TX1,1,$TX2 ; Xupdate output
XOR $F0,$F,$F ; F_00_19(B,C,D)
|| MV $D,$E ; E=D
|| MV $C,$D ; D=C
ADD $F,$T,$T ; T+=F_00_19(B,C,D)
|| ROTL $B,30,$C ; C=ROL(B,30)
|| XOR $X0,$X2,$TX0
|| LDW *${XPA}++,$X0
|| LDW *${XPB}[4],$X2
ADD $Arot,$T,$T ; T+=ROL(A,5)
|| MV $A,$B ; B=A
|| XOR $X8,$X13,$TX1
|| LDW *${XPA}[7],$X8
|| MV $TX3,$X13 ; || LDW *${XPB}[15],$X13
|| MV $TX2,$TX3
ADD $TX2,$T,$A ; A=T+Xi
|| STW $TX2,*${XPB}++
|| XOR $TX0,$TX1,$TX1
SPKERNEL
MVK 0xffffeba1,$K
|| MVK 19,B0
MVKH 0x6ed90000,$K ; K_20_39
___
sub BODY_20_39 {
$code.=<<___;
;;==================================================
SPLOOPD 5 ; BODY_20_39
|| MVC B0,ILC
ROTL $A,5,$Arot
|| XOR $B,$C,$F
|| ADD $K,$E,$T ; T=E+K
|| ROTL $TX1,1,$TX2 ; Xupdate output
XOR $D,$F,$F ; F_20_39(B,C,D)
|| MV $D,$E ; E=D
|| MV $C,$D ; D=C
ADD $F,$T,$T ; T+=F_20_39(B,C,D)
|| ROTL $B,30,$C ; C=ROL(B,30)
|| XOR $X0,$X2,$TX0
|| LDW *${XPA}++,$X0
|| LDW *${XPB}[4],$X2
ADD $Arot,$T,$T ; T+=ROL(A,5)
|| MV $A,$B ; B=A
|| XOR $X8,$X13,$TX1
|| LDW *${XPA}[7],$X8
|| MV $TX3,$X13 ; || LDW *${XPB}[15],$X13
|| MV $TX2,$TX3
ADD $TX2,$T,$A ; A=T+Xi
|| STW $TX2,*${XPB}++ ; last one is redundant
|| XOR $TX0,$TX1,$TX1
SPKERNEL
___
$code.=<<___ if (!shift);
MVK 0xffffbcdc,$K
MVKH 0x8f1b0000,$K ; K_40_59
___
} &BODY_20_39();
$code.=<<___;
;;==================================================
SPLOOPD 5 ; BODY_40_59
|| MVC B0,ILC
|| AND $B,$C,$F
|| AND $B,$D,$F0
ROTL $A,5,$Arot
|| XOR $F0,$F,$F
|| AND $C,$D,$F0
|| ADD $K,$E,$T ; T=E+K
|| ROTL $TX1,1,$TX2 ; Xupdate output
XOR $F0,$F,$F ; F_40_59(B,C,D)
|| MV $D,$E ; E=D
|| MV $C,$D ; D=C
ADD $F,$T,$T ; T+=F_40_59(B,C,D)
|| ROTL $B,30,$C ; C=ROL(B,30)
|| XOR $X0,$X2,$TX0
|| LDW *${XPA}++,$X0
|| LDW *${XPB}[4],$X2
ADD $Arot,$T,$T ; T+=ROL(A,5)
|| MV $A,$B ; B=A
|| XOR $X8,$X13,$TX1
|| LDW *${XPA}[7],$X8
|| MV $TX3,$X13 ; || LDW *${XPB}[15],$X13
|| MV $TX2,$TX3
ADD $TX2,$T,$A ; A=T+Xi
|| STW $TX2,*${XPB}++
|| XOR $TX0,$TX1,$TX1
|| AND $B,$C,$F
|| AND $B,$D,$F0
SPKERNEL
MVK 0xffffc1d6,$K
|| MVK 18,B0
MVKH 0xca620000,$K ; K_60_79
___
&BODY_20_39(-1); # BODY_60_78
$code.=<<___;
;;==================================================
[A0] B loop?
|| ROTL $A,5,$Arot ; BODY_79
|| XOR $B,$C,$F
|| ROTL $TX1,1,$TX2 ; Xupdate output
[A0] LDNW *${INP}++,$TX1 ; pre-fetch input
|| ADD $K,$E,$T ; T=E+K
|| XOR $D,$F,$F ; F_20_39(B,C,D)
ADD $F,$T,$T ; T+=F_20_39(B,C,D)
|| ADD $Ectx,$D,$E ; E=D,E+=Ectx
|| ADD $Dctx,$C,$D ; D=C,D+=Dctx
|| ROTL $B,30,$C ; C=ROL(B,30)
ADD $Arot,$T,$T ; T+=ROL(A,5)
|| ADD $Bctx,$A,$B ; B=A,B+=Bctx
ADD $TX2,$T,$A ; A=T+Xi
ADD $Actx,$A,$A ; A+=Actx
|| ADD $Cctx,$C,$C ; C+=Cctx
;; end of loop?
BNOP RA ; return
|| MV FP,SP ; restore stack pointer
|| LDW *FP[0],FP ; restore frame pointer
STW $A,*${CTX}[0] ; emit A-E...
|| MVK 0,B0
STW $B,*${CTX}[1]
|| MVC B0,AMR ; clear AMR
STW $C,*${CTX}[2]
STW $D,*${CTX}[3]
STW $E,*${CTX}[4]
.endasmfunc
.sect .const
.cstring "SHA1 block transform for C64x+, CRYPTOGAMS by <appro\@openssl.org>"
.align 4
___
print $code;
close STDOUT;

sha256-c64xplus.pl

@@ -0,0 +1,302 @@
#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# SHA256 for C64x+.
#
# January 2012
#
# Performance is just below 10 cycles per processed byte, which is
# almost 40% faster than compiler-generated code. Unrolling is
# unlikely to give more than ~8% improvement...
#
# !!! Note that this module uses AMR, which means that all interrupt
# service routines are expected to preserve it and, for their own
# well-being, zero it upon entry.
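
Aside (not part of the committed file): the logical forms used in the code below, as a C sketch. Note the code computes Maj as ((a|b)&c)|(a&b), which equals the textbook (a&b)^(a&c)^(b&c), and that its ROTLs are mirror images of the usual right rotations.

#include <stdint.h>

static uint32_t rotr(uint32_t x, int n) { return (x >> n) | (x << (32 - n)); }

static uint32_t Maj(uint32_t a, uint32_t b, uint32_t c)
{ return ((a | b) & c) | (a & b); }          /* == (a&b)^(a&c)^(b&c) */

static uint32_t Ch(uint32_t e, uint32_t f, uint32_t g)
{ return (e & f) ^ (~e & g); }               /* AND + ANDN + XOR above */

static uint32_t Sigma0(uint32_t a)           /* ROTL 30,19,10 == ROTR 2,13,22 */
{ return rotr(a, 2) ^ rotr(a, 13) ^ rotr(a, 22); }

static uint32_t Sigma1(uint32_t e)           /* ROTL 26,21,7 == ROTR 6,11,25 */
{ return rotr(e, 6) ^ rotr(e, 11) ^ rotr(e, 25); }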
while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";
($CTXA,$INP,$NUM) = ("A4","B4","A6"); # arguments
$K256="A3";
($A,$Actx,$B,$Bctx,$C,$Cctx,$D,$Dctx,$T2,$S0,$s1,$t0a,$t1a,$t2a,$X9,$X14)
=map("A$_",(16..31));
($E,$Ectx,$F,$Fctx,$G,$Gctx,$H,$Hctx,$T1,$S1,$s0,$t0e,$t1e,$t2e,$X1,$X15)
=map("B$_",(16..31));
($Xia,$Xib)=("A5","B5"); # circular/ring buffer
$CTXB=$t2e;
($Xn,$X0,$K)=("B7","B8","B9");
($Maj,$Ch)=($T2,"B6");
$code.=<<___;
.text
.if __TI_EABI__
.nocmp
.endif
.asg B3,RA
.asg A15,FP
.asg B15,SP
.if .BIG_ENDIAN
.asg SWAP2,MV
.asg SWAP4,MV
.endif
.global _sha256_block_data_order
_sha256_block_data_order:
.asmfunc stack_usage(64)
MV $NUM,A0 ; reassign $NUM
|| MVK -64,B0
[!A0] BNOP RA ; if ($NUM==0) return;
|| [A0] STW FP,*SP--[16] ; save frame pointer and alloca(64)
|| [A0] MV SP,FP
[A0] ADDKPC _sha256_block_data_order,B2
|| [A0] AND B0,SP,SP ; align stack at 64 bytes
.if __TI_EABI__
[A0] MVK 0x00404,B1
|| [A0] MVKL \$PCR_OFFSET(K256,_sha256_block_data_order),$K256
[A0] MVKH 0x50000,B1
|| [A0] MVKH \$PCR_OFFSET(K256,_sha256_block_data_order),$K256
.else
[A0] MVK 0x00404,B1
|| [A0] MVKL (K256-_sha256_block_data_order),$K256
[A0] MVKH 0x50000,B1
|| [A0] MVKH (K256-_sha256_block_data_order),$K256
.endif
[A0] MVC B1,AMR ; setup circular addressing
|| [A0] MV SP,$Xia
[A0] MV SP,$Xib
|| [A0] ADD B2,$K256,$K256
|| [A0] MV $CTXA,$CTXB
|| [A0] SUBAW SP,2,SP ; reserve two words above buffer
LDW *${CTXA}[0],$A ; load ctx
|| LDW *${CTXB}[4],$E
LDW *${CTXA}[1],$B
|| LDW *${CTXB}[5],$F
LDW *${CTXA}[2],$C
|| LDW *${CTXB}[6],$G
LDW *${CTXA}[3],$D
|| LDW *${CTXB}[7],$H
LDNW *$INP++,$Xn ; pre-fetch input
LDW *$K256++,$K ; pre-fetch K256[0]
MVK 14,B0 ; loop counters
MVK 47,B1
|| ADDAW $Xia,9,$Xia
outerloop?:
SUB A0,1,A0
|| MV $A,$Actx
|| MV $E,$Ectx
|| MVD $B,$Bctx
|| MVD $F,$Fctx
MV $C,$Cctx
|| MV $G,$Gctx
|| MVD $D,$Dctx
|| MVD $H,$Hctx
|| SWAP4 $Xn,$X0
SPLOOPD 8 ; BODY_00_14
|| MVC B0,ILC
|| SWAP2 $X0,$X0
LDNW *$INP++,$Xn
|| ROTL $A,30,$S0
|| OR $A,$B,$Maj
|| AND $A,$B,$t2a
|| ROTL $E,26,$S1
|| AND $F,$E,$Ch
|| ANDN $G,$E,$t2e
ROTL $A,19,$t0a
|| AND $C,$Maj,$Maj
|| ROTL $E,21,$t0e
|| XOR $t2e,$Ch,$Ch ; Ch(e,f,g) = (e&f)^(~e&g)
ROTL $A,10,$t1a
|| OR $t2a,$Maj,$Maj ; Maj(a,b,c) = ((a|b)&c)|(a&b)
|| ROTL $E,7,$t1e
|| ADD $K,$H,$T1 ; T1 = h + K256[i]
ADD $X0,$T1,$T1 ; T1 += X[i];
|| STW $X0,*$Xib++
|| XOR $t0a,$S0,$S0
|| XOR $t0e,$S1,$S1
XOR $t1a,$S0,$S0 ; Sigma0(a)
|| XOR $t1e,$S1,$S1 ; Sigma1(e)
|| LDW *$K256++,$K ; pre-fetch K256[i+1]
|| ADD $Ch,$T1,$T1 ; T1 += Ch(e,f,g)
ADD $S1,$T1,$T1 ; T1 += Sigma1(e)
|| ADD $S0,$Maj,$T2 ; T2 = Sigma0(a) + Maj(a,b,c)
|| ROTL $G,0,$H ; h = g
|| MV $F,$G ; g = f
|| MV $X0,$X14
|| SWAP4 $Xn,$X0
SWAP2 $X0,$X0
|| MV $E,$F ; f = e
|| ADD $D,$T1,$E ; e = d + T1
|| MV $C,$D ; d = c
MV $B,$C ; c = b
|| MV $A,$B ; b = a
|| ADD $T1,$T2,$A ; a = T1 + T2
SPKERNEL
ROTL $A,30,$S0 ; BODY_15
|| OR $A,$B,$Maj
|| AND $A,$B,$t2a
|| ROTL $E,26,$S1
|| AND $F,$E,$Ch
|| ANDN $G,$E,$t2e
|| LDW *${Xib}[1],$Xn ; modulo-scheduled
ROTL $A,19,$t0a
|| AND $C,$Maj,$Maj
|| ROTL $E,21,$t0e
|| XOR $t2e,$Ch,$Ch ; Ch(e,f,g) = (e&f)^(~e&g)
|| LDW *${Xib}[2],$X1 ; modulo-scheduled
ROTL $A,10,$t1a
|| OR $t2a,$Maj,$Maj ; Maj(a,b,c) = ((a|b)&c)|(a&b)
|| ROTL $E,7,$t1e
|| ADD $K,$H,$T1 ; T1 = h + K256[i]
ADD $X0,$T1,$T1 ; T1 += X[i];
|| STW $X0,*$Xib++
|| XOR $t0a,$S0,$S0
|| XOR $t0e,$S1,$S1
XOR $t1a,$S0,$S0 ; Sigma0(a)
|| XOR $t1e,$S1,$S1 ; Sigma1(e)
|| LDW *$K256++,$K ; pre-fetch K256[i+1]
|| ADD $Ch,$T1,$T1 ; T1 += Ch(e,f,g)
ADD $S1,$T1,$T1 ; T1 += Sigma1(e)
|| ADD $S0,$Maj,$T2 ; T2 = Sigma0(a) + Maj(a,b,c)
|| ROTL $G,0,$H ; h = g
|| MV $F,$G ; g = f
|| MV $X0,$X15
MV $E,$F ; f = e
|| ADD $D,$T1,$E ; e = d + T1
|| MV $C,$D ; d = c
|| MV $Xn,$X0 ; modulo-scheduled
|| LDW *$Xia,$X9 ; modulo-scheduled
|| ROTL $X1,25,$t0e ; modulo-scheduled
|| ROTL $X14,15,$t0a ; modulo-scheduled
SHRU $X1,3,$s0 ; modulo-scheduled
|| SHRU $X14,10,$s1 ; modulo-scheduled
|| ROTL $B,0,$C ; c = b
|| MV $A,$B ; b = a
|| ADD $T1,$T2,$A ; a = T1 + T2
SPLOOPD 10 ; BODY_16_63
|| MVC B1,ILC
|| ROTL $X1,14,$t1e ; modulo-scheduled
|| ROTL $X14,13,$t1a ; modulo-scheduled
XOR $t0e,$s0,$s0
|| XOR $t0a,$s1,$s1
|| MV $X15,$X14
|| MV $X1,$Xn
XOR $t1e,$s0,$s0 ; sigma0(X[i+1])
|| XOR $t1a,$s1,$s1 ; sigma1(X[i+14])
|| LDW *${Xib}[2],$X1 ; modulo-scheduled
ROTL $A,30,$S0
|| OR $A,$B,$Maj
|| AND $A,$B,$t2a
|| ROTL $E,26,$S1
|| AND $F,$E,$Ch
|| ANDN $G,$E,$t2e
|| ADD $X9,$X0,$X0 ; X[i] += X[i+9]
ROTL $A,19,$t0a
|| AND $C,$Maj,$Maj
|| ROTL $E,21,$t0e
|| XOR $t2e,$Ch,$Ch ; Ch(e,f,g) = (e&f)^(~e&g)
|| ADD $s0,$X0,$X0 ; X[i] += sigma1(X[i+1])
ROTL $A,10,$t1a
|| OR $t2a,$Maj,$Maj ; Maj(a,b,c) = ((a|b)&c)|(a&b)
|| ROTL $E,7,$t1e
|| ADD $H,$K,$T1 ; T1 = h + K256[i]
|| ADD $s1,$X0,$X0 ; X[i] += sigma1(X[i+14])
XOR $t0a,$S0,$S0
|| XOR $t0e,$S1,$S1
|| ADD $X0,$T1,$T1 ; T1 += X[i]
|| STW $X0,*$Xib++
XOR $t1a,$S0,$S0 ; Sigma0(a)
|| XOR $t1e,$S1,$S1 ; Sigma1(e)
|| ADD $Ch,$T1,$T1 ; T1 += Ch(e,f,g)
|| MV $X0,$X15
|| ROTL $G,0,$H ; h = g
|| LDW *$K256++,$K ; pre-fetch K256[i+1]
ADD $S1,$T1,$T1 ; T1 += Sigma1(e)
|| ADD $S0,$Maj,$T2 ; T2 = Sigma0(a) + Maj(a,b,c)
|| MV $F,$G ; g = f
|| MV $Xn,$X0 ; modulo-scheduled
|| LDW *++$Xia,$X9 ; modulo-scheduled
|| ROTL $X1,25,$t0e ; modulo-scheduled
|| ROTL $X14,15,$t0a ; modulo-scheduled
ROTL $X1,14,$t1e ; modulo-scheduled
|| ROTL $X14,13,$t1a ; modulo-scheduled
|| MV $E,$F ; f = e
|| ADD $D,$T1,$E ; e = d + T1
|| MV $C,$D ; d = c
|| MV $B,$C ; c = b
MV $A,$B ; b = a
|| ADD $T1,$T2,$A ; a = T1 + T2
|| SHRU $X1,3,$s0 ; modulo-scheduled
|| SHRU $X14,10,$s1 ; modulo-scheduled
SPKERNEL
[A0] B outerloop?
|| [A0] LDNW *$INP++,$Xn ; pre-fetch input
|| [A0] ADDK -260,$K256 ; rewind K256
|| ADD $Actx,$A,$A ; accumulate ctx
|| ADD $Ectx,$E,$E
|| ADD $Bctx,$B,$B
ADD $Fctx,$F,$F
|| ADD $Cctx,$C,$C
|| ADD $Gctx,$G,$G
|| ADD $Dctx,$D,$D
|| ADD $Hctx,$H,$H
|| [A0] LDW *$K256++,$K ; pre-fetch K256[0]
[!A0] BNOP RA
||[!A0] MV $CTXA,$CTXB
[!A0] MV FP,SP ; restore stack pointer
||[!A0] LDW *FP[0],FP ; restore frame pointer
[!A0] STW $A,*${CTXA}[0] ; save ctx
||[!A0] STW $E,*${CTXB}[4]
||[!A0] MVK 0,B0
[!A0] STW $B,*${CTXA}[1]
||[!A0] STW $F,*${CTXB}[5]
||[!A0] MVC B0,AMR ; clear AMR
STW $C,*${CTXA}[2]
|| STW $G,*${CTXB}[6]
STW $D,*${CTXA}[3]
|| STW $H,*${CTXB}[7]
.endasmfunc
.sect ".const:sha_asm"
.align 128
K256:
.uword 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5
.uword 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5
.uword 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3
.uword 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174
.uword 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc
.uword 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da
.uword 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7
.uword 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967
.uword 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13
.uword 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85
.uword 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3
.uword 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070
.uword 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5
.uword 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3
.uword 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208
.uword 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
.cstring "SHA256 block transform for C64x+, CRYPTOGAMS by <appro\@openssl.org>"
.align 4
___
print $code;

sha512-c64xplus.pl

@@ -0,0 +1,421 @@
#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# SHA512 for C64x+.
#
# January 2012
#
# Performance is 19 cycles per processed byte. Compared to the block
# transform function from sha512.c compiled with cl6x with -mv6400+
# -o2 -DOPENSSL_SMALL_FOOTPRINT it's almost 7x faster and 2x smaller.
# Loop unrolling won't make this implementation any faster, because
# it's effectively dominated by SHRU||SHL pairs and you can't schedule
# more of them.
#
# !!! Note that this module uses AMR, which means that all interrupt
# service routines are expected to preserve it and, for their own
# well-being, zero it upon entry.
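
Aside (not part of the committed file): C64x+ has no 64-bit rotate, so every SHA-512 Sigma/sigma term is synthesized from 32-bit halves, which is where the SHRU||SHL pairs come from. A C sketch of the pattern for a right-rotate by n (0 < n < 32) of the value hi:lo; rotations by 32..63 just swap which half feeds which expression, as the 41-32 and 64-41 shift amounts below suggest.

#include <stdint.h>

static uint32_t ror64_hi(uint32_t hi, uint32_t lo, int n)
{
    return (hi >> n) | (lo << (32 - n));   /* one SHRU paired with one SHL */
}

static uint32_t ror64_lo(uint32_t hi, uint32_t lo, int n)
{
    return (lo >> n) | (hi << (32 - n));   /* the symmetric half */
}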
while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";
($CTXA,$INP,$NUM) = ("A4","B4","A6"); # arguments
$K512="A3";
($Ahi,$Actxhi,$Bhi,$Bctxhi,$Chi,$Cctxhi,$Dhi,$Dctxhi,
$Ehi,$Ectxhi,$Fhi,$Fctxhi,$Ghi,$Gctxhi,$Hhi,$Hctxhi)=map("A$_",(16..31));
($Alo,$Actxlo,$Blo,$Bctxlo,$Clo,$Cctxlo,$Dlo,$Dctxlo,
$Elo,$Ectxlo,$Flo,$Fctxlo,$Glo,$Gctxlo,$Hlo,$Hctxlo)=map("B$_",(16..31));
($S1hi,$CHhi,$S0hi,$t0hi)=map("A$_",(10..13));
($S1lo,$CHlo,$S0lo,$t0lo)=map("B$_",(10..13));
($T1hi, $T2hi)= ("A6","A7");
($T1lo,$T1carry,$T2lo,$T2carry)=("B6","B7","B8","B9");
($Khi,$Klo)=("A9","A8");
($MAJhi,$MAJlo)=($T2hi,$T2lo);
($t1hi,$t1lo)=($Khi,"B2");
$CTXB=$t1lo;
($Xihi,$Xilo)=("A5","B5"); # circular/ring buffer
$code.=<<___;
.text
.if __TI_EABI__
.nocmp
.endif
.asg B3,RA
.asg A15,FP
.asg B15,SP
.if .BIG_ENDIAN
.asg $Khi,KHI
.asg $Klo,KLO
.else
.asg $Khi,KLO
.asg $Klo,KHI
.endif
.global _sha512_block_data_order
_sha512_block_data_order:
.asmfunc stack_usage(40+128)
MV $NUM,A0 ; reassign $NUM
|| MVK -128,B0
[!A0] BNOP RA ; if ($NUM==0) return;
|| [A0] STW FP,*SP--(40) ; save frame pointer
|| [A0] MV SP,FP
[A0] STDW B13:B12,*SP[4]
|| [A0] MVK 0x00404,B1
[A0] STDW B11:B10,*SP[3]
|| [A0] STDW A13:A12,*FP[-3]
|| [A0] MVKH 0x60000,B1
[A0] STDW A11:A10,*SP[1]
|| [A0] MVC B1,AMR ; setup circular addressing
|| [A0] ADD B0,SP,SP ; alloca(128)
.if __TI_EABI__
[A0] AND B0,SP,SP ; align stack at 128 bytes
|| [A0] ADDKPC _sha512_block_data_order,B1
|| [A0] MVKL \$PCR_OFFSET(K512,_sha512_block_data_order),$K512
[A0] MVKH \$PCR_OFFSET(K512,_sha512_block_data_order),$K512
|| [A0] SUBAW SP,2,SP ; reserve two words above buffer
.else
[A0] AND B0,SP,SP ; align stack at 128 bytes
|| [A0] ADDKPC _sha512_block_data_order,B1
|| [A0] MVKL (K512-_sha512_block_data_order),$K512
[A0] MVKH (K512-_sha512_block_data_order),$K512
|| [A0] SUBAW SP,2,SP ; reserve two words above buffer
.endif
ADDAW SP,3,$Xilo
ADDAW SP,2,$Xihi
|| MV $CTXA,$CTXB
LDW *${CTXA}[0^.LITTLE_ENDIAN],$Ahi ; load ctx
|| LDW *${CTXB}[1^.LITTLE_ENDIAN],$Alo
|| ADD B1,$K512,$K512
LDW *${CTXA}[2^.LITTLE_ENDIAN],$Bhi
|| LDW *${CTXB}[3^.LITTLE_ENDIAN],$Blo
LDW *${CTXA}[4^.LITTLE_ENDIAN],$Chi
|| LDW *${CTXB}[5^.LITTLE_ENDIAN],$Clo
LDW *${CTXA}[6^.LITTLE_ENDIAN],$Dhi
|| LDW *${CTXB}[7^.LITTLE_ENDIAN],$Dlo
LDW *${CTXA}[8^.LITTLE_ENDIAN],$Ehi
|| LDW *${CTXB}[9^.LITTLE_ENDIAN],$Elo
LDW *${CTXA}[10^.LITTLE_ENDIAN],$Fhi
|| LDW *${CTXB}[11^.LITTLE_ENDIAN],$Flo
LDW *${CTXA}[12^.LITTLE_ENDIAN],$Ghi
|| LDW *${CTXB}[13^.LITTLE_ENDIAN],$Glo
LDW *${CTXA}[14^.LITTLE_ENDIAN],$Hhi
|| LDW *${CTXB}[15^.LITTLE_ENDIAN],$Hlo
LDNDW *$INP++,B11:B10 ; pre-fetch input
LDDW *$K512++,$Khi:$Klo ; pre-fetch K512[0]
outerloop?:
MVK 15,B0 ; loop counters
|| MVK 64,B1
|| SUB A0,1,A0
MV $Ahi,$Actxhi
|| MV $Alo,$Actxlo
|| MV $Bhi,$Bctxhi
|| MV $Blo,$Bctxlo
|| MV $Chi,$Cctxhi
|| MV $Clo,$Cctxlo
|| MVD $Dhi,$Dctxhi
|| MVD $Dlo,$Dctxlo
MV $Ehi,$Ectxhi
|| MV $Elo,$Ectxlo
|| MV $Fhi,$Fctxhi
|| MV $Flo,$Fctxlo
|| MV $Ghi,$Gctxhi
|| MV $Glo,$Gctxlo
|| MVD $Hhi,$Hctxhi
|| MVD $Hlo,$Hctxlo
loop0_15?:
.if .BIG_ENDIAN
MV B11,$T1hi
|| MV B10,$T1lo
.else
SWAP4 B10,$T1hi
|| SWAP4 B11,$T1lo
SWAP2 $T1hi,$T1hi
|| SWAP2 $T1lo,$T1lo
.endif
loop16_79?:
STW $T1hi,*$Xihi++[2]
|| STW $T1lo,*$Xilo++[2] ; X[i] = T1
|| ADD $Hhi,$T1hi,$T1hi
|| ADDU $Hlo,$T1lo,$T1carry:$T1lo ; T1 += h
|| SHRU $Ehi,14,$S1hi
|| SHL $Ehi,32-14,$S1lo
XOR $Fhi,$Ghi,$CHhi
|| XOR $Flo,$Glo,$CHlo
|| ADD KHI,$T1hi,$T1hi
|| ADDU KLO,$T1carry:$T1lo,$T1carry:$T1lo ; T1 += K512[i]
|| SHRU $Elo,14,$t0lo
|| SHL $Elo,32-14,$t0hi
XOR $t0hi,$S1hi,$S1hi
|| XOR $t0lo,$S1lo,$S1lo
|| AND $Ehi,$CHhi,$CHhi
|| AND $Elo,$CHlo,$CHlo
|| ROTL $Ghi,0,$Hhi
|| ROTL $Glo,0,$Hlo ; h = g
|| SHRU $Ehi,18,$t0hi
|| SHL $Ehi,32-18,$t0lo
XOR $t0hi,$S1hi,$S1hi
|| XOR $t0lo,$S1lo,$S1lo
|| XOR $Ghi,$CHhi,$CHhi
|| XOR $Glo,$CHlo,$CHlo ; Ch(e,f,g) = ((f^g)&e)^g
|| ROTL $Fhi,0,$Ghi
|| ROTL $Flo,0,$Glo ; g = f
|| SHRU $Elo,18,$t0lo
|| SHL $Elo,32-18,$t0hi
XOR $t0hi,$S1hi,$S1hi
|| XOR $t0lo,$S1lo,$S1lo
|| OR $Ahi,$Bhi,$MAJhi
|| OR $Alo,$Blo,$MAJlo
|| ROTL $Ehi,0,$Fhi
|| ROTL $Elo,0,$Flo ; f = e
|| SHRU $Ehi,41-32,$t0lo
|| SHL $Ehi,64-41,$t0hi
XOR $t0hi,$S1hi,$S1hi
|| XOR $t0lo,$S1lo,$S1lo
|| AND $Chi,$MAJhi,$MAJhi
|| AND $Clo,$MAJlo,$MAJlo
|| ROTL $Dhi,0,$Ehi
|| ROTL $Dlo,0,$Elo ; e = d
|| SHRU $Elo,41-32,$t0hi
|| SHL $Elo,64-41,$t0lo
XOR $t0hi,$S1hi,$S1hi
|| XOR $t0lo,$S1lo,$S1lo ; Sigma1(e)
|| AND $Ahi,$Bhi,$t1hi
|| AND $Alo,$Blo,$t1lo
|| ROTL $Chi,0,$Dhi
|| ROTL $Clo,0,$Dlo ; d = c
|| SHRU $Ahi,28,$S0hi
|| SHL $Ahi,32-28,$S0lo
OR $t1hi,$MAJhi,$MAJhi
|| OR $t1lo,$MAJlo,$MAJlo ; Maj(a,b,c) = ((a|b)&c)|(a&b)
|| ADD $CHhi,$T1hi,$T1hi
|| ADDU $CHlo,$T1carry:$T1lo,$T1carry:$T1lo ; T1 += Ch(e,f,g)
|| ROTL $Bhi,0,$Chi
|| ROTL $Blo,0,$Clo ; c = b
|| SHRU $Alo,28,$t0lo
|| SHL $Alo,32-28,$t0hi
XOR $t0hi,$S0hi,$S0hi
|| XOR $t0lo,$S0lo,$S0lo
|| ADD $S1hi,$T1hi,$T1hi
|| ADDU $S1lo,$T1carry:$T1lo,$T1carry:$T1lo ; T1 += Sigma1(e)
|| ROTL $Ahi,0,$Bhi
|| ROTL $Alo,0,$Blo ; b = a
|| SHRU $Ahi,34-32,$t0lo
|| SHL $Ahi,64-34,$t0hi
XOR $t0hi,$S0hi,$S0hi
|| XOR $t0lo,$S0lo,$S0lo
|| ADD $MAJhi,$T1hi,$T2hi
|| ADDU $MAJlo,$T1carry:$T1lo,$T2carry:$T2lo ; T2 = T1+Maj(a,b,c)
|| SHRU $Alo,34-32,$t0hi
|| SHL $Alo,64-34,$t0lo
XOR $t0hi,$S0hi,$S0hi
|| XOR $t0lo,$S0lo,$S0lo
|| ADD $Ehi,$T1hi,$T1hi
|| ADDU $Elo,$T1carry:$T1lo,$T1carry:$T1lo ; T1 += e
|| [B0] BNOP loop0_15?
|| SHRU $Ahi,39-32,$t0lo
|| SHL $Ahi,64-39,$t0hi
XOR $t0hi,$S0hi,$S0hi
|| XOR $t0lo,$S0lo,$S0lo
|| [B0] LDNDW *$INP++,B11:B10 ; pre-fetch input
||[!B1] BNOP break?
|| SHRU $Alo,39-32,$t0hi
|| SHL $Alo,64-39,$t0lo
XOR $t0hi,$S0hi,$S0hi
|| XOR $t0lo,$S0lo,$S0lo ; Sigma0(a)
|| ADD $T1carry,$T1hi,$Ehi
|| MV $T1lo,$Elo ; e = T1
||[!B0] LDW *${Xihi}[28],$T1hi
||[!B0] LDW *${Xilo}[28],$T1lo ; X[i+14]
ADD $S0hi,$T2hi,$T2hi
|| ADDU $S0lo,$T2carry:$T2lo,$T2carry:$T2lo ; T2 += Sigma0(a)
|| [B1] LDDW *$K512++,$Khi:$Klo ; pre-fetch K512[i]
NOP ; avoid cross-path stall
ADD $T2carry,$T2hi,$Ahi
|| MV $T2lo,$Alo ; a = T2
|| [B0] SUB B0,1,B0
;;===== branch to loop0_15? is taken here
NOP
;;===== branch to break? is taken here
LDW *${Xihi}[2],$T2hi
|| LDW *${Xilo}[2],$T2lo ; X[i+1]
|| SHRU $T1hi,19,$S1hi
|| SHL $T1hi,32-19,$S1lo
SHRU $T1lo,19,$t0lo
|| SHL $T1lo,32-19,$t0hi
XOR $t0hi,$S1hi,$S1hi
|| XOR $t0lo,$S1lo,$S1lo
|| SHRU $T1hi,61-32,$t0lo
|| SHL $T1hi,64-61,$t0hi
XOR $t0hi,$S1hi,$S1hi
|| XOR $t0lo,$S1lo,$S1lo
|| SHRU $T1lo,61-32,$t0hi
|| SHL $T1lo,64-61,$t0lo
XOR $t0hi,$S1hi,$S1hi
|| XOR $t0lo,$S1lo,$S1lo
|| SHRU $T1hi,6,$t0hi
|| SHL $T1hi,32-6,$t0lo
XOR $t0hi,$S1hi,$S1hi
|| XOR $t0lo,$S1lo,$S1lo
|| SHRU $T1lo,6,$t0lo
|| LDW *${Xihi}[18],$T1hi
|| LDW *${Xilo}[18],$T1lo ; X[i+9]
XOR $t0lo,$S1lo,$S1lo ; sigma1(Xi[i+14])
|| LDW *${Xihi}[0],$CHhi
|| LDW *${Xilo}[0],$CHlo ; X[i]
|| SHRU $T2hi,1,$S0hi
|| SHL $T2hi,32-1,$S0lo
SHRU $T2lo,1,$t0lo
|| SHL $T2lo,32-1,$t0hi
XOR $t0hi,$S0hi,$S0hi
|| XOR $t0lo,$S0lo,$S0lo
|| SHRU $T2hi,8,$t0hi
|| SHL $T2hi,32-8,$t0lo
XOR $t0hi,$S0hi,$S0hi
|| XOR $t0lo,$S0lo,$S0lo
|| SHRU $T2lo,8,$t0lo
|| SHL $T2lo,32-8,$t0hi
XOR $t0hi,$S0hi,$S0hi
|| XOR $t0lo,$S0lo,$S0lo
|| ADD $S1hi,$T1hi,$T1hi
|| ADDU $S1lo,$T1lo,$T1carry:$T1lo ; T1 = X[i+9]+sigma1()
|| [B1] BNOP loop16_79?
|| SHRU $T2hi,7,$t0hi
|| SHL $T2hi,32-7,$t0lo
XOR $t0hi,$S0hi,$S0hi
|| XOR $t0lo,$S0lo,$S0lo
|| ADD $CHhi,$T1hi,$T1hi
|| ADDU $CHlo,$T1carry:$T1lo,$T1carry:$T1lo ; T1 += X[i]
|| SHRU $T2lo,7,$t0lo
XOR $t0lo,$S0lo,$S0lo ; sigma0(Xi[i+1])
ADD $S0hi,$T1hi,$T1hi
|| ADDU $S0lo,$T1carry:$T1lo,$T1carry:$T1lo ; T1 += sigma0()
|| [B1] SUB B1,1,B1
NOP ; avoid cross-path stall
ADD $T1carry,$T1hi,$T1hi
;;===== branch to loop16_79? is taken here
break?:
ADD $Ahi,$Actxhi,$Ahi ; accumulate ctx
|| ADDU $Alo,$Actxlo,$Actxlo:$Alo
|| [A0] LDNDW *$INP++,B11:B10 ; pre-fetch input
|| [A0] ADDK -640,$K512 ; rewind pointer to K512
ADD $Bhi,$Bctxhi,$Bhi
|| ADDU $Blo,$Bctxlo,$Bctxlo:$Blo
|| [A0] LDDW *$K512++,$Khi:$Klo ; pre-fetch K512[0]
ADD $Chi,$Cctxhi,$Chi
|| ADDU $Clo,$Cctxlo,$Cctxlo:$Clo
|| ADD $Actxlo,$Ahi,$Ahi
||[!A0] MV $CTXA,$CTXB
ADD $Dhi,$Dctxhi,$Dhi
|| ADDU $Dlo,$Dctxlo,$Dctxlo:$Dlo
|| ADD $Bctxlo,$Bhi,$Bhi
||[!A0] STW $Ahi,*${CTXA}[0^.LITTLE_ENDIAN] ; save ctx
||[!A0] STW $Alo,*${CTXB}[1^.LITTLE_ENDIAN]
ADD $Ehi,$Ectxhi,$Ehi
|| ADDU $Elo,$Ectxlo,$Ectxlo:$Elo
|| ADD $Cctxlo,$Chi,$Chi
|| [A0] BNOP outerloop?
||[!A0] STW $Bhi,*${CTXA}[2^.LITTLE_ENDIAN]
||[!A0] STW $Blo,*${CTXB}[3^.LITTLE_ENDIAN]
ADD $Fhi,$Fctxhi,$Fhi
|| ADDU $Flo,$Fctxlo,$Fctxlo:$Flo
|| ADD $Dctxlo,$Dhi,$Dhi
||[!A0] STW $Chi,*${CTXA}[4^.LITTLE_ENDIAN]
||[!A0] STW $Clo,*${CTXB}[5^.LITTLE_ENDIAN]
ADD $Ghi,$Gctxhi,$Ghi
|| ADDU $Glo,$Gctxlo,$Gctxlo:$Glo
|| ADD $Ectxlo,$Ehi,$Ehi
||[!A0] STW $Dhi,*${CTXA}[6^.LITTLE_ENDIAN]
||[!A0] STW $Dlo,*${CTXB}[7^.LITTLE_ENDIAN]
ADD $Hhi,$Hctxhi,$Hhi
|| ADDU $Hlo,$Hctxlo,$Hctxlo:$Hlo
|| ADD $Fctxlo,$Fhi,$Fhi
||[!A0] STW $Ehi,*${CTXA}[8^.LITTLE_ENDIAN]
||[!A0] STW $Elo,*${CTXB}[9^.LITTLE_ENDIAN]
ADD $Gctxlo,$Ghi,$Ghi
||[!A0] STW $Fhi,*${CTXA}[10^.LITTLE_ENDIAN]
||[!A0] STW $Flo,*${CTXB}[11^.LITTLE_ENDIAN]
ADD $Hctxlo,$Hhi,$Hhi
||[!A0] STW $Ghi,*${CTXA}[12^.LITTLE_ENDIAN]
||[!A0] STW $Glo,*${CTXB}[13^.LITTLE_ENDIAN]
;;===== branch to outerloop? is taken here
STW $Hhi,*${CTXA}[14^.LITTLE_ENDIAN]
|| STW $Hlo,*${CTXB}[15^.LITTLE_ENDIAN]
|| MVK -40,B0
ADD FP,B0,SP ; destroy circular buffer
|| LDDW *FP[-4],A11:A10
LDDW *SP[2],A13:A12
|| LDDW *FP[-2],B11:B10
LDDW *SP[4],B13:B12
|| BNOP RA
LDW *++SP(40),FP ; restore frame pointer
MVK 0,B0
MVC B0,AMR ; clear AMR
NOP 2 ; wait till FP is committed
.endasmfunc
.sect ".const:sha_asm"
.align 128
K512:
.uword 0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd
.uword 0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc
.uword 0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019
.uword 0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118
.uword 0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe
.uword 0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2
.uword 0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1
.uword 0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694
.uword 0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3
.uword 0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65
.uword 0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483
.uword 0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5
.uword 0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210
.uword 0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4
.uword 0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725
.uword 0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70
.uword 0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926
.uword 0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df
.uword 0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8
.uword 0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b
.uword 0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001
.uword 0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30
.uword 0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910
.uword 0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8
.uword 0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53
.uword 0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8
.uword 0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb
.uword 0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3
.uword 0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60
.uword 0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec
.uword 0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9
.uword 0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b
.uword 0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207
.uword 0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178
.uword 0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6
.uword 0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b
.uword 0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493
.uword 0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c
.uword 0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a
.uword 0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817
.cstring "SHA512 block transform for C64x+, CRYPTOGAMS by <appro\@openssl.org>"
.align 4
___
print $code;
close STDOUT;