Commit 520d4d5

v3.21.1

JayDDee committed Feb 9, 2023
1 parent da7030f commit 520d4d5
Showing 19 changed files with 260 additions and 798 deletions.
7 changes: 6 additions & 1 deletion RELEASE_NOTES
@@ -65,10 +65,15 @@ If not what makes it happen or not happen?
 Change Log
 ----------
 
+v3.21.1
+
+Fixed a segfault in some obsolete algos.
+Small optimizations to Hamsi & Shabal AVX2 & AVX512.
+
 v3.21.0
 
 Added minotaurx algo for stratum only.
-Blake256 & sha256 prehash optimised to ignore zero-padded data for AVX2 & AVX512.
+Blake256 & sha256 prehash optimized to ignore zero-padded data for AVX2 & AVX512.
 Other small improvements.
 
 v3.20.3
100 changes: 41 additions & 59 deletions algo/hamsi/hamsi-hash-4way.c
@@ -585,9 +585,8 @@ do { \
 t = _mm512_xor_si512( t, c ); \
 d = mm512_xoror( a, b, t ); \
 t = mm512_xorand( t, a, b ); \
-b = mm512_xor3( b, d, t ); \
 a = c; \
-c = b; \
+c = mm512_xor3( b, d, t ); \
 b = d; \
 d = mm512_not( t ); \
 } while (0)
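The change above shortens the sBox tail by one dependent move: instead of computing the 3-input XOR into b and then copying b into c, the result is written straight into c. A minimal scalar sketch of the before/after data flow, using plain 64-bit words in place of the __m512i lanes (variable names are illustrative only):

    /* before: xor3 lands in b, then b is copied to c */
    b = b ^ d ^ t;
    a = c;
    c = b;
    b = d;
    d = ~t;

    /* after: same final state, one dependent move fewer */
    a = c;
    c = b ^ d ^ t;
    b = d;
    d = ~t;

The AVX2 version of the same macro further down receives the equivalent change, using two _mm256_xor_si256 calls in place of the single xor3.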
@@ -635,7 +634,7 @@ do { \
 
 #define ROUND_BIG8( alpha ) \
 do { \
-__m512i t0, t1, t2, t3; \
+__m512i t0, t1, t2, t3, t4, t5; \
 s0 = _mm512_xor_si512( s0, alpha[ 0] ); /* m0 */ \
 s1 = _mm512_xor_si512( s1, alpha[ 1] ); /* c0 */ \
 s2 = _mm512_xor_si512( s2, alpha[ 2] ); /* m1 */ \
@@ -662,43 +661,35 @@ do { \
 s5 = mm512_swap64_32( s5 ); \
 sD = mm512_swap64_32( sD ); \
 sE = mm512_swap64_32( sE ); \
-t1 = _mm512_mask_blend_epi32( 0xaaaa, s4, s5 ); \
-t3 = _mm512_mask_blend_epi32( 0xaaaa, sD, sE ); \
-L8( s0, t1, s9, t3 ); \
-s4 = _mm512_mask_blend_epi32( 0x5555, s4, t1 ); \
-s5 = _mm512_mask_blend_epi32( 0xaaaa, s5, t1 ); \
-sD = _mm512_mask_blend_epi32( 0x5555, sD, t3 ); \
-sE = _mm512_mask_blend_epi32( 0xaaaa, sE, t3 ); \
+t0 = _mm512_mask_blend_epi32( 0xaaaa, s4, s5 ); \
+t1 = _mm512_mask_blend_epi32( 0xaaaa, sD, sE ); \
+L8( s0, t0, s9, t1 ); \
 \
 s6 = mm512_swap64_32( s6 ); \
 sF = mm512_swap64_32( sF ); \
-t1 = _mm512_mask_blend_epi32( 0xaaaa, s5, s6 ); \
+t2 = _mm512_mask_blend_epi32( 0xaaaa, s5, s6 ); \
 t3 = _mm512_mask_blend_epi32( 0xaaaa, sE, sF ); \
-L8( s1, t1, sA, t3 ); \
-s5 = _mm512_mask_blend_epi32( 0x5555, s5, t1 ); \
-s6 = _mm512_mask_blend_epi32( 0xaaaa, s6, t1 ); \
-sE = _mm512_mask_blend_epi32( 0x5555, sE, t3 ); \
-sF = _mm512_mask_blend_epi32( 0xaaaa, sF, t3 ); \
+L8( s1, t2, sA, t3 ); \
+s5 = _mm512_mask_blend_epi32( 0x5555, t0, t2 ); \
+sE = _mm512_mask_blend_epi32( 0x5555, t1, t3 ); \
 \
 s7 = mm512_swap64_32( s7 ); \
 sC = mm512_swap64_32( sC ); \
-t1 = _mm512_mask_blend_epi32( 0xaaaa, s6, s7 ); \
-t3 = _mm512_mask_blend_epi32( 0xaaaa, sF, sC ); \
-L8( s2, t1, sB, t3 ); \
-s6 = _mm512_mask_blend_epi32( 0x5555, s6, t1 ); \
-s7 = _mm512_mask_blend_epi32( 0xaaaa, s7, t1 ); \
-sF = _mm512_mask_blend_epi32( 0x5555, sF, t3 ); \
-sC = _mm512_mask_blend_epi32( 0xaaaa, sC, t3 ); \
+t4 = _mm512_mask_blend_epi32( 0xaaaa, s6, s7 ); \
+t5 = _mm512_mask_blend_epi32( 0xaaaa, sF, sC ); \
+L8( s2, t4, sB, t5 ); \
+s6 = _mm512_mask_blend_epi32( 0x5555, t2, t4 ); \
+sF = _mm512_mask_blend_epi32( 0x5555, t3, t5 ); \
 s6 = mm512_swap64_32( s6 ); \
 sF = mm512_swap64_32( sF ); \
 \
-t1 = _mm512_mask_blend_epi32( 0xaaaa, s7, s4 ); \
+t2 = _mm512_mask_blend_epi32( 0xaaaa, s7, s4 ); \
 t3 = _mm512_mask_blend_epi32( 0xaaaa, sC, sD ); \
-L8( s3, t1, s8, t3 ); \
-s7 = _mm512_mask_blend_epi32( 0x5555, s7, t1 ); \
-s4 = _mm512_mask_blend_epi32( 0xaaaa, s4, t1 ); \
-sC = _mm512_mask_blend_epi32( 0x5555, sC, t3 ); \
-sD = _mm512_mask_blend_epi32( 0xaaaa, sD, t3 ); \
+L8( s3, t2, s8, t3 ); \
+s7 = _mm512_mask_blend_epi32( 0x5555, t4, t2 ); \
+s4 = _mm512_mask_blend_epi32( 0xaaaa, t0, t2 ); \
+sC = _mm512_mask_blend_epi32( 0x5555, t5, t3 ); \
+sD = _mm512_mask_blend_epi32( 0xaaaa, t1, t3 ); \
 s7 = mm512_swap64_32( s7 ); \
 sC = mm512_swap64_32( sC ); \
 \
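The rewritten round no longer blends each L8 output back into s4-s7 and sC-sF only to re-blend those registers for the next L8 call; the interleaved operands stay live in the widened temporary set t0-t5 and each state register is rebuilt exactly once, which is where the reduction in blend instructions comes from. The masks pair complementary 32-bit lanes: 0xaaaa takes the odd lanes from the second operand, 0x5555 the even lanes. A small sketch of the pack/recombine identity the masks rely on (helper names are illustrative, not from the source):

    #include <immintrin.h>

    /* even 32-bit lanes from lo, odd lanes from hi */
    static inline __m512i pair_odd( __m512i lo, __m512i hi )
    {
        return _mm512_mask_blend_epi32( 0xaaaa, lo, hi );
    }

    /* rebuild a register: even lanes from a, odd lanes from b */
    static inline __m512i rebuild_even( __m512i a, __m512i b )
    {
        return _mm512_mask_blend_epi32( 0x5555, b, a );
    }

The AVX2 ROUND_BIG below receives the same restructuring, with _mm256_blend_epi32 immediates standing in for the mask registers.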
@@ -924,10 +915,9 @@ do { \
 d = _mm256_xor_si256( d, a ); \
 a = _mm256_and_si256( a, b ); \
 t = _mm256_xor_si256( t, a ); \
-b = _mm256_xor_si256( b, d ); \
-b = _mm256_xor_si256( b, t ); \
 a = c; \
-c = b; \
+c = _mm256_xor_si256( b, d ); \
+c = _mm256_xor_si256( c, t ); \
 b = d; \
 d = mm256_not( t ); \
 } while (0)
@@ -977,7 +967,7 @@ do { \
 
 #define ROUND_BIG( alpha ) \
 do { \
-__m256i t0, t1, t2, t3; \
+__m256i t0, t1, t2, t3, t4, t5; \
 s0 = _mm256_xor_si256( s0, alpha[ 0] ); \
 s1 = _mm256_xor_si256( s1, alpha[ 1] ); \
 s2 = _mm256_xor_si256( s2, alpha[ 2] ); \
@@ -1004,43 +994,35 @@ do { \
 s5 = mm256_swap64_32( s5 ); \
 sD = mm256_swap64_32( sD ); \
 sE = mm256_swap64_32( sE ); \
-t1 = _mm256_blend_epi32( s4, s5, 0xaa ); \
-t3 = _mm256_blend_epi32( sD, sE, 0xaa ); \
-L( s0, t1, s9, t3 ); \
-s4 = _mm256_blend_epi32( s4, t1, 0x55 ); \
-s5 = _mm256_blend_epi32( s5, t1, 0xaa ); \
-sD = _mm256_blend_epi32( sD, t3, 0x55 ); \
-sE = _mm256_blend_epi32( sE, t3, 0xaa ); \
+t0 = _mm256_blend_epi32( s4, s5, 0xaa ); \
+t1 = _mm256_blend_epi32( sD, sE, 0xaa ); \
+L( s0, t0, s9, t1 ); \
 \
 s6 = mm256_swap64_32( s6 ); \
 sF = mm256_swap64_32( sF ); \
-t1 = _mm256_blend_epi32( s5, s6, 0xaa ); \
+t2 = _mm256_blend_epi32( s5, s6, 0xaa ); \
 t3 = _mm256_blend_epi32( sE, sF, 0xaa ); \
-L( s1, t1, sA, t3 ); \
-s5 = _mm256_blend_epi32( s5, t1, 0x55 ); \
-s6 = _mm256_blend_epi32( s6, t1, 0xaa ); \
-sE = _mm256_blend_epi32( sE, t3, 0x55 ); \
-sF = _mm256_blend_epi32( sF, t3, 0xaa ); \
+L( s1, t2, sA, t3 ); \
+s5 = _mm256_blend_epi32( t0, t2, 0x55 ); \
+sE = _mm256_blend_epi32( t1, t3, 0x55 ); \
 \
 s7 = mm256_swap64_32( s7 ); \
 sC = mm256_swap64_32( sC ); \
-t1 = _mm256_blend_epi32( s6, s7, 0xaa ); \
-t3 = _mm256_blend_epi32( sF, sC, 0xaa ); \
-L( s2, t1, sB, t3 ); \
-s6 = _mm256_blend_epi32( s6, t1, 0x55 ); \
-s7 = _mm256_blend_epi32( s7, t1, 0xaa ); \
-sF = _mm256_blend_epi32( sF, t3, 0x55 ); \
-sC = _mm256_blend_epi32( sC, t3, 0xaa ); \
+t4 = _mm256_blend_epi32( s6, s7, 0xaa ); \
+t5 = _mm256_blend_epi32( sF, sC, 0xaa ); \
+L( s2, t4, sB, t5 ); \
+s6 = _mm256_blend_epi32( t2, t4, 0x55 ); \
+sF = _mm256_blend_epi32( t3, t5, 0x55 ); \
 s6 = mm256_swap64_32( s6 ); \
 sF = mm256_swap64_32( sF ); \
 \
-t1 = _mm256_blend_epi32( s7, s4, 0xaa ); \
+t2 = _mm256_blend_epi32( s7, s4, 0xaa ); \
 t3 = _mm256_blend_epi32( sC, sD, 0xaa ); \
-L( s3, t1, s8, t3 ); \
-s7 = _mm256_blend_epi32( s7, t1, 0x55 ); \
-s4 = _mm256_blend_epi32( s4, t1, 0xaa ); \
-sC = _mm256_blend_epi32( sC, t3, 0x55 ); \
-sD = _mm256_blend_epi32( sD, t3, 0xaa ); \
+L( s3, t2, s8, t3 ); \
+s7 = _mm256_blend_epi32( t4, t2, 0x55 ); \
+s4 = _mm256_blend_epi32( t0, t2, 0xaa ); \
+sC = _mm256_blend_epi32( t5, t3, 0x55 ); \
+sD = _mm256_blend_epi32( t1, t3, 0xaa ); \
 s7 = mm256_swap64_32( s7 ); \
 sC = mm256_swap64_32( sC ); \
 \
78 changes: 46 additions & 32 deletions algo/haval/haval-hash-4way.c
@@ -141,6 +141,13 @@ do { \
 _mm_add_epi32( w, _mm_set1_epi32( c ) ) ); \
 } while (0)
 
+#define STEP1(n, p, x7, x6, x5, x4, x3, x2, x1, x0, w) \
+do { \
+__m128i t = FP ## n ## _ ## p(x6, x5, x4, x3, x2, x1, x0); \
+x7 = _mm_add_epi32( _mm_add_epi32( mm128_ror_32( t, 7 ), \
+mm128_ror_32( x7, 11 ) ), w ); \
+} while (0)
+
 /*
  * PASSy(n, in) computes pass number "y", for a total of "n", using the
  * one-argument macro "in" to access input words. Current state is assumed
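STEP1 is a specialization of STEP for the first pass, where the HAVAL round constant is always zero: the _mm_add_epi32( w, _mm_set1_epi32( c ) ) term collapses to w, dropping a broadcast and an add from all 32 first-pass steps (PASS1 in the next hunk is switched accordingly). A scalar sketch of the two step forms, assuming a plain rotate-right helper:

    #include <stdint.h>

    static inline uint32_t ror32( uint32_t x, unsigned n )
    {
        return ( x >> n ) | ( x << ( 32 - n ) );
    }

    /* generic step: f is the boolean function output for this pass */
    static inline uint32_t haval_step( uint32_t f, uint32_t x7,
                                       uint32_t w, uint32_t k )
    {
        return ror32( f, 7 ) + ror32( x7, 11 ) + w + k;
    }

    /* pass-1 step: k == 0 for every step, so the add disappears */
    static inline uint32_t haval_step1( uint32_t f, uint32_t x7, uint32_t w )
    {
        return ror32( f, 7 ) + ror32( x7, 11 ) + w;
    }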
@@ -152,22 +159,22 @@
 #define PASS1(n, in) do { \
 unsigned pass_count; \
 for (pass_count = 0; pass_count < 32; pass_count += 8) { \
-STEP(n, 1, s7, s6, s5, s4, s3, s2, s1, s0, \
-in(pass_count + 0), SPH_C32(0x00000000)); \
-STEP(n, 1, s6, s5, s4, s3, s2, s1, s0, s7, \
-in(pass_count + 1), SPH_C32(0x00000000)); \
-STEP(n, 1, s5, s4, s3, s2, s1, s0, s7, s6, \
-in(pass_count + 2), SPH_C32(0x00000000)); \
-STEP(n, 1, s4, s3, s2, s1, s0, s7, s6, s5, \
-in(pass_count + 3), SPH_C32(0x00000000)); \
-STEP(n, 1, s3, s2, s1, s0, s7, s6, s5, s4, \
-in(pass_count + 4), SPH_C32(0x00000000)); \
-STEP(n, 1, s2, s1, s0, s7, s6, s5, s4, s3, \
-in(pass_count + 5), SPH_C32(0x00000000)); \
-STEP(n, 1, s1, s0, s7, s6, s5, s4, s3, s2, \
-in(pass_count + 6), SPH_C32(0x00000000)); \
-STEP(n, 1, s0, s7, s6, s5, s4, s3, s2, s1, \
-in(pass_count + 7), SPH_C32(0x00000000)); \
+STEP1(n, 1, s7, s6, s5, s4, s3, s2, s1, s0, \
+in(pass_count + 0) ); \
+STEP1(n, 1, s6, s5, s4, s3, s2, s1, s0, s7, \
+in(pass_count + 1) ); \
+STEP1(n, 1, s5, s4, s3, s2, s1, s0, s7, s6, \
+in(pass_count + 2) ); \
+STEP1(n, 1, s4, s3, s2, s1, s0, s7, s6, s5, \
+in(pass_count + 3) ); \
+STEP1(n, 1, s3, s2, s1, s0, s7, s6, s5, s4, \
+in(pass_count + 4) ); \
+STEP1(n, 1, s2, s1, s0, s7, s6, s5, s4, s3, \
+in(pass_count + 5) ); \
+STEP1(n, 1, s1, s0, s7, s6, s5, s4, s3, s2, \
+in(pass_count + 6) ); \
+STEP1(n, 1, s0, s7, s6, s5, s4, s3, s2, s1, \
+in(pass_count + 7) ); \
 } \
 } while (0)
 
@@ -605,25 +612,32 @@ do { \
 _mm256_add_epi32( w, _mm256_set1_epi32( c ) ) ); \
 } while (0)
 
+#define STEP1_8W(n, p, x7, x6, x5, x4, x3, x2, x1, x0, w) \
+do { \
+__m256i t = FP ## n ## _ ## p ## _8W(x6, x5, x4, x3, x2, x1, x0); \
+x7 = _mm256_add_epi32( _mm256_add_epi32( mm256_ror_32( t, 7 ), \
+mm256_ror_32( x7, 11 ) ), w ); \
+} while (0)
+
 #define PASS1_8W(n, in) do { \
 unsigned pass_count; \
 for (pass_count = 0; pass_count < 32; pass_count += 8) { \
-STEP_8W(n, 1, s7, s6, s5, s4, s3, s2, s1, s0, \
-in(pass_count + 0), SPH_C32(0x00000000)); \
-STEP_8W(n, 1, s6, s5, s4, s3, s2, s1, s0, s7, \
-in(pass_count + 1), SPH_C32(0x00000000)); \
-STEP_8W(n, 1, s5, s4, s3, s2, s1, s0, s7, s6, \
-in(pass_count + 2), SPH_C32(0x00000000)); \
-STEP_8W(n, 1, s4, s3, s2, s1, s0, s7, s6, s5, \
-in(pass_count + 3), SPH_C32(0x00000000)); \
-STEP_8W(n, 1, s3, s2, s1, s0, s7, s6, s5, s4, \
-in(pass_count + 4), SPH_C32(0x00000000)); \
-STEP_8W(n, 1, s2, s1, s0, s7, s6, s5, s4, s3, \
-in(pass_count + 5), SPH_C32(0x00000000)); \
-STEP_8W(n, 1, s1, s0, s7, s6, s5, s4, s3, s2, \
-in(pass_count + 6), SPH_C32(0x00000000)); \
-STEP_8W(n, 1, s0, s7, s6, s5, s4, s3, s2, s1, \
-in(pass_count + 7), SPH_C32(0x00000000)); \
+STEP1_8W(n, 1, s7, s6, s5, s4, s3, s2, s1, s0, \
+in(pass_count + 0) ); \
+STEP1_8W(n, 1, s6, s5, s4, s3, s2, s1, s0, s7, \
+in(pass_count + 1) ); \
+STEP1_8W(n, 1, s5, s4, s3, s2, s1, s0, s7, s6, \
+in(pass_count + 2) ); \
+STEP1_8W(n, 1, s4, s3, s2, s1, s0, s7, s6, s5, \
+in(pass_count + 3) ); \
+STEP1_8W(n, 1, s3, s2, s1, s0, s7, s6, s5, s4, \
+in(pass_count + 4) ); \
+STEP1_8W(n, 1, s2, s1, s0, s7, s6, s5, s4, s3, \
+in(pass_count + 5) ); \
+STEP1_8W(n, 1, s1, s0, s7, s6, s5, s4, s3, s2, \
+in(pass_count + 6) ); \
+STEP1_8W(n, 1, s0, s7, s6, s5, s4, s3, s2, s1, \
+in(pass_count + 7) ); \
 } \
 } while (0)
 
22 changes: 11 additions & 11 deletions algo/keccak/keccak-hash-4way.c
@@ -72,11 +72,11 @@ static const uint64_t RC[] = {
 // Targetted macros, keccak-macros.h is included for each target.
 
 #define DECL64(x) __m512i x
-#define XOR(d, a, b) (d = _mm512_xor_si512(a,b))
-#define XOR64 XOR
+#define XOR(d, a, b) (d = _mm512_xor_si512(a,b))
+#define XOR64 XOR
 #define AND64(d, a, b) (d = _mm512_and_si512(a,b))
 #define OR64(d, a, b) (d = _mm512_or_si512(a,b))
-#define NOT64(d, s) (d = _mm512_xor_si512(s,m512_neg1))
+#define NOT64(d, s) (d = mm512_not( s ) )
 #define ROL64(d, v, n) (d = mm512_rol_64(v, n))
 #define XOROR(d, a, b, c) (d = mm512_xoror(a, b, c))
 #define XORAND(d, a, b, c) (d = mm512_xorand(a, b, c))
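NOT64 now routes through the library's mm512_not wrapper instead of XORing with the m512_neg1 all-ones constant. On AVX-512 a vector NOT can be done in a single vpternlogq with no constant register; the sketch below shows one way such a wrapper can be written, offered as an assumption about simd-utils rather than a quote from it (mm512_not_sketch is a hypothetical name):

    #include <immintrin.h>

    static inline __m512i mm512_not_sketch( __m512i x )
    {
        /* truth table 0x55 returns the complement of the third operand;
           passing x in all three slots yields ~x in one instruction */
        return _mm512_ternarylogic_epi64( x, x, x, 0x55 );
    }

The AVX2 block below makes the matching change to mm256_not; AVX2 has no ternary-logic instruction, so that wrapper presumably still XORs with all-ones, but the constant is now managed in one place.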
@@ -257,14 +257,14 @@ keccak512_8way_close(void *cc, void *dst)
 kc->w[j ] = _mm256_xor_si256( kc->w[j], buf[j] ); \
 } while (0)
 
-#define DECL64(x) __m256i x
-#define XOR(d, a, b) (d = _mm256_xor_si256(a,b))
-#define XOR64 XOR
-#define AND64(d, a, b) (d = _mm256_and_si256(a,b))
-#define OR64(d, a, b) (d = _mm256_or_si256(a,b))
-#define NOT64(d, s) (d = _mm256_xor_si256(s,m256_neg1))
-#define ROL64(d, v, n) (d = mm256_rol_64(v, n))
-#define XOROR(d, a, b, c) (d = _mm256_xor_si256(a, _mm256_or_si256(b, c)))
+#define DECL64(x) __m256i x
+#define XOR(d, a, b) (d = _mm256_xor_si256(a,b))
+#define XOR64 XOR
+#define AND64(d, a, b) (d = _mm256_and_si256(a,b))
+#define OR64(d, a, b) (d = _mm256_or_si256(a,b))
+#define NOT64(d, s) (d = mm256_not( s ) )
+#define ROL64(d, v, n) (d = mm256_rol_64(v, n))
+#define XOROR(d, a, b, c) (d = _mm256_xor_si256(a, _mm256_or_si256(b, c)))
 #define XORAND(d, a, b, c) (d = _mm256_xor_si256(a, _mm256_and_si256(b, c)))
 #define XOR3( d, a, b, c ) (d = mm256_xor3( a, b, c ))
 
2 changes: 1 addition & 1 deletion algo/lyra2/lyra2z330.c
@@ -3,7 +3,7 @@
 #include "lyra2.h"
 #include "simd-utils.h"
 
-__thread uint64_t* lyra2z330_wholeMatrix;
+static __thread uint64_t* lyra2z330_wholeMatrix;
 
 void lyra2z330_hash(void *state, const void *input, uint32_t height)
 {
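Marking the per-thread matrix pointer static gives it internal linkage, so the definition can no longer clash with an identically named symbol in another translation unit; this is plausibly connected to the segfault in obsolete algos mentioned in the release notes, though the commit itself does not say so. A minimal sketch of the hazard, with hypothetical file and variable names:

    /* algo_a.c -- external linkage, tentative definition: */
    __thread uint64_t *wholeMatrix;

    /* algo_b.c -- a second tentative definition of the same name;
       under older compiler defaults (-fcommon) the linker merges the
       two into ONE object, so each algo silently overwrites the
       other's pointer */
    __thread uint64_t *wholeMatrix;

    /* the fix: internal linkage keeps each file's pointer private */
    static __thread uint64_t *wholeMatrix;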