From 221bc3d1d5584726b57dde86c1e0abdcf41f6713 Mon Sep 17 00:00:00 2001
From: Johnny Deluxe
Date: Sat, 26 Jan 2019 15:36:09 +0100
Subject: [PATCH] unified shabal

---
 src/c/common.c              |  10 +-
 src/c/common.h              |  60 +++++-
 src/c/mshabal_128_avx.c     | 359 +++++++++++++++++++++++++++++++---
 src/c/mshabal_128_avx.h     |  21 +-
 src/c/mshabal_128_neon.c    | 358 +++++++++++++++++++++++++++++++---
 src/c/mshabal_128_neon.h    |  22 ++-
 src/c/mshabal_128_sse2.c    | 356 +++++++++++++++++++++++++++++++---
 src/c/mshabal_128_sse2.h    |  21 +-
 src/c/mshabal_256_avx2.c    | 333 +++++++++++++++++++++++++++++--
 src/c/mshabal_256_avx2.h    |   9 +-
 src/c/mshabal_512_avx512f.c | 377 ++++++++++++++++++++++++++++++++----
 src/c/mshabal_512_avx512f.h |   8 +-
 src/c/shabal_avx.c          |   6 +-
 src/c/shabal_neon.c         |   7 +-
 src/c/shabal_sse2.c         |   6 +-
 src/c/sph_shabal.c          |  90 ++++++++-
 src/c/sph_shabal.h          |   7 +-
 17 files changed, 1850 insertions(+), 200 deletions(-)

diff --git a/src/c/common.c b/src/c/common.c
index 065edf4..4bb8ef1 100644
--- a/src/c/common.c
+++ b/src/c/common.c
@@ -1,7 +1,15 @@
 #include "common.h"
 #include <string.h>
 
+void write_seed(char seed[32], uint64_t numeric_id) {
+    numeric_id = bswap_64(numeric_id);
+    memmove(&seed[0], &numeric_id, 8);
+    memset(&seed[8], 0, 8);
+    seed[16] = -128;  // shabal message termination bit
+    memset(&seed[17], 0, 15);
+}
+
 void write_term(char term[32]) {
     term[0] = -128;  // shabal message termination bit
     memset(&term[1], 0, 31);
-}
\ No newline at end of file
+}
diff --git a/src/c/common.h b/src/c/common.h
index e50a43b..a7aa9b8 100644
--- a/src/c/common.h
+++ b/src/c/common.h
@@ -1,9 +1,65 @@
+#include <stdint.h>
+
 #pragma once
 
+#ifdef _MSC_VER
+
+#include <stdlib.h>
+#define bswap_32(x) _byteswap_ulong(x)
+#define bswap_64(x) _byteswap_uint64(x)
+
+#elif defined(__APPLE__)
+
+// Mac OS X / Darwin features
+#include <libkern/OSByteOrder.h>
+#define bswap_32(x) OSSwapInt32(x)
+#define bswap_64(x) OSSwapInt64(x)
+
+#elif defined(__sun) || defined(sun)
+
+#include <sys/byteorder.h>
+#define bswap_32(x) BSWAP_32(x)
+#define bswap_64(x) BSWAP_64(x)
+
+#elif defined(__FreeBSD__)
+
+#include <sys/endian.h>
+#define bswap_32(x) bswap32(x)
+#define bswap_64(x) bswap64(x)
+
+#elif defined(__OpenBSD__)
+
+#include <sys/types.h>
+#define bswap_32(x) swap32(x)
+#define bswap_64(x) swap64(x)
+
+#elif defined(__NetBSD__)
+
+#include <sys/types.h>
+#include <machine/bswap.h>
+#if defined(__BSWAP_RENAME) && !defined(__bswap_32)
+#define bswap_32(x) bswap32(x)
+#define bswap_64(x) bswap64(x)
+#endif
+
+#else
+
+#include <byteswap.h>
+
+#endif
+
+#define HASH_SIZE 32
+#define HASH_CAP 4096
+#define NUM_SCOOPS 4096
+#define SCOOP_SIZE 64
+#define NONCE_SIZE (HASH_CAP * SCOOP_SIZE)  // 4096*64
+
+void write_seed(char seed[32], uint64_t numeric_id);
+
+void write_term(char term[32]);
+
 #define SET_BEST_DEADLINE(d, o) \
     if ((d) < *best_deadline) { \
         *best_deadline = (d);   \
         *best_offset = (o);     \
     }
-
-void write_term(char term[32]);
\ No newline at end of file
diff --git a/src/c/mshabal_128_avx.c b/src/c/mshabal_128_avx.c
index 0c4fea1..f4c239c 100644
--- a/src/c/mshabal_128_avx.c
+++ b/src/c/mshabal_128_avx.c
@@ -1,7 +1,7 @@
 /*
  * Parallel implementation of Shabal, using the AVX unit. This code
  * compiles and runs on x86 architectures, in 32-bit or 64-bit mode,
- * which possess a SSE2-compatible SIMD unit.
+ * which possess an AVX-compatible SIMD unit.
  *
  *
  * (c) 2010 SAPHIR project.
This software is provided 'as-is', without @@ -35,7 +35,7 @@ typedef mshabal_u32 u32; #define T32(x) ((x)&C32(0xFFFFFFFF)) #define ROTL32(x, n) T32(((x) << (n)) | ((x) >> (32 - (n)))) -static void mshabal_compress_avx(mshabal_context *sc, const unsigned char *buf0, +static void mshabal_compress_avx(mshabal128_context *sc, const unsigned char *buf0, const unsigned char *buf1, const unsigned char *buf2, const unsigned char *buf3, size_t num) { _mm256_zeroupper(); @@ -47,10 +47,10 @@ static void mshabal_compress_avx(mshabal_context *sc, const unsigned char *buf0, __m128i A[12], B[16], C[16]; __m128i one; - for (j = 0; j < 12; j++) A[j] = _mm_loadu_si128((__m128i*)sc->state + j); + for (j = 0; j < 12; j++) A[j] = _mm_loadu_si128((__m128i *)sc->state + j); for (j = 0; j < 16; j++) { - B[j] = _mm_loadu_si128((__m128i*)sc->state + j + 12); - C[j] = _mm_loadu_si128((__m128i*)sc->state + j + 28); + B[j] = _mm_loadu_si128((__m128i *)sc->state + j + 12); + C[j] = _mm_loadu_si128((__m128i *)sc->state + j + 28); } one = _mm_set1_epi32(C32(0xFFFFFFFF)); @@ -58,10 +58,10 @@ static void mshabal_compress_avx(mshabal_context *sc, const unsigned char *buf0, while (num-- > 0) { for (j = 0; j < 16 * MSHABAL128_VECTOR_SIZE; j += MSHABAL128_VECTOR_SIZE) { - u.words[j + 0] = *(u32*)(buf0 + j); - u.words[j + 1] = *(u32*)(buf1 + j); - u.words[j + 2] = *(u32*)(buf2 + j); - u.words[j + 3] = *(u32*)(buf3 + j); + u.words[j + 0] = *(u32 *)(buf0 + j); + u.words[j + 1] = *(u32 *)(buf1 + j); + u.words[j + 2] = *(u32 *)(buf2 + j); + u.words[j + 3] = *(u32 *)(buf3 + j); } for (j = 0; j < 16; j++) B[j] = _mm_add_epi32(B[j], M(j)); @@ -206,15 +206,15 @@ static void mshabal_compress_avx(mshabal_context *sc, const unsigned char *buf0, if (++sc->Wlow == 0) sc->Whigh++; } - for (j = 0; j < 12; j++) _mm_storeu_si128((__m128i*)sc->state + j, A[j]); + for (j = 0; j < 12; j++) _mm_storeu_si128((__m128i *)sc->state + j, A[j]); for (j = 0; j < 16; j++) { - _mm_storeu_si128((__m128i*)sc->state + j + 12, B[j]); - _mm_storeu_si128((__m128i*)sc->state + j + 28, C[j]); + _mm_storeu_si128((__m128i *)sc->state + j + 12, B[j]); + _mm_storeu_si128((__m128i *)sc->state + j + 28, C[j]); } #undef M } -void mshabal_init_avx(mshabal_context *sc, unsigned out_size) { +void mshabal_init_avx(mshabal128_context *sc, unsigned out_size) { unsigned u; memset(sc->state, 0, sizeof sc->state); @@ -249,7 +249,7 @@ void mshabal_init_avx(mshabal_context *sc, unsigned out_size) { sc->out_size = out_size; } -void mshabal_avx(mshabal_context *sc, const void *data0, const void *data1, const void *data2, +void mshabal_avx(mshabal128_context *sc, const void *data0, const void *data1, const void *data2, const void *data3, size_t len) { size_t ptr, num; @@ -289,22 +289,21 @@ void mshabal_avx(mshabal_context *sc, const void *data0, const void *data1, cons memcpy(sc->buf2 + ptr, data2, clen); memcpy(sc->buf3 + ptr, data3, clen); mshabal_compress_avx(sc, sc->buf0, sc->buf1, sc->buf2, sc->buf3, 1); - data0 = (const unsigned char*)data0 + clen; - data1 = (const unsigned char*)data1 + clen; - data2 = (const unsigned char*)data2 + clen; - data3 = (const unsigned char*)data3 + clen; + data0 = (const unsigned char *)data0 + clen; + data1 = (const unsigned char *)data1 + clen; + data2 = (const unsigned char *)data2 + clen; + data3 = (const unsigned char *)data3 + clen; len -= clen; } } num = len >> 6; if (num != 0) { - mshabal_compress_avx(sc, (const unsigned char*)data0, (const unsigned char*)data1, - (const unsigned char*)data2, (const unsigned char*)data3, num); - data0 = 
(const unsigned char*)data0 + (num << 6); - data1 = (const unsigned char*)data1 + (num << 6); - data2 = (const unsigned char*)data2 + (num << 6); - data3 = (const unsigned char*)data3 + (num << 6); + mshabal_compress_avx(sc, data0, data1, data2, data3, num); + data0 = (const unsigned char *)data0 + (num << 6); + data1 = (const unsigned char *)data1 + (num << 6); + data2 = (const unsigned char *)data2 + (num << 6); + data3 = (const unsigned char *)data3 + (num << 6); } len &= 63; memcpy(sc->buf0, data0, len); @@ -314,7 +313,7 @@ void mshabal_avx(mshabal_context *sc, const void *data0, const void *data1, cons sc->ptr = len; } -void mshabal_close_avx(mshabal_context *sc, unsigned ub0, unsigned ub1, unsigned ub2, unsigned ub3, +void mshabal_close_avx(mshabal128_context *sc, unsigned ub0, unsigned ub1, unsigned ub2, unsigned ub3, unsigned n, void *dst0, void *dst1, void *dst2, void *dst3) { size_t ptr, off; unsigned z, out_size_w32; @@ -366,8 +365,304 @@ void mshabal_close_avx(mshabal_context *sc, unsigned ub0, unsigned ub1, unsigned } } +// Shabal routine optimized for plotting and hashing +void mshabal_hash_fast_avx(mshabal128_context_fast *sc, void *message, void *termination, + void *dst, unsigned num) { + _mm256_zeroupper(); + union input { + u32 words[16 * MSHABAL128_VECTOR_SIZE]; + __m128i data[16]; + }; + size_t j; + __m128i A[12], B[16], C[16]; + __m128i one; + + for (j = 0; j < 12; j++) A[j] = _mm_loadu_si128((__m128i *)sc->state + j); + for (j = 0; j < 16; j++) { + B[j] = _mm_loadu_si128((__m128i *)sc->state + j + 12); + C[j] = _mm_loadu_si128((__m128i *)sc->state + j + 28); + } + one = _mm_set1_epi32(C32(0xFFFFFFFF)); + + // round 1 +#define M(i) _mm_load_si128((__m128i *)message + i) + + while (num-- > 0) { + for (j = 0; j < 16; j++) B[j] = _mm_add_epi32(B[j], M(j)); + + A[0] = _mm_xor_si128(A[0], _mm_set1_epi32(sc->Wlow)); + A[1] = _mm_xor_si128(A[1], _mm_set1_epi32(sc->Whigh)); + + for (j = 0; j < 16; j++) + B[j] = _mm_or_si128(_mm_slli_epi32(B[j], 17), _mm_srli_epi32(B[j], 15)); + +#define PP(xa0, xa1, xb0, xb1, xb2, xb3, xc, xm) \ + do { \ + __m128i tt; \ + tt = _mm_or_si128(_mm_slli_epi32(xa1, 15), _mm_srli_epi32(xa1, 17)); \ + tt = _mm_add_epi32(_mm_slli_epi32(tt, 2), tt); \ + tt = _mm_xor_si128(_mm_xor_si128(xa0, tt), xc); \ + tt = _mm_add_epi32(_mm_slli_epi32(tt, 1), tt); \ + tt = _mm_xor_si128(_mm_xor_si128(tt, xb1), _mm_xor_si128(_mm_andnot_si128(xb3, xb2), xm)); \ + xa0 = tt; \ + tt = xb0; \ + tt = _mm_or_si128(_mm_slli_epi32(tt, 1), _mm_srli_epi32(tt, 31)); \ + xb0 = _mm_xor_si128(tt, _mm_xor_si128(xa0, one)); \ + } while (0) + + PP(A[0x0], A[0xB], B[0x0], B[0xD], B[0x9], B[0x6], C[0x8], M(0x0)); + PP(A[0x1], A[0x0], B[0x1], B[0xE], B[0xA], B[0x7], C[0x7], M(0x1)); + PP(A[0x2], A[0x1], B[0x2], B[0xF], B[0xB], B[0x8], C[0x6], M(0x2)); + PP(A[0x3], A[0x2], B[0x3], B[0x0], B[0xC], B[0x9], C[0x5], M(0x3)); + PP(A[0x4], A[0x3], B[0x4], B[0x1], B[0xD], B[0xA], C[0x4], M(0x4)); + PP(A[0x5], A[0x4], B[0x5], B[0x2], B[0xE], B[0xB], C[0x3], M(0x5)); + PP(A[0x6], A[0x5], B[0x6], B[0x3], B[0xF], B[0xC], C[0x2], M(0x6)); + PP(A[0x7], A[0x6], B[0x7], B[0x4], B[0x0], B[0xD], C[0x1], M(0x7)); + PP(A[0x8], A[0x7], B[0x8], B[0x5], B[0x1], B[0xE], C[0x0], M(0x8)); + PP(A[0x9], A[0x8], B[0x9], B[0x6], B[0x2], B[0xF], C[0xF], M(0x9)); + PP(A[0xA], A[0x9], B[0xA], B[0x7], B[0x3], B[0x0], C[0xE], M(0xA)); + PP(A[0xB], A[0xA], B[0xB], B[0x8], B[0x4], B[0x1], C[0xD], M(0xB)); + PP(A[0x0], A[0xB], B[0xC], B[0x9], B[0x5], B[0x2], C[0xC], M(0xC)); + PP(A[0x1], A[0x0], B[0xD], B[0xA], B[0x6], 
B[0x3], C[0xB], M(0xD)); + PP(A[0x2], A[0x1], B[0xE], B[0xB], B[0x7], B[0x4], C[0xA], M(0xE)); + PP(A[0x3], A[0x2], B[0xF], B[0xC], B[0x8], B[0x5], C[0x9], M(0xF)); + + PP(A[0x4], A[0x3], B[0x0], B[0xD], B[0x9], B[0x6], C[0x8], M(0x0)); + PP(A[0x5], A[0x4], B[0x1], B[0xE], B[0xA], B[0x7], C[0x7], M(0x1)); + PP(A[0x6], A[0x5], B[0x2], B[0xF], B[0xB], B[0x8], C[0x6], M(0x2)); + PP(A[0x7], A[0x6], B[0x3], B[0x0], B[0xC], B[0x9], C[0x5], M(0x3)); + PP(A[0x8], A[0x7], B[0x4], B[0x1], B[0xD], B[0xA], C[0x4], M(0x4)); + PP(A[0x9], A[0x8], B[0x5], B[0x2], B[0xE], B[0xB], C[0x3], M(0x5)); + PP(A[0xA], A[0x9], B[0x6], B[0x3], B[0xF], B[0xC], C[0x2], M(0x6)); + PP(A[0xB], A[0xA], B[0x7], B[0x4], B[0x0], B[0xD], C[0x1], M(0x7)); + PP(A[0x0], A[0xB], B[0x8], B[0x5], B[0x1], B[0xE], C[0x0], M(0x8)); + PP(A[0x1], A[0x0], B[0x9], B[0x6], B[0x2], B[0xF], C[0xF], M(0x9)); + PP(A[0x2], A[0x1], B[0xA], B[0x7], B[0x3], B[0x0], C[0xE], M(0xA)); + PP(A[0x3], A[0x2], B[0xB], B[0x8], B[0x4], B[0x1], C[0xD], M(0xB)); + PP(A[0x4], A[0x3], B[0xC], B[0x9], B[0x5], B[0x2], C[0xC], M(0xC)); + PP(A[0x5], A[0x4], B[0xD], B[0xA], B[0x6], B[0x3], C[0xB], M(0xD)); + PP(A[0x6], A[0x5], B[0xE], B[0xB], B[0x7], B[0x4], C[0xA], M(0xE)); + PP(A[0x7], A[0x6], B[0xF], B[0xC], B[0x8], B[0x5], C[0x9], M(0xF)); + + PP(A[0x8], A[0x7], B[0x0], B[0xD], B[0x9], B[0x6], C[0x8], M(0x0)); + PP(A[0x9], A[0x8], B[0x1], B[0xE], B[0xA], B[0x7], C[0x7], M(0x1)); + PP(A[0xA], A[0x9], B[0x2], B[0xF], B[0xB], B[0x8], C[0x6], M(0x2)); + PP(A[0xB], A[0xA], B[0x3], B[0x0], B[0xC], B[0x9], C[0x5], M(0x3)); + PP(A[0x0], A[0xB], B[0x4], B[0x1], B[0xD], B[0xA], C[0x4], M(0x4)); + PP(A[0x1], A[0x0], B[0x5], B[0x2], B[0xE], B[0xB], C[0x3], M(0x5)); + PP(A[0x2], A[0x1], B[0x6], B[0x3], B[0xF], B[0xC], C[0x2], M(0x6)); + PP(A[0x3], A[0x2], B[0x7], B[0x4], B[0x0], B[0xD], C[0x1], M(0x7)); + PP(A[0x4], A[0x3], B[0x8], B[0x5], B[0x1], B[0xE], C[0x0], M(0x8)); + PP(A[0x5], A[0x4], B[0x9], B[0x6], B[0x2], B[0xF], C[0xF], M(0x9)); + PP(A[0x6], A[0x5], B[0xA], B[0x7], B[0x3], B[0x0], C[0xE], M(0xA)); + PP(A[0x7], A[0x6], B[0xB], B[0x8], B[0x4], B[0x1], C[0xD], M(0xB)); + PP(A[0x8], A[0x7], B[0xC], B[0x9], B[0x5], B[0x2], C[0xC], M(0xC)); + PP(A[0x9], A[0x8], B[0xD], B[0xA], B[0x6], B[0x3], C[0xB], M(0xD)); + PP(A[0xA], A[0x9], B[0xE], B[0xB], B[0x7], B[0x4], C[0xA], M(0xE)); + PP(A[0xB], A[0xA], B[0xF], B[0xC], B[0x8], B[0x5], C[0x9], M(0xF)); + + A[0xB] = _mm_add_epi32(A[0xB], C[0x6]); + A[0xA] = _mm_add_epi32(A[0xA], C[0x5]); + A[0x9] = _mm_add_epi32(A[0x9], C[0x4]); + A[0x8] = _mm_add_epi32(A[0x8], C[0x3]); + A[0x7] = _mm_add_epi32(A[0x7], C[0x2]); + A[0x6] = _mm_add_epi32(A[0x6], C[0x1]); + A[0x5] = _mm_add_epi32(A[0x5], C[0x0]); + A[0x4] = _mm_add_epi32(A[0x4], C[0xF]); + A[0x3] = _mm_add_epi32(A[0x3], C[0xE]); + A[0x2] = _mm_add_epi32(A[0x2], C[0xD]); + A[0x1] = _mm_add_epi32(A[0x1], C[0xC]); + A[0x0] = _mm_add_epi32(A[0x0], C[0xB]); + A[0xB] = _mm_add_epi32(A[0xB], C[0xA]); + A[0xA] = _mm_add_epi32(A[0xA], C[0x9]); + A[0x9] = _mm_add_epi32(A[0x9], C[0x8]); + A[0x8] = _mm_add_epi32(A[0x8], C[0x7]); + A[0x7] = _mm_add_epi32(A[0x7], C[0x6]); + A[0x6] = _mm_add_epi32(A[0x6], C[0x5]); + A[0x5] = _mm_add_epi32(A[0x5], C[0x4]); + A[0x4] = _mm_add_epi32(A[0x4], C[0x3]); + A[0x3] = _mm_add_epi32(A[0x3], C[0x2]); + A[0x2] = _mm_add_epi32(A[0x2], C[0x1]); + A[0x1] = _mm_add_epi32(A[0x1], C[0x0]); + A[0x0] = _mm_add_epi32(A[0x0], C[0xF]); + A[0xB] = _mm_add_epi32(A[0xB], C[0xE]); + A[0xA] = _mm_add_epi32(A[0xA], C[0xD]); + A[0x9] = _mm_add_epi32(A[0x9], C[0xC]); + A[0x8] 
= _mm_add_epi32(A[0x8], C[0xB]); + A[0x7] = _mm_add_epi32(A[0x7], C[0xA]); + A[0x6] = _mm_add_epi32(A[0x6], C[0x9]); + A[0x5] = _mm_add_epi32(A[0x5], C[0x8]); + A[0x4] = _mm_add_epi32(A[0x4], C[0x7]); + A[0x3] = _mm_add_epi32(A[0x3], C[0x6]); + A[0x2] = _mm_add_epi32(A[0x2], C[0x5]); + A[0x1] = _mm_add_epi32(A[0x1], C[0x4]); + A[0x0] = _mm_add_epi32(A[0x0], C[0x3]); + +#define SWAP_AND_SUB(xb, xc, xm) \ + do { \ + __m128i tmp; \ + tmp = xb; \ + xb = _mm_sub_epi32(xc, xm); \ + xc = tmp; \ + } while (0) + + SWAP_AND_SUB(B[0x0], C[0x0], M(0x0)); + SWAP_AND_SUB(B[0x1], C[0x1], M(0x1)); + SWAP_AND_SUB(B[0x2], C[0x2], M(0x2)); + SWAP_AND_SUB(B[0x3], C[0x3], M(0x3)); + SWAP_AND_SUB(B[0x4], C[0x4], M(0x4)); + SWAP_AND_SUB(B[0x5], C[0x5], M(0x5)); + SWAP_AND_SUB(B[0x6], C[0x6], M(0x6)); + SWAP_AND_SUB(B[0x7], C[0x7], M(0x7)); + SWAP_AND_SUB(B[0x8], C[0x8], M(0x8)); + SWAP_AND_SUB(B[0x9], C[0x9], M(0x9)); + SWAP_AND_SUB(B[0xA], C[0xA], M(0xA)); + SWAP_AND_SUB(B[0xB], C[0xB], M(0xB)); + SWAP_AND_SUB(B[0xC], C[0xC], M(0xC)); + SWAP_AND_SUB(B[0xD], C[0xD], M(0xD)); + SWAP_AND_SUB(B[0xE], C[0xE], M(0xE)); + SWAP_AND_SUB(B[0xF], C[0xF], M(0xF)); + + // move data pointer + message = (__m128i *)message + 16; + + if (++sc->Wlow == 0) sc->Whigh++; + } + + // round 2-5 +#define M2(i) _mm_load_si128((__m128i *)termination + i) + + for (int k = 0; k < 4; k++) { + for (j = 0; j < 16; j++) B[j] = _mm_add_epi32(B[j], M2(j)); + + A[0] = _mm_xor_si128(A[0], _mm_set1_epi32(sc->Wlow)); + A[1] = _mm_xor_si128(A[1], _mm_set1_epi32(sc->Whigh)); + + for (j = 0; j < 16; j++) + B[j] = _mm_or_si128(_mm_slli_epi32(B[j], 17), _mm_srli_epi32(B[j], 15)); + + PP(A[0x0], A[0xB], B[0x0], B[0xD], B[0x9], B[0x6], C[0x8], M2(0x0)); + PP(A[0x1], A[0x0], B[0x1], B[0xE], B[0xA], B[0x7], C[0x7], M2(0x1)); + PP(A[0x2], A[0x1], B[0x2], B[0xF], B[0xB], B[0x8], C[0x6], M2(0x2)); + PP(A[0x3], A[0x2], B[0x3], B[0x0], B[0xC], B[0x9], C[0x5], M2(0x3)); + PP(A[0x4], A[0x3], B[0x4], B[0x1], B[0xD], B[0xA], C[0x4], M2(0x4)); + PP(A[0x5], A[0x4], B[0x5], B[0x2], B[0xE], B[0xB], C[0x3], M2(0x5)); + PP(A[0x6], A[0x5], B[0x6], B[0x3], B[0xF], B[0xC], C[0x2], M2(0x6)); + PP(A[0x7], A[0x6], B[0x7], B[0x4], B[0x0], B[0xD], C[0x1], M2(0x7)); + PP(A[0x8], A[0x7], B[0x8], B[0x5], B[0x1], B[0xE], C[0x0], M2(0x8)); + PP(A[0x9], A[0x8], B[0x9], B[0x6], B[0x2], B[0xF], C[0xF], M2(0x9)); + PP(A[0xA], A[0x9], B[0xA], B[0x7], B[0x3], B[0x0], C[0xE], M2(0xA)); + PP(A[0xB], A[0xA], B[0xB], B[0x8], B[0x4], B[0x1], C[0xD], M2(0xB)); + PP(A[0x0], A[0xB], B[0xC], B[0x9], B[0x5], B[0x2], C[0xC], M2(0xC)); + PP(A[0x1], A[0x0], B[0xD], B[0xA], B[0x6], B[0x3], C[0xB], M2(0xD)); + PP(A[0x2], A[0x1], B[0xE], B[0xB], B[0x7], B[0x4], C[0xA], M2(0xE)); + PP(A[0x3], A[0x2], B[0xF], B[0xC], B[0x8], B[0x5], C[0x9], M2(0xF)); + + PP(A[0x4], A[0x3], B[0x0], B[0xD], B[0x9], B[0x6], C[0x8], M2(0x0)); + PP(A[0x5], A[0x4], B[0x1], B[0xE], B[0xA], B[0x7], C[0x7], M2(0x1)); + PP(A[0x6], A[0x5], B[0x2], B[0xF], B[0xB], B[0x8], C[0x6], M2(0x2)); + PP(A[0x7], A[0x6], B[0x3], B[0x0], B[0xC], B[0x9], C[0x5], M2(0x3)); + PP(A[0x8], A[0x7], B[0x4], B[0x1], B[0xD], B[0xA], C[0x4], M2(0x4)); + PP(A[0x9], A[0x8], B[0x5], B[0x2], B[0xE], B[0xB], C[0x3], M2(0x5)); + PP(A[0xA], A[0x9], B[0x6], B[0x3], B[0xF], B[0xC], C[0x2], M2(0x6)); + PP(A[0xB], A[0xA], B[0x7], B[0x4], B[0x0], B[0xD], C[0x1], M2(0x7)); + PP(A[0x0], A[0xB], B[0x8], B[0x5], B[0x1], B[0xE], C[0x0], M2(0x8)); + PP(A[0x1], A[0x0], B[0x9], B[0x6], B[0x2], B[0xF], C[0xF], M2(0x9)); + PP(A[0x2], A[0x1], B[0xA], B[0x7], B[0x3], B[0x0], C[0xE], 
M2(0xA)); + PP(A[0x3], A[0x2], B[0xB], B[0x8], B[0x4], B[0x1], C[0xD], M2(0xB)); + PP(A[0x4], A[0x3], B[0xC], B[0x9], B[0x5], B[0x2], C[0xC], M2(0xC)); + PP(A[0x5], A[0x4], B[0xD], B[0xA], B[0x6], B[0x3], C[0xB], M2(0xD)); + PP(A[0x6], A[0x5], B[0xE], B[0xB], B[0x7], B[0x4], C[0xA], M2(0xE)); + PP(A[0x7], A[0x6], B[0xF], B[0xC], B[0x8], B[0x5], C[0x9], M2(0xF)); + + PP(A[0x8], A[0x7], B[0x0], B[0xD], B[0x9], B[0x6], C[0x8], M2(0x0)); + PP(A[0x9], A[0x8], B[0x1], B[0xE], B[0xA], B[0x7], C[0x7], M2(0x1)); + PP(A[0xA], A[0x9], B[0x2], B[0xF], B[0xB], B[0x8], C[0x6], M2(0x2)); + PP(A[0xB], A[0xA], B[0x3], B[0x0], B[0xC], B[0x9], C[0x5], M2(0x3)); + PP(A[0x0], A[0xB], B[0x4], B[0x1], B[0xD], B[0xA], C[0x4], M2(0x4)); + PP(A[0x1], A[0x0], B[0x5], B[0x2], B[0xE], B[0xB], C[0x3], M2(0x5)); + PP(A[0x2], A[0x1], B[0x6], B[0x3], B[0xF], B[0xC], C[0x2], M2(0x6)); + PP(A[0x3], A[0x2], B[0x7], B[0x4], B[0x0], B[0xD], C[0x1], M2(0x7)); + PP(A[0x4], A[0x3], B[0x8], B[0x5], B[0x1], B[0xE], C[0x0], M2(0x8)); + PP(A[0x5], A[0x4], B[0x9], B[0x6], B[0x2], B[0xF], C[0xF], M2(0x9)); + PP(A[0x6], A[0x5], B[0xA], B[0x7], B[0x3], B[0x0], C[0xE], M2(0xA)); + PP(A[0x7], A[0x6], B[0xB], B[0x8], B[0x4], B[0x1], C[0xD], M2(0xB)); + PP(A[0x8], A[0x7], B[0xC], B[0x9], B[0x5], B[0x2], C[0xC], M2(0xC)); + PP(A[0x9], A[0x8], B[0xD], B[0xA], B[0x6], B[0x3], C[0xB], M2(0xD)); + PP(A[0xA], A[0x9], B[0xE], B[0xB], B[0x7], B[0x4], C[0xA], M2(0xE)); + PP(A[0xB], A[0xA], B[0xF], B[0xC], B[0x8], B[0x5], C[0x9], M2(0xF)); + + A[0xB] = _mm_add_epi32(A[0xB], C[0x6]); + A[0xA] = _mm_add_epi32(A[0xA], C[0x5]); + A[0x9] = _mm_add_epi32(A[0x9], C[0x4]); + A[0x8] = _mm_add_epi32(A[0x8], C[0x3]); + A[0x7] = _mm_add_epi32(A[0x7], C[0x2]); + A[0x6] = _mm_add_epi32(A[0x6], C[0x1]); + A[0x5] = _mm_add_epi32(A[0x5], C[0x0]); + A[0x4] = _mm_add_epi32(A[0x4], C[0xF]); + A[0x3] = _mm_add_epi32(A[0x3], C[0xE]); + A[0x2] = _mm_add_epi32(A[0x2], C[0xD]); + A[0x1] = _mm_add_epi32(A[0x1], C[0xC]); + A[0x0] = _mm_add_epi32(A[0x0], C[0xB]); + A[0xB] = _mm_add_epi32(A[0xB], C[0xA]); + A[0xA] = _mm_add_epi32(A[0xA], C[0x9]); + A[0x9] = _mm_add_epi32(A[0x9], C[0x8]); + A[0x8] = _mm_add_epi32(A[0x8], C[0x7]); + A[0x7] = _mm_add_epi32(A[0x7], C[0x6]); + A[0x6] = _mm_add_epi32(A[0x6], C[0x5]); + A[0x5] = _mm_add_epi32(A[0x5], C[0x4]); + A[0x4] = _mm_add_epi32(A[0x4], C[0x3]); + A[0x3] = _mm_add_epi32(A[0x3], C[0x2]); + A[0x2] = _mm_add_epi32(A[0x2], C[0x1]); + A[0x1] = _mm_add_epi32(A[0x1], C[0x0]); + A[0x0] = _mm_add_epi32(A[0x0], C[0xF]); + A[0xB] = _mm_add_epi32(A[0xB], C[0xE]); + A[0xA] = _mm_add_epi32(A[0xA], C[0xD]); + A[0x9] = _mm_add_epi32(A[0x9], C[0xC]); + A[0x8] = _mm_add_epi32(A[0x8], C[0xB]); + A[0x7] = _mm_add_epi32(A[0x7], C[0xA]); + A[0x6] = _mm_add_epi32(A[0x6], C[0x9]); + A[0x5] = _mm_add_epi32(A[0x5], C[0x8]); + A[0x4] = _mm_add_epi32(A[0x4], C[0x7]); + A[0x3] = _mm_add_epi32(A[0x3], C[0x6]); + A[0x2] = _mm_add_epi32(A[0x2], C[0x5]); + A[0x1] = _mm_add_epi32(A[0x1], C[0x4]); + A[0x0] = _mm_add_epi32(A[0x0], C[0x3]); + + SWAP_AND_SUB(B[0x0], C[0x0], M2(0x0)); + SWAP_AND_SUB(B[0x1], C[0x1], M2(0x1)); + SWAP_AND_SUB(B[0x2], C[0x2], M2(0x2)); + SWAP_AND_SUB(B[0x3], C[0x3], M2(0x3)); + SWAP_AND_SUB(B[0x4], C[0x4], M2(0x4)); + SWAP_AND_SUB(B[0x5], C[0x5], M2(0x5)); + SWAP_AND_SUB(B[0x6], C[0x6], M2(0x6)); + SWAP_AND_SUB(B[0x7], C[0x7], M2(0x7)); + SWAP_AND_SUB(B[0x8], C[0x8], M2(0x8)); + SWAP_AND_SUB(B[0x9], C[0x9], M2(0x9)); + SWAP_AND_SUB(B[0xA], C[0xA], M2(0xA)); + SWAP_AND_SUB(B[0xB], C[0xB], M2(0xB)); + SWAP_AND_SUB(B[0xC], C[0xC], M2(0xC)); + 
SWAP_AND_SUB(B[0xD], C[0xD], M2(0xD)); + SWAP_AND_SUB(B[0xE], C[0xE], M2(0xE)); + SWAP_AND_SUB(B[0xF], C[0xF], M2(0xF)); + + if (++sc->Wlow == 0) sc->Whigh++; + + if (sc->Wlow-- == 0) sc->Whigh--; + } + + // download SIMD aligned hashes + for (j = 0; j < 8; j++) { + _mm_storeu_si128((__m128i *)dst + j, C[j + 8]); + } + + // reset Wlow & Whigh + sc->Wlow = 1; + sc->Whigh = 0; +} + // Shabal routine optimized for mining -void mshabal_deadline_fast_avx(mshabal_context_fast *sc, void *message, void *termination, void *dst0, +void mshabal_deadline_fast_avx(mshabal128_context_fast *sc, void *message, void *termination, void *dst0, void *dst1, void *dst2, void *dst3) { _mm256_zeroupper(); union input { @@ -378,14 +673,14 @@ void mshabal_deadline_fast_avx(mshabal_context_fast *sc, void *message, void *te __m128i A[12], B[16], C[16]; __m128i one; - for (j = 0; j < 12; j++) A[j] = _mm_loadu_si128((__m128i*)sc->state + j); + for (j = 0; j < 12; j++) A[j] = _mm_loadu_si128((__m128i *)sc->state + j); for (j = 0; j < 16; j++) { - B[j] = _mm_loadu_si128((__m128i*)sc->state + j + 12); - C[j] = _mm_loadu_si128((__m128i*)sc->state + j + 28); + B[j] = _mm_loadu_si128((__m128i *)sc->state + j + 12); + C[j] = _mm_loadu_si128((__m128i *)sc->state + j + 28); } one = _mm_set1_epi32(C32(0xFFFFFFFF)); - // round 1/5 + // round 1 #define M(i) _mm_load_si128((__m128i *)message + i) for (j = 0; j < 16; j++) B[j] = _mm_add_epi32(B[j], M(j)); @@ -648,8 +943,8 @@ void mshabal_deadline_fast_avx(mshabal_context_fast *sc, void *message, void *te // download SIMD aligned deadlines u32 simd_dst[8]; - _mm_storeu_si128((__m128i*)&simd_dst[0], C[8]); - _mm_storeu_si128((__m128i*)&simd_dst[4], C[9]); + _mm_storeu_si128((__m128i *)&simd_dst[0], C[8]); + _mm_storeu_si128((__m128i *)&simd_dst[4], C[9]); // unpack SIMD data unsigned z; diff --git a/src/c/mshabal_128_avx.h b/src/c/mshabal_128_avx.h index 951659c..5a869db 100644 --- a/src/c/mshabal_128_avx.h +++ b/src/c/mshabal_128_avx.h @@ -97,14 +97,14 @@ typedef struct { mshabal_u32 state[(12 + 16 + 16) * MSHABAL128_VECTOR_SIZE]; mshabal_u32 Whigh, Wlow; unsigned out_size; -} mshabal_context; +} mshabal128_context; #pragma pack(1) typedef struct { mshabal_u32 state[(12 + 16 + 16) * MSHABAL128_VECTOR_SIZE]; mshabal_u32 Whigh, Wlow; unsigned out_size; -} mshabal_context_fast; +} mshabal128_context_fast; #pragma pack() /* @@ -112,7 +112,7 @@ typedef struct { * of 32, between 32 and 512 (inclusive). The output size is expressed * in bits. */ -void mshabal_init_avx(mshabal_context *sc, unsigned out_size); +void mshabal_init_avx(mshabal128_context *sc, unsigned out_size); /* * Process some more data bytes; four chunks of data, pointed to by @@ -126,7 +126,7 @@ void mshabal_init_avx(mshabal_context *sc, unsigned out_size); * corresponding instance is deactivated (the final value obtained from * that instance is undefined). */ -void mshabal_avx(mshabal_context *sc, const void *data0, const void *data1, const void *data2, +void mshabal_avx(mshabal128_context *sc, const void *data0, const void *data1, const void *data2, const void *data3, size_t len); /* @@ -151,15 +151,22 @@ void mshabal_avx(mshabal_context *sc, const void *data0, const void *data1, cons * release it, or reinitialize it with mshabal_init(). The mshabal_close() * function does NOT imply a hidden call to mshabal_init(). 
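 *
 * As an illustrative sketch, a single four-lane hash over four equal-length
 * messages could be driven as follows (in0..in3 and out0..out3 are
 * hypothetical caller-provided buffers):
 *
 *   mshabal128_context sc;
 *   mshabal_init_avx(&sc, 256);                 // 256-bit (32-byte) digests
 *   mshabal_avx(&sc, in0, in1, in2, in3, len);  // four lanes in parallel
 *   mshabal_close_avx(&sc, 0, 0, 0, 0, 0,       // ub0..ub3 unused, n = 0
 *                     out0, out1, out2, out3);
 *
 * Each outN must hold out_size/8 bytes (32 bytes for out_size = 256).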
*/ -void mshabal_close_avx(mshabal_context *sc, unsigned ub0, unsigned ub1, unsigned ub2, +void mshabal_close_avx(mshabal128_context *sc, unsigned ub0, unsigned ub1, unsigned ub2, unsigned ub3, unsigned n, void *dst0, void *dst1, void *dst2, void *dst3); /* - * optimised Shabal Routine for PoC Mining + * optimised Shabal routine for PoC mining */ -void mshabal_deadline_fast_avx(mshabal_context_fast *sc, void *message, void *termination, void *dst0, +void mshabal_deadline_fast_avx(mshabal128_context_fast *sc, void *message, void *termination, void *dst0, void *dst1, void *dst2, void *dst3); + +/* + * optimised Shabal routine for PoC plotting and hashing + */ +void mshabal_hash_fast_avx(mshabal128_context_fast *sc, void *message, void *termination, + void *dst, unsigned num); + #ifdef __cplusplus } #endif diff --git a/src/c/mshabal_128_neon.c b/src/c/mshabal_128_neon.c index bb79094..62ea371 100644 --- a/src/c/mshabal_128_neon.c +++ b/src/c/mshabal_128_neon.c @@ -1,7 +1,7 @@ /* * Parallel implementation of Shabal, using the NEON unit. This code * compiles and runs on x86 architectures, in 32-bit or 64-bit mode, - * which possess a SSE2-compatible SIMD unit. + * which possess a NEON-compatible SIMD unit. * * * (c) 2010 SAPHIR project. This software is provided 'as-is', without @@ -35,7 +35,7 @@ typedef mshabal_u32 u32; #define T32(x) ((x)&C32(0xFFFFFFFF)) #define ROTL32(x, n) T32(((x) << (n)) | ((x) >> (32 - (n)))) -static void mshabal_compress_neon(mshabal_context *sc, const unsigned char *buf0, +static void mshabal_compress_neon(mshabal128_context *sc, const unsigned char *buf0, const unsigned char *buf1, const unsigned char *buf2, const unsigned char *buf3, size_t num) { union { @@ -46,10 +46,10 @@ static void mshabal_compress_neon(mshabal_context *sc, const unsigned char *buf0 __m128i A[12], B[16], C[16]; __m128i one; - for (j = 0; j < 12; j++) A[j] = _mm_loadu_si128((__m128i*)sc->state + j); + for (j = 0; j < 12; j++) A[j] = _mm_loadu_si128((__m128i *)sc->state + j); for (j = 0; j < 16; j++) { - B[j] = _mm_loadu_si128((__m128i*)sc->state + j + 12); - C[j] = _mm_loadu_si128((__m128i*)sc->state + j + 28); + B[j] = _mm_loadu_si128((__m128i *)sc->state + j + 12); + C[j] = _mm_loadu_si128((__m128i *)sc->state + j + 28); } one = _mm_set1_epi32(C32(0xFFFFFFFF)); @@ -57,10 +57,10 @@ static void mshabal_compress_neon(mshabal_context *sc, const unsigned char *buf0 while (num-- > 0) { for (j = 0; j < 16 * MSHABAL128_VECTOR_SIZE; j += MSHABAL128_VECTOR_SIZE) { - u.words[j + 0] = *(u32*)(buf0 + j); - u.words[j + 1] = *(u32*)(buf1 + j); - u.words[j + 2] = *(u32*)(buf2 + j); - u.words[j + 3] = *(u32*)(buf3 + j); + u.words[j + 0] = *(u32 *)(buf0 + j); + u.words[j + 1] = *(u32 *)(buf1 + j); + u.words[j + 2] = *(u32 *)(buf2 + j); + u.words[j + 3] = *(u32 *)(buf3 + j); } for (j = 0; j < 16; j++) B[j] = _mm_add_epi32(B[j], M(j)); @@ -205,15 +205,15 @@ static void mshabal_compress_neon(mshabal_context *sc, const unsigned char *buf0 if (++sc->Wlow == 0) sc->Whigh++; } - for (j = 0; j < 12; j++) _mm_storeu_si128((__m128i*)sc->state + j, A[j]); + for (j = 0; j < 12; j++) _mm_storeu_si128((__m128i *)sc->state + j, A[j]); for (j = 0; j < 16; j++) { - _mm_storeu_si128((__m128i*)sc->state + j + 12, B[j]); - _mm_storeu_si128((__m128i*)sc->state + j + 28, C[j]); + _mm_storeu_si128((__m128i *)sc->state + j + 12, B[j]); + _mm_storeu_si128((__m128i *)sc->state + j + 28, C[j]); } #undef M } -void mshabal_init_neon(mshabal_context *sc, unsigned out_size) { +void mshabal_init_neon(mshabal128_context *sc, unsigned out_size) 
{
     unsigned u;
 
     memset(sc->state, 0, sizeof sc->state);
@@ -248,7 +248,7 @@ void mshabal_init_neon(mshabal_context *sc, unsigned out_size) {
     sc->out_size = out_size;
 }
 
-void mshabal_neon(mshabal_context *sc, const void *data0, const void *data1, const void *data2,
+void mshabal_neon(mshabal128_context *sc, const void *data0, const void *data1, const void *data2,
                   const void *data3, size_t len) {
     size_t ptr, num;
 
@@ -288,22 +288,21 @@ void mshabal_neon(mshabal_context *sc, const void *data0, const void *data1, con
             memcpy(sc->buf2 + ptr, data2, clen);
             memcpy(sc->buf3 + ptr, data3, clen);
             mshabal_compress_neon(sc, sc->buf0, sc->buf1, sc->buf2, sc->buf3, 1);
-            data0 = (const unsigned char*)data0 + clen;
-            data1 = (const unsigned char*)data1 + clen;
-            data2 = (const unsigned char*)data2 + clen;
-            data3 = (const unsigned char*)data3 + clen;
+            data0 = (const unsigned char *)data0 + clen;
+            data1 = (const unsigned char *)data1 + clen;
+            data2 = (const unsigned char *)data2 + clen;
+            data3 = (const unsigned char *)data3 + clen;
             len -= clen;
         }
     }
 
     num = len >> 6;
     if (num != 0) {
-        mshabal_compress_neon(sc, (const unsigned char*)data0, (const unsigned char*)data1,
-                              (const unsigned char*)data2, (const unsigned char*)data3, num);
-        data0 = (const unsigned char*)data0 + (num << 6);
-        data1 = (const unsigned char*)data1 + (num << 6);
-        data2 = (const unsigned char*)data2 + (num << 6);
-        data3 = (const unsigned char*)data3 + (num << 6);
+        mshabal_compress_neon(sc, data0, data1, data2, data3, num);
+        data0 = (const unsigned char *)data0 + (num << 6);
+        data1 = (const unsigned char *)data1 + (num << 6);
+        data2 = (const unsigned char *)data2 + (num << 6);
+        data3 = (const unsigned char *)data3 + (num << 6);
     }
     len &= 63;
     memcpy(sc->buf0, data0, len);
@@ -313,7 +312,7 @@ void mshabal_neon(mshabal_context *sc, const void *data0, const void *data1, con
     sc->ptr = len;
 }
 
-void mshabal_close_neon(mshabal_context *sc, unsigned ub0, unsigned ub1, unsigned ub2, unsigned ub3,
+void mshabal_close_neon(mshabal128_context *sc, unsigned ub0, unsigned ub1, unsigned ub2, unsigned ub3,
                         unsigned n, void *dst0, void *dst1, void *dst2, void *dst3) {
     size_t ptr, off;
     unsigned z, out_size_w32;
@@ -365,8 +364,303 @@ void mshabal_close_neon(mshabal_context *sc, unsigned ub0, unsigned ub1, unsigne
     }
 }
 
+// Shabal routine optimized for plotting and hashing
+void mshabal_hash_fast_neon(mshabal128_context_fast *sc, void *message, void *termination,
+                            void *dst, unsigned num) {
+    union input {
+        u32 words[16 * MSHABAL128_VECTOR_SIZE];
+        __m128i data[16];
+    };
+    size_t j;
+    __m128i A[12], B[16], C[16];
+    __m128i one;
+
+    for (j = 0; j < 12; j++) A[j] = _mm_loadu_si128((__m128i *)sc->state + j);
+    for (j = 0; j < 16; j++) {
+        B[j] = _mm_loadu_si128((__m128i *)sc->state + j + 12);
+        C[j] = _mm_loadu_si128((__m128i *)sc->state + j + 28);
+    }
+    one = _mm_set1_epi32(C32(0xFFFFFFFF));
+
+    // round 1
+#define M(i) _mm_load_si128((__m128i *)message + i)
+
+    while (num-- > 0) {
+        for (j = 0; j < 16; j++) B[j] = _mm_add_epi32(B[j], M(j));
+
+        A[0] = _mm_xor_si128(A[0], _mm_set1_epi32(sc->Wlow));
+        A[1] = _mm_xor_si128(A[1], _mm_set1_epi32(sc->Whigh));
+
+        for (j = 0; j < 16; j++)
+            B[j] = _mm_or_si128(_mm_slli_epi32(B[j], 17), _mm_srli_epi32(B[j], 15));
+
+#define PP(xa0, xa1, xb0, xb1, xb2, xb3, xc, xm) \
+    do { \
+        __m128i tt; \
+        tt = _mm_or_si128(_mm_slli_epi32(xa1, 15), _mm_srli_epi32(xa1, 17)); \
+        tt = _mm_add_epi32(_mm_slli_epi32(tt, 2), tt); \
+        tt = _mm_xor_si128(_mm_xor_si128(xa0, tt), xc); \
+        tt = _mm_add_epi32(_mm_slli_epi32(tt, 1), tt); \
+ tt = _mm_xor_si128(_mm_xor_si128(tt, xb1), _mm_xor_si128(_mm_andnot_si128(xb3, xb2), xm)); \ + xa0 = tt; \ + tt = xb0; \ + tt = _mm_or_si128(_mm_slli_epi32(tt, 1), _mm_srli_epi32(tt, 31)); \ + xb0 = _mm_xor_si128(tt, _mm_xor_si128(xa0, one)); \ + } while (0) + + PP(A[0x0], A[0xB], B[0x0], B[0xD], B[0x9], B[0x6], C[0x8], M(0x0)); + PP(A[0x1], A[0x0], B[0x1], B[0xE], B[0xA], B[0x7], C[0x7], M(0x1)); + PP(A[0x2], A[0x1], B[0x2], B[0xF], B[0xB], B[0x8], C[0x6], M(0x2)); + PP(A[0x3], A[0x2], B[0x3], B[0x0], B[0xC], B[0x9], C[0x5], M(0x3)); + PP(A[0x4], A[0x3], B[0x4], B[0x1], B[0xD], B[0xA], C[0x4], M(0x4)); + PP(A[0x5], A[0x4], B[0x5], B[0x2], B[0xE], B[0xB], C[0x3], M(0x5)); + PP(A[0x6], A[0x5], B[0x6], B[0x3], B[0xF], B[0xC], C[0x2], M(0x6)); + PP(A[0x7], A[0x6], B[0x7], B[0x4], B[0x0], B[0xD], C[0x1], M(0x7)); + PP(A[0x8], A[0x7], B[0x8], B[0x5], B[0x1], B[0xE], C[0x0], M(0x8)); + PP(A[0x9], A[0x8], B[0x9], B[0x6], B[0x2], B[0xF], C[0xF], M(0x9)); + PP(A[0xA], A[0x9], B[0xA], B[0x7], B[0x3], B[0x0], C[0xE], M(0xA)); + PP(A[0xB], A[0xA], B[0xB], B[0x8], B[0x4], B[0x1], C[0xD], M(0xB)); + PP(A[0x0], A[0xB], B[0xC], B[0x9], B[0x5], B[0x2], C[0xC], M(0xC)); + PP(A[0x1], A[0x0], B[0xD], B[0xA], B[0x6], B[0x3], C[0xB], M(0xD)); + PP(A[0x2], A[0x1], B[0xE], B[0xB], B[0x7], B[0x4], C[0xA], M(0xE)); + PP(A[0x3], A[0x2], B[0xF], B[0xC], B[0x8], B[0x5], C[0x9], M(0xF)); + + PP(A[0x4], A[0x3], B[0x0], B[0xD], B[0x9], B[0x6], C[0x8], M(0x0)); + PP(A[0x5], A[0x4], B[0x1], B[0xE], B[0xA], B[0x7], C[0x7], M(0x1)); + PP(A[0x6], A[0x5], B[0x2], B[0xF], B[0xB], B[0x8], C[0x6], M(0x2)); + PP(A[0x7], A[0x6], B[0x3], B[0x0], B[0xC], B[0x9], C[0x5], M(0x3)); + PP(A[0x8], A[0x7], B[0x4], B[0x1], B[0xD], B[0xA], C[0x4], M(0x4)); + PP(A[0x9], A[0x8], B[0x5], B[0x2], B[0xE], B[0xB], C[0x3], M(0x5)); + PP(A[0xA], A[0x9], B[0x6], B[0x3], B[0xF], B[0xC], C[0x2], M(0x6)); + PP(A[0xB], A[0xA], B[0x7], B[0x4], B[0x0], B[0xD], C[0x1], M(0x7)); + PP(A[0x0], A[0xB], B[0x8], B[0x5], B[0x1], B[0xE], C[0x0], M(0x8)); + PP(A[0x1], A[0x0], B[0x9], B[0x6], B[0x2], B[0xF], C[0xF], M(0x9)); + PP(A[0x2], A[0x1], B[0xA], B[0x7], B[0x3], B[0x0], C[0xE], M(0xA)); + PP(A[0x3], A[0x2], B[0xB], B[0x8], B[0x4], B[0x1], C[0xD], M(0xB)); + PP(A[0x4], A[0x3], B[0xC], B[0x9], B[0x5], B[0x2], C[0xC], M(0xC)); + PP(A[0x5], A[0x4], B[0xD], B[0xA], B[0x6], B[0x3], C[0xB], M(0xD)); + PP(A[0x6], A[0x5], B[0xE], B[0xB], B[0x7], B[0x4], C[0xA], M(0xE)); + PP(A[0x7], A[0x6], B[0xF], B[0xC], B[0x8], B[0x5], C[0x9], M(0xF)); + + PP(A[0x8], A[0x7], B[0x0], B[0xD], B[0x9], B[0x6], C[0x8], M(0x0)); + PP(A[0x9], A[0x8], B[0x1], B[0xE], B[0xA], B[0x7], C[0x7], M(0x1)); + PP(A[0xA], A[0x9], B[0x2], B[0xF], B[0xB], B[0x8], C[0x6], M(0x2)); + PP(A[0xB], A[0xA], B[0x3], B[0x0], B[0xC], B[0x9], C[0x5], M(0x3)); + PP(A[0x0], A[0xB], B[0x4], B[0x1], B[0xD], B[0xA], C[0x4], M(0x4)); + PP(A[0x1], A[0x0], B[0x5], B[0x2], B[0xE], B[0xB], C[0x3], M(0x5)); + PP(A[0x2], A[0x1], B[0x6], B[0x3], B[0xF], B[0xC], C[0x2], M(0x6)); + PP(A[0x3], A[0x2], B[0x7], B[0x4], B[0x0], B[0xD], C[0x1], M(0x7)); + PP(A[0x4], A[0x3], B[0x8], B[0x5], B[0x1], B[0xE], C[0x0], M(0x8)); + PP(A[0x5], A[0x4], B[0x9], B[0x6], B[0x2], B[0xF], C[0xF], M(0x9)); + PP(A[0x6], A[0x5], B[0xA], B[0x7], B[0x3], B[0x0], C[0xE], M(0xA)); + PP(A[0x7], A[0x6], B[0xB], B[0x8], B[0x4], B[0x1], C[0xD], M(0xB)); + PP(A[0x8], A[0x7], B[0xC], B[0x9], B[0x5], B[0x2], C[0xC], M(0xC)); + PP(A[0x9], A[0x8], B[0xD], B[0xA], B[0x6], B[0x3], C[0xB], M(0xD)); + PP(A[0xA], A[0x9], B[0xE], B[0xB], B[0x7], B[0x4], C[0xA], 
M(0xE)); + PP(A[0xB], A[0xA], B[0xF], B[0xC], B[0x8], B[0x5], C[0x9], M(0xF)); + + A[0xB] = _mm_add_epi32(A[0xB], C[0x6]); + A[0xA] = _mm_add_epi32(A[0xA], C[0x5]); + A[0x9] = _mm_add_epi32(A[0x9], C[0x4]); + A[0x8] = _mm_add_epi32(A[0x8], C[0x3]); + A[0x7] = _mm_add_epi32(A[0x7], C[0x2]); + A[0x6] = _mm_add_epi32(A[0x6], C[0x1]); + A[0x5] = _mm_add_epi32(A[0x5], C[0x0]); + A[0x4] = _mm_add_epi32(A[0x4], C[0xF]); + A[0x3] = _mm_add_epi32(A[0x3], C[0xE]); + A[0x2] = _mm_add_epi32(A[0x2], C[0xD]); + A[0x1] = _mm_add_epi32(A[0x1], C[0xC]); + A[0x0] = _mm_add_epi32(A[0x0], C[0xB]); + A[0xB] = _mm_add_epi32(A[0xB], C[0xA]); + A[0xA] = _mm_add_epi32(A[0xA], C[0x9]); + A[0x9] = _mm_add_epi32(A[0x9], C[0x8]); + A[0x8] = _mm_add_epi32(A[0x8], C[0x7]); + A[0x7] = _mm_add_epi32(A[0x7], C[0x6]); + A[0x6] = _mm_add_epi32(A[0x6], C[0x5]); + A[0x5] = _mm_add_epi32(A[0x5], C[0x4]); + A[0x4] = _mm_add_epi32(A[0x4], C[0x3]); + A[0x3] = _mm_add_epi32(A[0x3], C[0x2]); + A[0x2] = _mm_add_epi32(A[0x2], C[0x1]); + A[0x1] = _mm_add_epi32(A[0x1], C[0x0]); + A[0x0] = _mm_add_epi32(A[0x0], C[0xF]); + A[0xB] = _mm_add_epi32(A[0xB], C[0xE]); + A[0xA] = _mm_add_epi32(A[0xA], C[0xD]); + A[0x9] = _mm_add_epi32(A[0x9], C[0xC]); + A[0x8] = _mm_add_epi32(A[0x8], C[0xB]); + A[0x7] = _mm_add_epi32(A[0x7], C[0xA]); + A[0x6] = _mm_add_epi32(A[0x6], C[0x9]); + A[0x5] = _mm_add_epi32(A[0x5], C[0x8]); + A[0x4] = _mm_add_epi32(A[0x4], C[0x7]); + A[0x3] = _mm_add_epi32(A[0x3], C[0x6]); + A[0x2] = _mm_add_epi32(A[0x2], C[0x5]); + A[0x1] = _mm_add_epi32(A[0x1], C[0x4]); + A[0x0] = _mm_add_epi32(A[0x0], C[0x3]); + +#define SWAP_AND_SUB(xb, xc, xm) \ + do { \ + __m128i tmp; \ + tmp = xb; \ + xb = _mm_sub_epi32(xc, xm); \ + xc = tmp; \ + } while (0) + + SWAP_AND_SUB(B[0x0], C[0x0], M(0x0)); + SWAP_AND_SUB(B[0x1], C[0x1], M(0x1)); + SWAP_AND_SUB(B[0x2], C[0x2], M(0x2)); + SWAP_AND_SUB(B[0x3], C[0x3], M(0x3)); + SWAP_AND_SUB(B[0x4], C[0x4], M(0x4)); + SWAP_AND_SUB(B[0x5], C[0x5], M(0x5)); + SWAP_AND_SUB(B[0x6], C[0x6], M(0x6)); + SWAP_AND_SUB(B[0x7], C[0x7], M(0x7)); + SWAP_AND_SUB(B[0x8], C[0x8], M(0x8)); + SWAP_AND_SUB(B[0x9], C[0x9], M(0x9)); + SWAP_AND_SUB(B[0xA], C[0xA], M(0xA)); + SWAP_AND_SUB(B[0xB], C[0xB], M(0xB)); + SWAP_AND_SUB(B[0xC], C[0xC], M(0xC)); + SWAP_AND_SUB(B[0xD], C[0xD], M(0xD)); + SWAP_AND_SUB(B[0xE], C[0xE], M(0xE)); + SWAP_AND_SUB(B[0xF], C[0xF], M(0xF)); + + // move data pointer + message = (__m128i *)message + 16; + + if (++sc->Wlow == 0) sc->Whigh++; + } + + // round 2-5 +#define M2(i) _mm_load_si128((__m128i *)termination + i) + + for (int k = 0; k < 4; k++) { + for (j = 0; j < 16; j++) B[j] = _mm_add_epi32(B[j], M2(j)); + + A[0] = _mm_xor_si128(A[0], _mm_set1_epi32(sc->Wlow)); + A[1] = _mm_xor_si128(A[1], _mm_set1_epi32(sc->Whigh)); + + for (j = 0; j < 16; j++) + B[j] = _mm_or_si128(_mm_slli_epi32(B[j], 17), _mm_srli_epi32(B[j], 15)); + + PP(A[0x0], A[0xB], B[0x0], B[0xD], B[0x9], B[0x6], C[0x8], M2(0x0)); + PP(A[0x1], A[0x0], B[0x1], B[0xE], B[0xA], B[0x7], C[0x7], M2(0x1)); + PP(A[0x2], A[0x1], B[0x2], B[0xF], B[0xB], B[0x8], C[0x6], M2(0x2)); + PP(A[0x3], A[0x2], B[0x3], B[0x0], B[0xC], B[0x9], C[0x5], M2(0x3)); + PP(A[0x4], A[0x3], B[0x4], B[0x1], B[0xD], B[0xA], C[0x4], M2(0x4)); + PP(A[0x5], A[0x4], B[0x5], B[0x2], B[0xE], B[0xB], C[0x3], M2(0x5)); + PP(A[0x6], A[0x5], B[0x6], B[0x3], B[0xF], B[0xC], C[0x2], M2(0x6)); + PP(A[0x7], A[0x6], B[0x7], B[0x4], B[0x0], B[0xD], C[0x1], M2(0x7)); + PP(A[0x8], A[0x7], B[0x8], B[0x5], B[0x1], B[0xE], C[0x0], M2(0x8)); + PP(A[0x9], A[0x8], B[0x9], B[0x6], B[0x2], 
B[0xF], C[0xF], M2(0x9)); + PP(A[0xA], A[0x9], B[0xA], B[0x7], B[0x3], B[0x0], C[0xE], M2(0xA)); + PP(A[0xB], A[0xA], B[0xB], B[0x8], B[0x4], B[0x1], C[0xD], M2(0xB)); + PP(A[0x0], A[0xB], B[0xC], B[0x9], B[0x5], B[0x2], C[0xC], M2(0xC)); + PP(A[0x1], A[0x0], B[0xD], B[0xA], B[0x6], B[0x3], C[0xB], M2(0xD)); + PP(A[0x2], A[0x1], B[0xE], B[0xB], B[0x7], B[0x4], C[0xA], M2(0xE)); + PP(A[0x3], A[0x2], B[0xF], B[0xC], B[0x8], B[0x5], C[0x9], M2(0xF)); + + PP(A[0x4], A[0x3], B[0x0], B[0xD], B[0x9], B[0x6], C[0x8], M2(0x0)); + PP(A[0x5], A[0x4], B[0x1], B[0xE], B[0xA], B[0x7], C[0x7], M2(0x1)); + PP(A[0x6], A[0x5], B[0x2], B[0xF], B[0xB], B[0x8], C[0x6], M2(0x2)); + PP(A[0x7], A[0x6], B[0x3], B[0x0], B[0xC], B[0x9], C[0x5], M2(0x3)); + PP(A[0x8], A[0x7], B[0x4], B[0x1], B[0xD], B[0xA], C[0x4], M2(0x4)); + PP(A[0x9], A[0x8], B[0x5], B[0x2], B[0xE], B[0xB], C[0x3], M2(0x5)); + PP(A[0xA], A[0x9], B[0x6], B[0x3], B[0xF], B[0xC], C[0x2], M2(0x6)); + PP(A[0xB], A[0xA], B[0x7], B[0x4], B[0x0], B[0xD], C[0x1], M2(0x7)); + PP(A[0x0], A[0xB], B[0x8], B[0x5], B[0x1], B[0xE], C[0x0], M2(0x8)); + PP(A[0x1], A[0x0], B[0x9], B[0x6], B[0x2], B[0xF], C[0xF], M2(0x9)); + PP(A[0x2], A[0x1], B[0xA], B[0x7], B[0x3], B[0x0], C[0xE], M2(0xA)); + PP(A[0x3], A[0x2], B[0xB], B[0x8], B[0x4], B[0x1], C[0xD], M2(0xB)); + PP(A[0x4], A[0x3], B[0xC], B[0x9], B[0x5], B[0x2], C[0xC], M2(0xC)); + PP(A[0x5], A[0x4], B[0xD], B[0xA], B[0x6], B[0x3], C[0xB], M2(0xD)); + PP(A[0x6], A[0x5], B[0xE], B[0xB], B[0x7], B[0x4], C[0xA], M2(0xE)); + PP(A[0x7], A[0x6], B[0xF], B[0xC], B[0x8], B[0x5], C[0x9], M2(0xF)); + + PP(A[0x8], A[0x7], B[0x0], B[0xD], B[0x9], B[0x6], C[0x8], M2(0x0)); + PP(A[0x9], A[0x8], B[0x1], B[0xE], B[0xA], B[0x7], C[0x7], M2(0x1)); + PP(A[0xA], A[0x9], B[0x2], B[0xF], B[0xB], B[0x8], C[0x6], M2(0x2)); + PP(A[0xB], A[0xA], B[0x3], B[0x0], B[0xC], B[0x9], C[0x5], M2(0x3)); + PP(A[0x0], A[0xB], B[0x4], B[0x1], B[0xD], B[0xA], C[0x4], M2(0x4)); + PP(A[0x1], A[0x0], B[0x5], B[0x2], B[0xE], B[0xB], C[0x3], M2(0x5)); + PP(A[0x2], A[0x1], B[0x6], B[0x3], B[0xF], B[0xC], C[0x2], M2(0x6)); + PP(A[0x3], A[0x2], B[0x7], B[0x4], B[0x0], B[0xD], C[0x1], M2(0x7)); + PP(A[0x4], A[0x3], B[0x8], B[0x5], B[0x1], B[0xE], C[0x0], M2(0x8)); + PP(A[0x5], A[0x4], B[0x9], B[0x6], B[0x2], B[0xF], C[0xF], M2(0x9)); + PP(A[0x6], A[0x5], B[0xA], B[0x7], B[0x3], B[0x0], C[0xE], M2(0xA)); + PP(A[0x7], A[0x6], B[0xB], B[0x8], B[0x4], B[0x1], C[0xD], M2(0xB)); + PP(A[0x8], A[0x7], B[0xC], B[0x9], B[0x5], B[0x2], C[0xC], M2(0xC)); + PP(A[0x9], A[0x8], B[0xD], B[0xA], B[0x6], B[0x3], C[0xB], M2(0xD)); + PP(A[0xA], A[0x9], B[0xE], B[0xB], B[0x7], B[0x4], C[0xA], M2(0xE)); + PP(A[0xB], A[0xA], B[0xF], B[0xC], B[0x8], B[0x5], C[0x9], M2(0xF)); + + A[0xB] = _mm_add_epi32(A[0xB], C[0x6]); + A[0xA] = _mm_add_epi32(A[0xA], C[0x5]); + A[0x9] = _mm_add_epi32(A[0x9], C[0x4]); + A[0x8] = _mm_add_epi32(A[0x8], C[0x3]); + A[0x7] = _mm_add_epi32(A[0x7], C[0x2]); + A[0x6] = _mm_add_epi32(A[0x6], C[0x1]); + A[0x5] = _mm_add_epi32(A[0x5], C[0x0]); + A[0x4] = _mm_add_epi32(A[0x4], C[0xF]); + A[0x3] = _mm_add_epi32(A[0x3], C[0xE]); + A[0x2] = _mm_add_epi32(A[0x2], C[0xD]); + A[0x1] = _mm_add_epi32(A[0x1], C[0xC]); + A[0x0] = _mm_add_epi32(A[0x0], C[0xB]); + A[0xB] = _mm_add_epi32(A[0xB], C[0xA]); + A[0xA] = _mm_add_epi32(A[0xA], C[0x9]); + A[0x9] = _mm_add_epi32(A[0x9], C[0x8]); + A[0x8] = _mm_add_epi32(A[0x8], C[0x7]); + A[0x7] = _mm_add_epi32(A[0x7], C[0x6]); + A[0x6] = _mm_add_epi32(A[0x6], C[0x5]); + A[0x5] = _mm_add_epi32(A[0x5], C[0x4]); + A[0x4] = 
_mm_add_epi32(A[0x4], C[0x3]); + A[0x3] = _mm_add_epi32(A[0x3], C[0x2]); + A[0x2] = _mm_add_epi32(A[0x2], C[0x1]); + A[0x1] = _mm_add_epi32(A[0x1], C[0x0]); + A[0x0] = _mm_add_epi32(A[0x0], C[0xF]); + A[0xB] = _mm_add_epi32(A[0xB], C[0xE]); + A[0xA] = _mm_add_epi32(A[0xA], C[0xD]); + A[0x9] = _mm_add_epi32(A[0x9], C[0xC]); + A[0x8] = _mm_add_epi32(A[0x8], C[0xB]); + A[0x7] = _mm_add_epi32(A[0x7], C[0xA]); + A[0x6] = _mm_add_epi32(A[0x6], C[0x9]); + A[0x5] = _mm_add_epi32(A[0x5], C[0x8]); + A[0x4] = _mm_add_epi32(A[0x4], C[0x7]); + A[0x3] = _mm_add_epi32(A[0x3], C[0x6]); + A[0x2] = _mm_add_epi32(A[0x2], C[0x5]); + A[0x1] = _mm_add_epi32(A[0x1], C[0x4]); + A[0x0] = _mm_add_epi32(A[0x0], C[0x3]); + + SWAP_AND_SUB(B[0x0], C[0x0], M2(0x0)); + SWAP_AND_SUB(B[0x1], C[0x1], M2(0x1)); + SWAP_AND_SUB(B[0x2], C[0x2], M2(0x2)); + SWAP_AND_SUB(B[0x3], C[0x3], M2(0x3)); + SWAP_AND_SUB(B[0x4], C[0x4], M2(0x4)); + SWAP_AND_SUB(B[0x5], C[0x5], M2(0x5)); + SWAP_AND_SUB(B[0x6], C[0x6], M2(0x6)); + SWAP_AND_SUB(B[0x7], C[0x7], M2(0x7)); + SWAP_AND_SUB(B[0x8], C[0x8], M2(0x8)); + SWAP_AND_SUB(B[0x9], C[0x9], M2(0x9)); + SWAP_AND_SUB(B[0xA], C[0xA], M2(0xA)); + SWAP_AND_SUB(B[0xB], C[0xB], M2(0xB)); + SWAP_AND_SUB(B[0xC], C[0xC], M2(0xC)); + SWAP_AND_SUB(B[0xD], C[0xD], M2(0xD)); + SWAP_AND_SUB(B[0xE], C[0xE], M2(0xE)); + SWAP_AND_SUB(B[0xF], C[0xF], M2(0xF)); + + if (++sc->Wlow == 0) sc->Whigh++; + + if (sc->Wlow-- == 0) sc->Whigh--; + } + + // download SIMD aligned hashes + for (j = 0; j < 8; j++) { + _mm_storeu_si128((__m128i *)dst + j, C[j + 8]); + } + + // reset Wlow & Whigh + sc->Wlow = 1; + sc->Whigh = 0; +} + // Shabal routine optimized for mining -void mshabal_deadline_fast_neon(mshabal_context_fast *sc, void *message, void *termination, void *dst0, +void mshabal_deadline_fast_neon(mshabal128_context_fast *sc, void *message, void *termination, void *dst0, void *dst1, void *dst2, void *dst3) { union input { u32 words[16 * MSHABAL128_VECTOR_SIZE]; @@ -376,14 +670,14 @@ void mshabal_deadline_fast_neon(mshabal_context_fast *sc, void *message, void *t __m128i A[12], B[16], C[16]; __m128i one; - for (j = 0; j < 12; j++) A[j] = _mm_loadu_si128((__m128i*)sc->state + j); + for (j = 0; j < 12; j++) A[j] = _mm_loadu_si128((__m128i *)sc->state + j); for (j = 0; j < 16; j++) { - B[j] = _mm_loadu_si128((__m128i*)sc->state + j + 12); - C[j] = _mm_loadu_si128((__m128i*)sc->state + j + 28); + B[j] = _mm_loadu_si128((__m128i *)sc->state + j + 12); + C[j] = _mm_loadu_si128((__m128i *)sc->state + j + 28); } one = _mm_set1_epi32(C32(0xFFFFFFFF)); - // round 1/5 + // round 1 #define M(i) _mm_load_si128((__m128i *)message + i) for (j = 0; j < 16; j++) B[j] = _mm_add_epi32(B[j], M(j)); @@ -646,8 +940,8 @@ void mshabal_deadline_fast_neon(mshabal_context_fast *sc, void *message, void *t // download SIMD aligned deadlines u32 simd_dst[8]; - _mm_storeu_si128((__m128i*)&simd_dst[0], C[8]); - _mm_storeu_si128((__m128i*)&simd_dst[4], C[9]); + _mm_storeu_si128((__m128i *)&simd_dst[0], C[8]); + _mm_storeu_si128((__m128i *)&simd_dst[4], C[9]); // unpack SIMD data unsigned z; diff --git a/src/c/mshabal_128_neon.h b/src/c/mshabal_128_neon.h index f72a528..d230981 100644 --- a/src/c/mshabal_128_neon.h +++ b/src/c/mshabal_128_neon.h @@ -97,14 +97,14 @@ typedef struct { mshabal_u32 state[(12 + 16 + 16) * MSHABAL128_VECTOR_SIZE]; mshabal_u32 Whigh, Wlow; unsigned out_size; -} mshabal_context; +} mshabal128_context; #pragma pack(1) typedef struct { mshabal_u32 state[(12 + 16 + 16) * MSHABAL128_VECTOR_SIZE]; mshabal_u32 Whigh, Wlow; unsigned 
out_size; -} mshabal_context_fast; +} mshabal128_context_fast; #pragma pack() /* @@ -112,8 +112,7 @@ typedef struct { * of 32, between 32 and 512 (inclusive). The output size is expressed * in bits. */ -void mshabal_init_neon(mshabal_context *sc, unsigned out_size); - +void mshabal_init_neon(mshabal128_context *sc, unsigned out_size); /* * Process some more data bytes; four chunks of data, pointed to by @@ -127,7 +126,7 @@ void mshabal_init_neon(mshabal_context *sc, unsigned out_size); * corresponding instance is deactivated (the final value obtained from * that instance is undefined). */ -void eon_mshabal_neon(mshabal_context *sc, const void *data0, const void *data1, const void *data2, +void mshabal_neon(mshabal128_context *sc, const void *data0, const void *data1, const void *data2, const void *data3, size_t len); /* @@ -152,15 +151,22 @@ void eon_mshabal_neon(mshabal_context *sc, const void *data0, const void *data1, * release it, or reinitialize it with mshabal_init(). The mshabal_close() * function does NOT imply a hidden call to mshabal_init(). */ -void mshabal_close_neon(mshabal_context *sc, unsigned ub0, unsigned ub1, unsigned ub2, +void mshabal_close_neon(mshabal128_context *sc, unsigned ub0, unsigned ub1, unsigned ub2, unsigned ub3, unsigned n, void *dst0, void *dst1, void *dst2, void *dst3); /* - * optimised Shabal Routine for PoC Mining + * optimised Shabal routine for PoC mining */ -void mshabal_deadline_fast_neon(mshabal_context_fast *sc, void *u1, void *u2, void *dst0, +void mshabal_deadline_fast_neon(mshabal128_context_fast *sc, void *message, void *termination, void *dst0, void *dst1, void *dst2, void *dst3); + +/* + * optimised Shabal routine for PoC plotting and hashing + */ +void mshabal_hash_fast_neon(mshabal128_context_fast *sc, void *message, void *termination, + void *dst, unsigned num); + #ifdef __cplusplus } #endif diff --git a/src/c/mshabal_128_sse2.c b/src/c/mshabal_128_sse2.c index 8e865e6..e147a75 100644 --- a/src/c/mshabal_128_sse2.c +++ b/src/c/mshabal_128_sse2.c @@ -35,7 +35,7 @@ typedef mshabal_u32 u32; #define T32(x) ((x)&C32(0xFFFFFFFF)) #define ROTL32(x, n) T32(((x) << (n)) | ((x) >> (32 - (n)))) -static void mshabal_compress_sse2(mshabal_context *sc, const unsigned char *buf0, +static void mshabal_compress_sse2(mshabal128_context *sc, const unsigned char *buf0, const unsigned char *buf1, const unsigned char *buf2, const unsigned char *buf3, size_t num) { union { @@ -46,10 +46,10 @@ static void mshabal_compress_sse2(mshabal_context *sc, const unsigned char *buf0 __m128i A[12], B[16], C[16]; __m128i one; - for (j = 0; j < 12; j++) A[j] = _mm_loadu_si128((__m128i*)sc->state + j); + for (j = 0; j < 12; j++) A[j] = _mm_loadu_si128((__m128i *)sc->state + j); for (j = 0; j < 16; j++) { - B[j] = _mm_loadu_si128((__m128i*)sc->state + j + 12); - C[j] = _mm_loadu_si128((__m128i*)sc->state + j + 28); + B[j] = _mm_loadu_si128((__m128i *)sc->state + j + 12); + C[j] = _mm_loadu_si128((__m128i *)sc->state + j + 28); } one = _mm_set1_epi32(C32(0xFFFFFFFF)); @@ -57,10 +57,10 @@ static void mshabal_compress_sse2(mshabal_context *sc, const unsigned char *buf0 while (num-- > 0) { for (j = 0; j < 16 * MSHABAL128_VECTOR_SIZE; j += MSHABAL128_VECTOR_SIZE) { - u.words[j + 0] = *(u32*)(buf0 + j); - u.words[j + 1] = *(u32*)(buf1 + j); - u.words[j + 2] = *(u32*)(buf2 + j); - u.words[j + 3] = *(u32*)(buf3 + j); + u.words[j + 0] = *(u32 *)(buf0 + j); + u.words[j + 1] = *(u32 *)(buf1 + j); + u.words[j + 2] = *(u32 *)(buf2 + j); + u.words[j + 3] = *(u32 *)(buf3 + j); } for (j = 0; j 
< 16; j++) B[j] = _mm_add_epi32(B[j], M(j)); @@ -205,15 +205,15 @@ static void mshabal_compress_sse2(mshabal_context *sc, const unsigned char *buf0 if (++sc->Wlow == 0) sc->Whigh++; } - for (j = 0; j < 12; j++) _mm_storeu_si128((__m128i*)sc->state + j, A[j]); + for (j = 0; j < 12; j++) _mm_storeu_si128((__m128i *)sc->state + j, A[j]); for (j = 0; j < 16; j++) { - _mm_storeu_si128((__m128i*)sc->state + j + 12, B[j]); - _mm_storeu_si128((__m128i*)sc->state + j + 28, C[j]); + _mm_storeu_si128((__m128i *)sc->state + j + 12, B[j]); + _mm_storeu_si128((__m128i *)sc->state + j + 28, C[j]); } #undef M } -void mshabal_init_sse2(mshabal_context *sc, unsigned out_size) { +void mshabal_init_sse2(mshabal128_context *sc, unsigned out_size) { unsigned u; memset(sc->state, 0, sizeof sc->state); @@ -248,7 +248,7 @@ void mshabal_init_sse2(mshabal_context *sc, unsigned out_size) { sc->out_size = out_size; } -void mshabal_sse2(mshabal_context *sc, const void *data0, const void *data1, const void *data2, +void mshabal_sse2(mshabal128_context *sc, const void *data0, const void *data1, const void *data2, const void *data3, size_t len) { size_t ptr, num; @@ -288,22 +288,21 @@ void mshabal_sse2(mshabal_context *sc, const void *data0, const void *data1, con memcpy(sc->buf2 + ptr, data2, clen); memcpy(sc->buf3 + ptr, data3, clen); mshabal_compress_sse2(sc, sc->buf0, sc->buf1, sc->buf2, sc->buf3, 1); - data0 = (const unsigned char*)data0 + clen; - data1 = (const unsigned char*)data1 + clen; - data2 = (const unsigned char*)data2 + clen; - data3 = (const unsigned char*)data3 + clen; + data0 = (const unsigned char *)data0 + clen; + data1 = (const unsigned char *)data1 + clen; + data2 = (const unsigned char *)data2 + clen; + data3 = (const unsigned char *)data3 + clen; len -= clen; } } num = len >> 6; if (num != 0) { - mshabal_compress_sse2(sc, (const unsigned char*)data0, (const unsigned char*)data1, - (const unsigned char*)data2, (const unsigned char*)data3, num); - data0 = (const unsigned char*)data0 + (num << 6); - data1 = (const unsigned char*)data1 + (num << 6); - data2 = (const unsigned char*)data2 + (num << 6); - data3 = (const unsigned char*)data3 + (num << 6); + mshabal_compress_sse2(sc, data0, data1, data2, data3, num); + data0 = (const unsigned char *)data0 + (num << 6); + data1 = (const unsigned char *)data1 + (num << 6); + data2 = (const unsigned char *)data2 + (num << 6); + data3 = (const unsigned char *)data3 + (num << 6); } len &= 63; memcpy(sc->buf0, data0, len); @@ -313,7 +312,7 @@ void mshabal_sse2(mshabal_context *sc, const void *data0, const void *data1, con sc->ptr = len; } -void mshabal_close_sse2(mshabal_context *sc, unsigned ub0, unsigned ub1, unsigned ub2, unsigned ub3, +void mshabal_close_sse2(mshabal128_context *sc, unsigned ub0, unsigned ub1, unsigned ub2, unsigned ub3, unsigned n, void *dst0, void *dst1, void *dst2, void *dst3) { size_t ptr, off; unsigned z, out_size_w32; @@ -365,8 +364,303 @@ void mshabal_close_sse2(mshabal_context *sc, unsigned ub0, unsigned ub1, unsigne } } +// Shabal routine optimized for plotting and hashing +void mshabal_hash_fast_sse2(mshabal128_context_fast *sc, void *message, void *termination, + void *dst, unsigned num) { + union input { + u32 words[16 * MSHABAL128_VECTOR_SIZE]; + __m128i data[16]; + }; + size_t j; + __m128i A[12], B[16], C[16]; + __m128i one; + + for (j = 0; j < 12; j++) A[j] = _mm_loadu_si128((__m128i *)sc->state + j); + for (j = 0; j < 16; j++) { + B[j] = _mm_loadu_si128((__m128i *)sc->state + j + 12); + C[j] = _mm_loadu_si128((__m128i 
*)sc->state + j + 28); + } + one = _mm_set1_epi32(C32(0xFFFFFFFF)); + + // round 1 +#define M(i) _mm_load_si128((__m128i *)message + i) + + while (num-- > 0) { + for (j = 0; j < 16; j++) B[j] = _mm_add_epi32(B[j], M(j)); + + A[0] = _mm_xor_si128(A[0], _mm_set1_epi32(sc->Wlow)); + A[1] = _mm_xor_si128(A[1], _mm_set1_epi32(sc->Whigh)); + + for (j = 0; j < 16; j++) + B[j] = _mm_or_si128(_mm_slli_epi32(B[j], 17), _mm_srli_epi32(B[j], 15)); + +#define PP(xa0, xa1, xb0, xb1, xb2, xb3, xc, xm) \ + do { \ + __m128i tt; \ + tt = _mm_or_si128(_mm_slli_epi32(xa1, 15), _mm_srli_epi32(xa1, 17)); \ + tt = _mm_add_epi32(_mm_slli_epi32(tt, 2), tt); \ + tt = _mm_xor_si128(_mm_xor_si128(xa0, tt), xc); \ + tt = _mm_add_epi32(_mm_slli_epi32(tt, 1), tt); \ + tt = _mm_xor_si128(_mm_xor_si128(tt, xb1), _mm_xor_si128(_mm_andnot_si128(xb3, xb2), xm)); \ + xa0 = tt; \ + tt = xb0; \ + tt = _mm_or_si128(_mm_slli_epi32(tt, 1), _mm_srli_epi32(tt, 31)); \ + xb0 = _mm_xor_si128(tt, _mm_xor_si128(xa0, one)); \ + } while (0) + + PP(A[0x0], A[0xB], B[0x0], B[0xD], B[0x9], B[0x6], C[0x8], M(0x0)); + PP(A[0x1], A[0x0], B[0x1], B[0xE], B[0xA], B[0x7], C[0x7], M(0x1)); + PP(A[0x2], A[0x1], B[0x2], B[0xF], B[0xB], B[0x8], C[0x6], M(0x2)); + PP(A[0x3], A[0x2], B[0x3], B[0x0], B[0xC], B[0x9], C[0x5], M(0x3)); + PP(A[0x4], A[0x3], B[0x4], B[0x1], B[0xD], B[0xA], C[0x4], M(0x4)); + PP(A[0x5], A[0x4], B[0x5], B[0x2], B[0xE], B[0xB], C[0x3], M(0x5)); + PP(A[0x6], A[0x5], B[0x6], B[0x3], B[0xF], B[0xC], C[0x2], M(0x6)); + PP(A[0x7], A[0x6], B[0x7], B[0x4], B[0x0], B[0xD], C[0x1], M(0x7)); + PP(A[0x8], A[0x7], B[0x8], B[0x5], B[0x1], B[0xE], C[0x0], M(0x8)); + PP(A[0x9], A[0x8], B[0x9], B[0x6], B[0x2], B[0xF], C[0xF], M(0x9)); + PP(A[0xA], A[0x9], B[0xA], B[0x7], B[0x3], B[0x0], C[0xE], M(0xA)); + PP(A[0xB], A[0xA], B[0xB], B[0x8], B[0x4], B[0x1], C[0xD], M(0xB)); + PP(A[0x0], A[0xB], B[0xC], B[0x9], B[0x5], B[0x2], C[0xC], M(0xC)); + PP(A[0x1], A[0x0], B[0xD], B[0xA], B[0x6], B[0x3], C[0xB], M(0xD)); + PP(A[0x2], A[0x1], B[0xE], B[0xB], B[0x7], B[0x4], C[0xA], M(0xE)); + PP(A[0x3], A[0x2], B[0xF], B[0xC], B[0x8], B[0x5], C[0x9], M(0xF)); + + PP(A[0x4], A[0x3], B[0x0], B[0xD], B[0x9], B[0x6], C[0x8], M(0x0)); + PP(A[0x5], A[0x4], B[0x1], B[0xE], B[0xA], B[0x7], C[0x7], M(0x1)); + PP(A[0x6], A[0x5], B[0x2], B[0xF], B[0xB], B[0x8], C[0x6], M(0x2)); + PP(A[0x7], A[0x6], B[0x3], B[0x0], B[0xC], B[0x9], C[0x5], M(0x3)); + PP(A[0x8], A[0x7], B[0x4], B[0x1], B[0xD], B[0xA], C[0x4], M(0x4)); + PP(A[0x9], A[0x8], B[0x5], B[0x2], B[0xE], B[0xB], C[0x3], M(0x5)); + PP(A[0xA], A[0x9], B[0x6], B[0x3], B[0xF], B[0xC], C[0x2], M(0x6)); + PP(A[0xB], A[0xA], B[0x7], B[0x4], B[0x0], B[0xD], C[0x1], M(0x7)); + PP(A[0x0], A[0xB], B[0x8], B[0x5], B[0x1], B[0xE], C[0x0], M(0x8)); + PP(A[0x1], A[0x0], B[0x9], B[0x6], B[0x2], B[0xF], C[0xF], M(0x9)); + PP(A[0x2], A[0x1], B[0xA], B[0x7], B[0x3], B[0x0], C[0xE], M(0xA)); + PP(A[0x3], A[0x2], B[0xB], B[0x8], B[0x4], B[0x1], C[0xD], M(0xB)); + PP(A[0x4], A[0x3], B[0xC], B[0x9], B[0x5], B[0x2], C[0xC], M(0xC)); + PP(A[0x5], A[0x4], B[0xD], B[0xA], B[0x6], B[0x3], C[0xB], M(0xD)); + PP(A[0x6], A[0x5], B[0xE], B[0xB], B[0x7], B[0x4], C[0xA], M(0xE)); + PP(A[0x7], A[0x6], B[0xF], B[0xC], B[0x8], B[0x5], C[0x9], M(0xF)); + + PP(A[0x8], A[0x7], B[0x0], B[0xD], B[0x9], B[0x6], C[0x8], M(0x0)); + PP(A[0x9], A[0x8], B[0x1], B[0xE], B[0xA], B[0x7], C[0x7], M(0x1)); + PP(A[0xA], A[0x9], B[0x2], B[0xF], B[0xB], B[0x8], C[0x6], M(0x2)); + PP(A[0xB], A[0xA], B[0x3], B[0x0], B[0xC], B[0x9], C[0x5], M(0x3)); + PP(A[0x0], A[0xB], 
B[0x4], B[0x1], B[0xD], B[0xA], C[0x4], M(0x4)); + PP(A[0x1], A[0x0], B[0x5], B[0x2], B[0xE], B[0xB], C[0x3], M(0x5)); + PP(A[0x2], A[0x1], B[0x6], B[0x3], B[0xF], B[0xC], C[0x2], M(0x6)); + PP(A[0x3], A[0x2], B[0x7], B[0x4], B[0x0], B[0xD], C[0x1], M(0x7)); + PP(A[0x4], A[0x3], B[0x8], B[0x5], B[0x1], B[0xE], C[0x0], M(0x8)); + PP(A[0x5], A[0x4], B[0x9], B[0x6], B[0x2], B[0xF], C[0xF], M(0x9)); + PP(A[0x6], A[0x5], B[0xA], B[0x7], B[0x3], B[0x0], C[0xE], M(0xA)); + PP(A[0x7], A[0x6], B[0xB], B[0x8], B[0x4], B[0x1], C[0xD], M(0xB)); + PP(A[0x8], A[0x7], B[0xC], B[0x9], B[0x5], B[0x2], C[0xC], M(0xC)); + PP(A[0x9], A[0x8], B[0xD], B[0xA], B[0x6], B[0x3], C[0xB], M(0xD)); + PP(A[0xA], A[0x9], B[0xE], B[0xB], B[0x7], B[0x4], C[0xA], M(0xE)); + PP(A[0xB], A[0xA], B[0xF], B[0xC], B[0x8], B[0x5], C[0x9], M(0xF)); + + A[0xB] = _mm_add_epi32(A[0xB], C[0x6]); + A[0xA] = _mm_add_epi32(A[0xA], C[0x5]); + A[0x9] = _mm_add_epi32(A[0x9], C[0x4]); + A[0x8] = _mm_add_epi32(A[0x8], C[0x3]); + A[0x7] = _mm_add_epi32(A[0x7], C[0x2]); + A[0x6] = _mm_add_epi32(A[0x6], C[0x1]); + A[0x5] = _mm_add_epi32(A[0x5], C[0x0]); + A[0x4] = _mm_add_epi32(A[0x4], C[0xF]); + A[0x3] = _mm_add_epi32(A[0x3], C[0xE]); + A[0x2] = _mm_add_epi32(A[0x2], C[0xD]); + A[0x1] = _mm_add_epi32(A[0x1], C[0xC]); + A[0x0] = _mm_add_epi32(A[0x0], C[0xB]); + A[0xB] = _mm_add_epi32(A[0xB], C[0xA]); + A[0xA] = _mm_add_epi32(A[0xA], C[0x9]); + A[0x9] = _mm_add_epi32(A[0x9], C[0x8]); + A[0x8] = _mm_add_epi32(A[0x8], C[0x7]); + A[0x7] = _mm_add_epi32(A[0x7], C[0x6]); + A[0x6] = _mm_add_epi32(A[0x6], C[0x5]); + A[0x5] = _mm_add_epi32(A[0x5], C[0x4]); + A[0x4] = _mm_add_epi32(A[0x4], C[0x3]); + A[0x3] = _mm_add_epi32(A[0x3], C[0x2]); + A[0x2] = _mm_add_epi32(A[0x2], C[0x1]); + A[0x1] = _mm_add_epi32(A[0x1], C[0x0]); + A[0x0] = _mm_add_epi32(A[0x0], C[0xF]); + A[0xB] = _mm_add_epi32(A[0xB], C[0xE]); + A[0xA] = _mm_add_epi32(A[0xA], C[0xD]); + A[0x9] = _mm_add_epi32(A[0x9], C[0xC]); + A[0x8] = _mm_add_epi32(A[0x8], C[0xB]); + A[0x7] = _mm_add_epi32(A[0x7], C[0xA]); + A[0x6] = _mm_add_epi32(A[0x6], C[0x9]); + A[0x5] = _mm_add_epi32(A[0x5], C[0x8]); + A[0x4] = _mm_add_epi32(A[0x4], C[0x7]); + A[0x3] = _mm_add_epi32(A[0x3], C[0x6]); + A[0x2] = _mm_add_epi32(A[0x2], C[0x5]); + A[0x1] = _mm_add_epi32(A[0x1], C[0x4]); + A[0x0] = _mm_add_epi32(A[0x0], C[0x3]); + +#define SWAP_AND_SUB(xb, xc, xm) \ + do { \ + __m128i tmp; \ + tmp = xb; \ + xb = _mm_sub_epi32(xc, xm); \ + xc = tmp; \ + } while (0) + + SWAP_AND_SUB(B[0x0], C[0x0], M(0x0)); + SWAP_AND_SUB(B[0x1], C[0x1], M(0x1)); + SWAP_AND_SUB(B[0x2], C[0x2], M(0x2)); + SWAP_AND_SUB(B[0x3], C[0x3], M(0x3)); + SWAP_AND_SUB(B[0x4], C[0x4], M(0x4)); + SWAP_AND_SUB(B[0x5], C[0x5], M(0x5)); + SWAP_AND_SUB(B[0x6], C[0x6], M(0x6)); + SWAP_AND_SUB(B[0x7], C[0x7], M(0x7)); + SWAP_AND_SUB(B[0x8], C[0x8], M(0x8)); + SWAP_AND_SUB(B[0x9], C[0x9], M(0x9)); + SWAP_AND_SUB(B[0xA], C[0xA], M(0xA)); + SWAP_AND_SUB(B[0xB], C[0xB], M(0xB)); + SWAP_AND_SUB(B[0xC], C[0xC], M(0xC)); + SWAP_AND_SUB(B[0xD], C[0xD], M(0xD)); + SWAP_AND_SUB(B[0xE], C[0xE], M(0xE)); + SWAP_AND_SUB(B[0xF], C[0xF], M(0xF)); + + // move data pointer + message = (__m128i *)message + 16; + + if (++sc->Wlow == 0) sc->Whigh++; + } + + // round 2-5 +#define M2(i) _mm_load_si128((__m128i *)termination + i) + + for (int k = 0; k < 4; k++) { + for (j = 0; j < 16; j++) B[j] = _mm_add_epi32(B[j], M2(j)); + + A[0] = _mm_xor_si128(A[0], _mm_set1_epi32(sc->Wlow)); + A[1] = _mm_xor_si128(A[1], _mm_set1_epi32(sc->Whigh)); + + for (j = 0; j < 16; j++) + B[j] = 
_mm_or_si128(_mm_slli_epi32(B[j], 17), _mm_srli_epi32(B[j], 15)); + + PP(A[0x0], A[0xB], B[0x0], B[0xD], B[0x9], B[0x6], C[0x8], M2(0x0)); + PP(A[0x1], A[0x0], B[0x1], B[0xE], B[0xA], B[0x7], C[0x7], M2(0x1)); + PP(A[0x2], A[0x1], B[0x2], B[0xF], B[0xB], B[0x8], C[0x6], M2(0x2)); + PP(A[0x3], A[0x2], B[0x3], B[0x0], B[0xC], B[0x9], C[0x5], M2(0x3)); + PP(A[0x4], A[0x3], B[0x4], B[0x1], B[0xD], B[0xA], C[0x4], M2(0x4)); + PP(A[0x5], A[0x4], B[0x5], B[0x2], B[0xE], B[0xB], C[0x3], M2(0x5)); + PP(A[0x6], A[0x5], B[0x6], B[0x3], B[0xF], B[0xC], C[0x2], M2(0x6)); + PP(A[0x7], A[0x6], B[0x7], B[0x4], B[0x0], B[0xD], C[0x1], M2(0x7)); + PP(A[0x8], A[0x7], B[0x8], B[0x5], B[0x1], B[0xE], C[0x0], M2(0x8)); + PP(A[0x9], A[0x8], B[0x9], B[0x6], B[0x2], B[0xF], C[0xF], M2(0x9)); + PP(A[0xA], A[0x9], B[0xA], B[0x7], B[0x3], B[0x0], C[0xE], M2(0xA)); + PP(A[0xB], A[0xA], B[0xB], B[0x8], B[0x4], B[0x1], C[0xD], M2(0xB)); + PP(A[0x0], A[0xB], B[0xC], B[0x9], B[0x5], B[0x2], C[0xC], M2(0xC)); + PP(A[0x1], A[0x0], B[0xD], B[0xA], B[0x6], B[0x3], C[0xB], M2(0xD)); + PP(A[0x2], A[0x1], B[0xE], B[0xB], B[0x7], B[0x4], C[0xA], M2(0xE)); + PP(A[0x3], A[0x2], B[0xF], B[0xC], B[0x8], B[0x5], C[0x9], M2(0xF)); + + PP(A[0x4], A[0x3], B[0x0], B[0xD], B[0x9], B[0x6], C[0x8], M2(0x0)); + PP(A[0x5], A[0x4], B[0x1], B[0xE], B[0xA], B[0x7], C[0x7], M2(0x1)); + PP(A[0x6], A[0x5], B[0x2], B[0xF], B[0xB], B[0x8], C[0x6], M2(0x2)); + PP(A[0x7], A[0x6], B[0x3], B[0x0], B[0xC], B[0x9], C[0x5], M2(0x3)); + PP(A[0x8], A[0x7], B[0x4], B[0x1], B[0xD], B[0xA], C[0x4], M2(0x4)); + PP(A[0x9], A[0x8], B[0x5], B[0x2], B[0xE], B[0xB], C[0x3], M2(0x5)); + PP(A[0xA], A[0x9], B[0x6], B[0x3], B[0xF], B[0xC], C[0x2], M2(0x6)); + PP(A[0xB], A[0xA], B[0x7], B[0x4], B[0x0], B[0xD], C[0x1], M2(0x7)); + PP(A[0x0], A[0xB], B[0x8], B[0x5], B[0x1], B[0xE], C[0x0], M2(0x8)); + PP(A[0x1], A[0x0], B[0x9], B[0x6], B[0x2], B[0xF], C[0xF], M2(0x9)); + PP(A[0x2], A[0x1], B[0xA], B[0x7], B[0x3], B[0x0], C[0xE], M2(0xA)); + PP(A[0x3], A[0x2], B[0xB], B[0x8], B[0x4], B[0x1], C[0xD], M2(0xB)); + PP(A[0x4], A[0x3], B[0xC], B[0x9], B[0x5], B[0x2], C[0xC], M2(0xC)); + PP(A[0x5], A[0x4], B[0xD], B[0xA], B[0x6], B[0x3], C[0xB], M2(0xD)); + PP(A[0x6], A[0x5], B[0xE], B[0xB], B[0x7], B[0x4], C[0xA], M2(0xE)); + PP(A[0x7], A[0x6], B[0xF], B[0xC], B[0x8], B[0x5], C[0x9], M2(0xF)); + + PP(A[0x8], A[0x7], B[0x0], B[0xD], B[0x9], B[0x6], C[0x8], M2(0x0)); + PP(A[0x9], A[0x8], B[0x1], B[0xE], B[0xA], B[0x7], C[0x7], M2(0x1)); + PP(A[0xA], A[0x9], B[0x2], B[0xF], B[0xB], B[0x8], C[0x6], M2(0x2)); + PP(A[0xB], A[0xA], B[0x3], B[0x0], B[0xC], B[0x9], C[0x5], M2(0x3)); + PP(A[0x0], A[0xB], B[0x4], B[0x1], B[0xD], B[0xA], C[0x4], M2(0x4)); + PP(A[0x1], A[0x0], B[0x5], B[0x2], B[0xE], B[0xB], C[0x3], M2(0x5)); + PP(A[0x2], A[0x1], B[0x6], B[0x3], B[0xF], B[0xC], C[0x2], M2(0x6)); + PP(A[0x3], A[0x2], B[0x7], B[0x4], B[0x0], B[0xD], C[0x1], M2(0x7)); + PP(A[0x4], A[0x3], B[0x8], B[0x5], B[0x1], B[0xE], C[0x0], M2(0x8)); + PP(A[0x5], A[0x4], B[0x9], B[0x6], B[0x2], B[0xF], C[0xF], M2(0x9)); + PP(A[0x6], A[0x5], B[0xA], B[0x7], B[0x3], B[0x0], C[0xE], M2(0xA)); + PP(A[0x7], A[0x6], B[0xB], B[0x8], B[0x4], B[0x1], C[0xD], M2(0xB)); + PP(A[0x8], A[0x7], B[0xC], B[0x9], B[0x5], B[0x2], C[0xC], M2(0xC)); + PP(A[0x9], A[0x8], B[0xD], B[0xA], B[0x6], B[0x3], C[0xB], M2(0xD)); + PP(A[0xA], A[0x9], B[0xE], B[0xB], B[0x7], B[0x4], C[0xA], M2(0xE)); + PP(A[0xB], A[0xA], B[0xF], B[0xC], B[0x8], B[0x5], C[0x9], M2(0xF)); + + A[0xB] = _mm_add_epi32(A[0xB], C[0x6]); + A[0xA] = 
_mm_add_epi32(A[0xA], C[0x5]); + A[0x9] = _mm_add_epi32(A[0x9], C[0x4]); + A[0x8] = _mm_add_epi32(A[0x8], C[0x3]); + A[0x7] = _mm_add_epi32(A[0x7], C[0x2]); + A[0x6] = _mm_add_epi32(A[0x6], C[0x1]); + A[0x5] = _mm_add_epi32(A[0x5], C[0x0]); + A[0x4] = _mm_add_epi32(A[0x4], C[0xF]); + A[0x3] = _mm_add_epi32(A[0x3], C[0xE]); + A[0x2] = _mm_add_epi32(A[0x2], C[0xD]); + A[0x1] = _mm_add_epi32(A[0x1], C[0xC]); + A[0x0] = _mm_add_epi32(A[0x0], C[0xB]); + A[0xB] = _mm_add_epi32(A[0xB], C[0xA]); + A[0xA] = _mm_add_epi32(A[0xA], C[0x9]); + A[0x9] = _mm_add_epi32(A[0x9], C[0x8]); + A[0x8] = _mm_add_epi32(A[0x8], C[0x7]); + A[0x7] = _mm_add_epi32(A[0x7], C[0x6]); + A[0x6] = _mm_add_epi32(A[0x6], C[0x5]); + A[0x5] = _mm_add_epi32(A[0x5], C[0x4]); + A[0x4] = _mm_add_epi32(A[0x4], C[0x3]); + A[0x3] = _mm_add_epi32(A[0x3], C[0x2]); + A[0x2] = _mm_add_epi32(A[0x2], C[0x1]); + A[0x1] = _mm_add_epi32(A[0x1], C[0x0]); + A[0x0] = _mm_add_epi32(A[0x0], C[0xF]); + A[0xB] = _mm_add_epi32(A[0xB], C[0xE]); + A[0xA] = _mm_add_epi32(A[0xA], C[0xD]); + A[0x9] = _mm_add_epi32(A[0x9], C[0xC]); + A[0x8] = _mm_add_epi32(A[0x8], C[0xB]); + A[0x7] = _mm_add_epi32(A[0x7], C[0xA]); + A[0x6] = _mm_add_epi32(A[0x6], C[0x9]); + A[0x5] = _mm_add_epi32(A[0x5], C[0x8]); + A[0x4] = _mm_add_epi32(A[0x4], C[0x7]); + A[0x3] = _mm_add_epi32(A[0x3], C[0x6]); + A[0x2] = _mm_add_epi32(A[0x2], C[0x5]); + A[0x1] = _mm_add_epi32(A[0x1], C[0x4]); + A[0x0] = _mm_add_epi32(A[0x0], C[0x3]); + + SWAP_AND_SUB(B[0x0], C[0x0], M2(0x0)); + SWAP_AND_SUB(B[0x1], C[0x1], M2(0x1)); + SWAP_AND_SUB(B[0x2], C[0x2], M2(0x2)); + SWAP_AND_SUB(B[0x3], C[0x3], M2(0x3)); + SWAP_AND_SUB(B[0x4], C[0x4], M2(0x4)); + SWAP_AND_SUB(B[0x5], C[0x5], M2(0x5)); + SWAP_AND_SUB(B[0x6], C[0x6], M2(0x6)); + SWAP_AND_SUB(B[0x7], C[0x7], M2(0x7)); + SWAP_AND_SUB(B[0x8], C[0x8], M2(0x8)); + SWAP_AND_SUB(B[0x9], C[0x9], M2(0x9)); + SWAP_AND_SUB(B[0xA], C[0xA], M2(0xA)); + SWAP_AND_SUB(B[0xB], C[0xB], M2(0xB)); + SWAP_AND_SUB(B[0xC], C[0xC], M2(0xC)); + SWAP_AND_SUB(B[0xD], C[0xD], M2(0xD)); + SWAP_AND_SUB(B[0xE], C[0xE], M2(0xE)); + SWAP_AND_SUB(B[0xF], C[0xF], M2(0xF)); + + if (++sc->Wlow == 0) sc->Whigh++; + + if (sc->Wlow-- == 0) sc->Whigh--; + } + + // download SIMD aligned hashes + for (j = 0; j < 8; j++) { + _mm_storeu_si128((__m128i *)dst + j, C[j + 8]); + } + + // reset Wlow & Whigh + sc->Wlow = 1; + sc->Whigh = 0; +} + // Shabal routine optimized for mining -void mshabal_deadline_fast_sse2(mshabal_context_fast *sc, void *message, void *termination, void *dst0, +void mshabal_deadline_fast_sse2(mshabal128_context_fast *sc, void *message, void *termination, void *dst0, void *dst1, void *dst2, void *dst3) { union input { u32 words[16 * MSHABAL128_VECTOR_SIZE]; @@ -376,14 +670,14 @@ void mshabal_deadline_fast_sse2(mshabal_context_fast *sc, void *message, void *t __m128i A[12], B[16], C[16]; __m128i one; - for (j = 0; j < 12; j++) A[j] = _mm_loadu_si128((__m128i*)sc->state + j); + for (j = 0; j < 12; j++) A[j] = _mm_loadu_si128((__m128i *)sc->state + j); for (j = 0; j < 16; j++) { - B[j] = _mm_loadu_si128((__m128i*)sc->state + j + 12); - C[j] = _mm_loadu_si128((__m128i*)sc->state + j + 28); + B[j] = _mm_loadu_si128((__m128i *)sc->state + j + 12); + C[j] = _mm_loadu_si128((__m128i *)sc->state + j + 28); } one = _mm_set1_epi32(C32(0xFFFFFFFF)); - // round 1/5 + // round 1 #define M(i) _mm_load_si128((__m128i *)message + i) for (j = 0; j < 16; j++) B[j] = _mm_add_epi32(B[j], M(j)); @@ -646,8 +940,8 @@ void mshabal_deadline_fast_sse2(mshabal_context_fast *sc, void *message, void *t 
// download SIMD aligned deadlines u32 simd_dst[8]; - _mm_storeu_si128((__m128i*)&simd_dst[0], C[8]); - _mm_storeu_si128((__m128i*)&simd_dst[4], C[9]); + _mm_storeu_si128((__m128i *)&simd_dst[0], C[8]); + _mm_storeu_si128((__m128i *)&simd_dst[4], C[9]); // unpack SIMD data unsigned z; diff --git a/src/c/mshabal_128_sse2.h b/src/c/mshabal_128_sse2.h index 32cf50e..5874469 100644 --- a/src/c/mshabal_128_sse2.h +++ b/src/c/mshabal_128_sse2.h @@ -97,14 +97,14 @@ typedef struct { mshabal_u32 state[(12 + 16 + 16) * MSHABAL128_VECTOR_SIZE]; mshabal_u32 Whigh, Wlow; unsigned out_size; -} mshabal_context; +} mshabal128_context; #pragma pack(1) typedef struct { mshabal_u32 state[(12 + 16 + 16) * MSHABAL128_VECTOR_SIZE]; mshabal_u32 Whigh, Wlow; unsigned out_size; -} mshabal_context_fast; +} mshabal128_context_fast; #pragma pack() /* @@ -112,7 +112,7 @@ typedef struct { * of 32, between 32 and 512 (inclusive). The output size is expressed * in bits. */ -void mshabal_init_sse2(mshabal_context *sc, unsigned out_size); +void mshabal_init_sse2(mshabal128_context *sc, unsigned out_size); /* * Process some more data bytes; four chunks of data, pointed to by @@ -126,7 +126,7 @@ void mshabal_init_sse2(mshabal_context *sc, unsigned out_size); * corresponding instance is deactivated (the final value obtained from * that instance is undefined). */ -void mshabal_sse2(mshabal_context *sc, const void *data0, const void *data1, const void *data2, +void mshabal_sse2(mshabal128_context *sc, const void *data0, const void *data1, const void *data2, const void *data3, size_t len); /* @@ -151,15 +151,22 @@ void mshabal_sse2(mshabal_context *sc, const void *data0, const void *data1, con * release it, or reinitialize it with mshabal_init(). The mshabal_close() * function does NOT imply a hidden call to mshabal_init(). */ -void mshabal_close_sse2(mshabal_context *sc, unsigned ub0, unsigned ub1, unsigned ub2, +void mshabal_close_sse2(mshabal128_context *sc, unsigned ub0, unsigned ub1, unsigned ub2, unsigned ub3, unsigned n, void *dst0, void *dst1, void *dst2, void *dst3); /* - * optimised Shabal Routine for PoC Mining + * optimised Shabal routine for PoC plotting and hashing */ -void mshabal_deadline_fast_sse2(mshabal_context_fast *sc, void *message, void *termination, void *dst0, +void mshabal_hash_fast_sse2(mshabal128_context_fast *sc, void *message, void *termination, + void *dst, unsigned num); + +/* + * optimised Shabal routine for PoC mining + */ +void mshabal_deadline_fast_sse2(mshabal128_context_fast *sc, void *message, void *termination, void *dst0, void *dst1, void *dst2, void *dst3); + #ifdef __cplusplus } #endif diff --git a/src/c/mshabal_256_avx2.c b/src/c/mshabal_256_avx2.c index c08b04e..2081c09 100644 --- a/src/c/mshabal_256_avx2.c +++ b/src/c/mshabal_256_avx2.c @@ -3,7 +3,7 @@ * compiles and runs on x86 architectures, in 32-bit or 64-bit mode, * which possess an AVX2-compatible SIMD unit. * - * + * * (c) 2010 SAPHIR project. This software is provided 'as-is', without * any express or implied warranty. In no event will the authors be held * liable for any damages arising from the use of this software.
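Before the AVX2 changes: for orientation, here is how the renamed four-lane API in mshabal_128_sse2.h is driven end to end. A minimal sketch, not part of the patch; it assumes Shabal-256 output and byte-aligned input (n = 0 trailing bits), and the helper name hash_four_shabal256 is invented for the example:

    #include <stddef.h>
    #include "mshabal_128_sse2.h"

    /* Hash four equal-length buffers in one pass; out[i] receives the
     * 32-byte Shabal-256 digest of in[i]. */
    static void hash_four_shabal256(const void *in0, const void *in1,
                                    const void *in2, const void *in3,
                                    size_t len, unsigned char out[4][32]) {
        mshabal128_context sc;
        mshabal_init_sse2(&sc, 256);                /* out_size is given in bits */
        mshabal_sse2(&sc, in0, in1, in2, in3, len); /* all four lanes advance in lockstep */
        mshabal_close_sse2(&sc, 0, 0, 0, 0, 0,      /* ub0..ub3 and n: no partial byte */
                           out[0], out[1], out[2], out[3]);
    }

Per the header comments, every call must feed all four lanes the same number of bytes; a lane can be dropped by passing NULL, at the cost of an undefined final value for that lane.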
@@ -36,10 +36,10 @@ typedef mshabal_u32 u32; #define ROTL32(x, n) T32(((x) << (n)) | ((x) >> (32 - (n)))) static void mshabal_compress_avx2(mshabal256_context *sc, const unsigned char *buf0, - const unsigned char *buf1, const unsigned char *buf2, - const unsigned char *buf3, const unsigned char *buf4, - const unsigned char *buf5, const unsigned char *buf6, - const unsigned char *buf7, size_t num) { + const unsigned char *buf1, const unsigned char *buf2, + const unsigned char *buf3, const unsigned char *buf4, + const unsigned char *buf5, const unsigned char *buf6, + const unsigned char *buf7, size_t num) { union { u32 words[16 * MSHABAL256_VECTOR_SIZE]; __m256i data[16]; @@ -278,13 +278,13 @@ void mshabal_init_avx2(mshabal256_context *sc, unsigned out_size) { sc->buf7[4 * u + 1] = (out_size + u + 16) >> 8; } mshabal_compress_avx2(sc, sc->buf0, sc->buf1, sc->buf2, sc->buf3, sc->buf4, sc->buf5, - sc->buf6, sc->buf7, 1); + sc->buf6, sc->buf7, 1); sc->ptr = 0; sc->out_size = out_size; } -void mshabal_avx2(mshabal256_context* sc, const void* data0, const void* data1, const void* data2, const void* data3, - const void* data4, const void* data5, const void* data6, const void* data7, size_t len) { +void mshabal_avx2(mshabal256_context *sc, const void *data0, const void *data1, const void *data2, const void *data3, + const void *data4, const void *data5, const void *data6, const void *data7, size_t len) { size_t ptr, num; if (data0 == NULL) { @@ -351,7 +351,7 @@ void mshabal_avx2(mshabal256_context* sc, const void* data0, const void* data1, memcpy(sc->buf6 + ptr, data6, clen); memcpy(sc->buf7 + ptr, data7, clen); mshabal_compress_avx2(sc, sc->buf0, sc->buf1, sc->buf2, sc->buf3, sc->buf4, sc->buf5, - sc->buf6, sc->buf7, 1); + sc->buf6, sc->buf7, 1); data0 = (const unsigned char *)data0 + clen; data1 = (const unsigned char *)data1 + clen; data2 = (const unsigned char *)data2 + clen; @@ -366,10 +366,7 @@ void mshabal_avx2(mshabal256_context* sc, const void* data0, const void* data1, num = len >> 6; if (num != 0) { - mshabal_compress_avx2(sc, (const unsigned char *)data0, (const unsigned char *)data1, - (const unsigned char *)data2, (const unsigned char *)data3, - (const unsigned char *)data4, (const unsigned char *)data5, - (const unsigned char *)data6, (const unsigned char *)data7, num); + mshabal_compress_avx2(sc, data0, data1, data2, data3, data4, data5, data6, data7, num); data0 = (const unsigned char *)data0 + (num << 6); data1 = (const unsigned char *)data1 + (num << 6); data2 = (const unsigned char *)data2 + (num << 6); @@ -391,7 +388,7 @@ void mshabal_avx2(mshabal256_context* sc, const void* data0, const void* data1, sc->ptr = len; } -void mshabal256_close_avx2(mshabal256_context *sc, unsigned ub0, unsigned ub1, unsigned ub2, +void mshabal_close_avx2(mshabal256_context *sc, unsigned ub0, unsigned ub1, unsigned ub2, unsigned ub3, unsigned ub4, unsigned ub5, unsigned ub6, unsigned ub7, unsigned n, void *dst0, void *dst1, void *dst2, void *dst3, void *dst4, void *dst5, void *dst6, void *dst7) { @@ -482,9 +479,9 @@ void mshabal256_close_avx2(mshabal256_context *sc, unsigned ub0, unsigned ub1, u } } -// Shabal routine optimized for mining -void mshabal_deadline_fast_avx2(mshabal256_context_fast* sc, void* message, void* termination, void* dst0, void* dst1, void* dst2, - void* dst3, void* dst4, void* dst5, void* dst6, void* dst7) { +// Shabal routines optimized for plotting and hashing +void mshabal_hash_fast_avx2(mshabal256_context_fast *sc, void *message, void *termination, + void *dst, unsigned 
num) { union input { u32 words[16 * MSHABAL256_VECTOR_SIZE]; __m256i data[16]; @@ -501,7 +498,303 @@ void mshabal_deadline_fast_avx2(mshabal256_context_fast* sc, void* message, void } one = _mm256_set1_epi32(C32(0xFFFFFFFF)); - // round 1/5 + // round 1 +#define M(i) _mm256_loadu_si256((__m256i *)message + i) + + while (num-- > 0) { + for (j = 0; j < 16; j++) B[j] = _mm256_add_epi32(B[j], M(j)); + + A[0] = _mm256_xor_si256(A[0], _mm256_set1_epi32(sc->Wlow)); + A[1] = _mm256_xor_si256(A[1], _mm256_set1_epi32(sc->Whigh)); + + for (j = 0; j < 16; j++) + B[j] = _mm256_or_si256(_mm256_slli_epi32(B[j], 17), _mm256_srli_epi32(B[j], 15)); + +#define PP256(xa0, xa1, xb0, xb1, xb2, xb3, xc, xm) \ + do { \ + __m256i tt; \ + tt = _mm256_or_si256(_mm256_slli_epi32(xa1, 15), _mm256_srli_epi32(xa1, 17)); \ + tt = _mm256_add_epi32(_mm256_slli_epi32(tt, 2), tt); \ + tt = _mm256_xor_si256(_mm256_xor_si256(xa0, tt), xc); \ + tt = _mm256_add_epi32(_mm256_slli_epi32(tt, 1), tt); \ + tt = _mm256_xor_si256(_mm256_xor_si256(tt, xb1), \ + _mm256_xor_si256(_mm256_andnot_si256(xb3, xb2), xm)); \ + xa0 = tt; \ + tt = xb0; \ + tt = _mm256_or_si256(_mm256_slli_epi32(tt, 1), _mm256_srli_epi32(tt, 31)); \ + xb0 = _mm256_xor_si256(tt, _mm256_xor_si256(xa0, one)); \ + } while (0) + + PP256(A[0x0], A[0xB], B[0x0], B[0xD], B[0x9], B[0x6], C[0x8], M(0x0)); + PP256(A[0x1], A[0x0], B[0x1], B[0xE], B[0xA], B[0x7], C[0x7], M(0x1)); + PP256(A[0x2], A[0x1], B[0x2], B[0xF], B[0xB], B[0x8], C[0x6], M(0x2)); + PP256(A[0x3], A[0x2], B[0x3], B[0x0], B[0xC], B[0x9], C[0x5], M(0x3)); + PP256(A[0x4], A[0x3], B[0x4], B[0x1], B[0xD], B[0xA], C[0x4], M(0x4)); + PP256(A[0x5], A[0x4], B[0x5], B[0x2], B[0xE], B[0xB], C[0x3], M(0x5)); + PP256(A[0x6], A[0x5], B[0x6], B[0x3], B[0xF], B[0xC], C[0x2], M(0x6)); + PP256(A[0x7], A[0x6], B[0x7], B[0x4], B[0x0], B[0xD], C[0x1], M(0x7)); + PP256(A[0x8], A[0x7], B[0x8], B[0x5], B[0x1], B[0xE], C[0x0], M(0x8)); + PP256(A[0x9], A[0x8], B[0x9], B[0x6], B[0x2], B[0xF], C[0xF], M(0x9)); + PP256(A[0xA], A[0x9], B[0xA], B[0x7], B[0x3], B[0x0], C[0xE], M(0xA)); + PP256(A[0xB], A[0xA], B[0xB], B[0x8], B[0x4], B[0x1], C[0xD], M(0xB)); + PP256(A[0x0], A[0xB], B[0xC], B[0x9], B[0x5], B[0x2], C[0xC], M(0xC)); + PP256(A[0x1], A[0x0], B[0xD], B[0xA], B[0x6], B[0x3], C[0xB], M(0xD)); + PP256(A[0x2], A[0x1], B[0xE], B[0xB], B[0x7], B[0x4], C[0xA], M(0xE)); + PP256(A[0x3], A[0x2], B[0xF], B[0xC], B[0x8], B[0x5], C[0x9], M(0xF)); + + PP256(A[0x4], A[0x3], B[0x0], B[0xD], B[0x9], B[0x6], C[0x8], M(0x0)); + PP256(A[0x5], A[0x4], B[0x1], B[0xE], B[0xA], B[0x7], C[0x7], M(0x1)); + PP256(A[0x6], A[0x5], B[0x2], B[0xF], B[0xB], B[0x8], C[0x6], M(0x2)); + PP256(A[0x7], A[0x6], B[0x3], B[0x0], B[0xC], B[0x9], C[0x5], M(0x3)); + PP256(A[0x8], A[0x7], B[0x4], B[0x1], B[0xD], B[0xA], C[0x4], M(0x4)); + PP256(A[0x9], A[0x8], B[0x5], B[0x2], B[0xE], B[0xB], C[0x3], M(0x5)); + PP256(A[0xA], A[0x9], B[0x6], B[0x3], B[0xF], B[0xC], C[0x2], M(0x6)); + PP256(A[0xB], A[0xA], B[0x7], B[0x4], B[0x0], B[0xD], C[0x1], M(0x7)); + PP256(A[0x0], A[0xB], B[0x8], B[0x5], B[0x1], B[0xE], C[0x0], M(0x8)); + PP256(A[0x1], A[0x0], B[0x9], B[0x6], B[0x2], B[0xF], C[0xF], M(0x9)); + PP256(A[0x2], A[0x1], B[0xA], B[0x7], B[0x3], B[0x0], C[0xE], M(0xA)); + PP256(A[0x3], A[0x2], B[0xB], B[0x8], B[0x4], B[0x1], C[0xD], M(0xB)); + PP256(A[0x4], A[0x3], B[0xC], B[0x9], B[0x5], B[0x2], C[0xC], M(0xC)); + PP256(A[0x5], A[0x4], B[0xD], B[0xA], B[0x6], B[0x3], C[0xB], M(0xD)); + PP256(A[0x6], A[0x5], B[0xE], B[0xB], B[0x7], B[0x4], C[0xA], M(0xE)); + PP256(A[0x7], 
A[0x6], B[0xF], B[0xC], B[0x8], B[0x5], C[0x9], M(0xF)); + + PP256(A[0x8], A[0x7], B[0x0], B[0xD], B[0x9], B[0x6], C[0x8], M(0x0)); + PP256(A[0x9], A[0x8], B[0x1], B[0xE], B[0xA], B[0x7], C[0x7], M(0x1)); + PP256(A[0xA], A[0x9], B[0x2], B[0xF], B[0xB], B[0x8], C[0x6], M(0x2)); + PP256(A[0xB], A[0xA], B[0x3], B[0x0], B[0xC], B[0x9], C[0x5], M(0x3)); + PP256(A[0x0], A[0xB], B[0x4], B[0x1], B[0xD], B[0xA], C[0x4], M(0x4)); + PP256(A[0x1], A[0x0], B[0x5], B[0x2], B[0xE], B[0xB], C[0x3], M(0x5)); + PP256(A[0x2], A[0x1], B[0x6], B[0x3], B[0xF], B[0xC], C[0x2], M(0x6)); + PP256(A[0x3], A[0x2], B[0x7], B[0x4], B[0x0], B[0xD], C[0x1], M(0x7)); + PP256(A[0x4], A[0x3], B[0x8], B[0x5], B[0x1], B[0xE], C[0x0], M(0x8)); + PP256(A[0x5], A[0x4], B[0x9], B[0x6], B[0x2], B[0xF], C[0xF], M(0x9)); + PP256(A[0x6], A[0x5], B[0xA], B[0x7], B[0x3], B[0x0], C[0xE], M(0xA)); + PP256(A[0x7], A[0x6], B[0xB], B[0x8], B[0x4], B[0x1], C[0xD], M(0xB)); + PP256(A[0x8], A[0x7], B[0xC], B[0x9], B[0x5], B[0x2], C[0xC], M(0xC)); + PP256(A[0x9], A[0x8], B[0xD], B[0xA], B[0x6], B[0x3], C[0xB], M(0xD)); + PP256(A[0xA], A[0x9], B[0xE], B[0xB], B[0x7], B[0x4], C[0xA], M(0xE)); + PP256(A[0xB], A[0xA], B[0xF], B[0xC], B[0x8], B[0x5], C[0x9], M(0xF)); + + A[0xB] = _mm256_add_epi32(A[0xB], C[0x6]); + A[0xA] = _mm256_add_epi32(A[0xA], C[0x5]); + A[0x9] = _mm256_add_epi32(A[0x9], C[0x4]); + A[0x8] = _mm256_add_epi32(A[0x8], C[0x3]); + A[0x7] = _mm256_add_epi32(A[0x7], C[0x2]); + A[0x6] = _mm256_add_epi32(A[0x6], C[0x1]); + A[0x5] = _mm256_add_epi32(A[0x5], C[0x0]); + A[0x4] = _mm256_add_epi32(A[0x4], C[0xF]); + A[0x3] = _mm256_add_epi32(A[0x3], C[0xE]); + A[0x2] = _mm256_add_epi32(A[0x2], C[0xD]); + A[0x1] = _mm256_add_epi32(A[0x1], C[0xC]); + A[0x0] = _mm256_add_epi32(A[0x0], C[0xB]); + A[0xB] = _mm256_add_epi32(A[0xB], C[0xA]); + A[0xA] = _mm256_add_epi32(A[0xA], C[0x9]); + A[0x9] = _mm256_add_epi32(A[0x9], C[0x8]); + A[0x8] = _mm256_add_epi32(A[0x8], C[0x7]); + A[0x7] = _mm256_add_epi32(A[0x7], C[0x6]); + A[0x6] = _mm256_add_epi32(A[0x6], C[0x5]); + A[0x5] = _mm256_add_epi32(A[0x5], C[0x4]); + A[0x4] = _mm256_add_epi32(A[0x4], C[0x3]); + A[0x3] = _mm256_add_epi32(A[0x3], C[0x2]); + A[0x2] = _mm256_add_epi32(A[0x2], C[0x1]); + A[0x1] = _mm256_add_epi32(A[0x1], C[0x0]); + A[0x0] = _mm256_add_epi32(A[0x0], C[0xF]); + A[0xB] = _mm256_add_epi32(A[0xB], C[0xE]); + A[0xA] = _mm256_add_epi32(A[0xA], C[0xD]); + A[0x9] = _mm256_add_epi32(A[0x9], C[0xC]); + A[0x8] = _mm256_add_epi32(A[0x8], C[0xB]); + A[0x7] = _mm256_add_epi32(A[0x7], C[0xA]); + A[0x6] = _mm256_add_epi32(A[0x6], C[0x9]); + A[0x5] = _mm256_add_epi32(A[0x5], C[0x8]); + A[0x4] = _mm256_add_epi32(A[0x4], C[0x7]); + A[0x3] = _mm256_add_epi32(A[0x3], C[0x6]); + A[0x2] = _mm256_add_epi32(A[0x2], C[0x5]); + A[0x1] = _mm256_add_epi32(A[0x1], C[0x4]); + A[0x0] = _mm256_add_epi32(A[0x0], C[0x3]); + +#define SWAP_AND_SUB256(xb, xc, xm) \ + do { \ + __m256i tmp; \ + tmp = xb; \ + xb = _mm256_sub_epi32(xc, xm); \ + xc = tmp; \ + } while (0) + + SWAP_AND_SUB256(B[0x0], C[0x0], M(0x0)); + SWAP_AND_SUB256(B[0x1], C[0x1], M(0x1)); + SWAP_AND_SUB256(B[0x2], C[0x2], M(0x2)); + SWAP_AND_SUB256(B[0x3], C[0x3], M(0x3)); + SWAP_AND_SUB256(B[0x4], C[0x4], M(0x4)); + SWAP_AND_SUB256(B[0x5], C[0x5], M(0x5)); + SWAP_AND_SUB256(B[0x6], C[0x6], M(0x6)); + SWAP_AND_SUB256(B[0x7], C[0x7], M(0x7)); + SWAP_AND_SUB256(B[0x8], C[0x8], M(0x8)); + SWAP_AND_SUB256(B[0x9], C[0x9], M(0x9)); + SWAP_AND_SUB256(B[0xA], C[0xA], M(0xA)); + SWAP_AND_SUB256(B[0xB], C[0xB], M(0xB)); + SWAP_AND_SUB256(B[0xC], C[0xC], M(0xC)); 
+ SWAP_AND_SUB256(B[0xD], C[0xD], M(0xD)); + SWAP_AND_SUB256(B[0xE], C[0xE], M(0xE)); + SWAP_AND_SUB256(B[0xF], C[0xF], M(0xF)); + + // move data pointer + message = (__m256i *)message + 16; + + if (++sc->Wlow == 0) sc->Whigh++; + } + + // round 2-5 +#define M2(i) _mm256_load_si256((__m256i *)termination + i) + + for (int k = 0; k < 4; k++) { + for (j = 0; j < 16; j++) B[j] = _mm256_add_epi32(B[j], M2(j)); + + A[0] = _mm256_xor_si256(A[0], _mm256_set1_epi32(sc->Wlow)); + A[1] = _mm256_xor_si256(A[1], _mm256_set1_epi32(sc->Whigh)); + + for (j = 0; j < 16; j++) + B[j] = _mm256_or_si256(_mm256_slli_epi32(B[j], 17), _mm256_srli_epi32(B[j], 15)); + + PP256(A[0x0], A[0xB], B[0x0], B[0xD], B[0x9], B[0x6], C[0x8], M2(0x0)); + PP256(A[0x1], A[0x0], B[0x1], B[0xE], B[0xA], B[0x7], C[0x7], M2(0x1)); + PP256(A[0x2], A[0x1], B[0x2], B[0xF], B[0xB], B[0x8], C[0x6], M2(0x2)); + PP256(A[0x3], A[0x2], B[0x3], B[0x0], B[0xC], B[0x9], C[0x5], M2(0x3)); + PP256(A[0x4], A[0x3], B[0x4], B[0x1], B[0xD], B[0xA], C[0x4], M2(0x4)); + PP256(A[0x5], A[0x4], B[0x5], B[0x2], B[0xE], B[0xB], C[0x3], M2(0x5)); + PP256(A[0x6], A[0x5], B[0x6], B[0x3], B[0xF], B[0xC], C[0x2], M2(0x6)); + PP256(A[0x7], A[0x6], B[0x7], B[0x4], B[0x0], B[0xD], C[0x1], M2(0x7)); + PP256(A[0x8], A[0x7], B[0x8], B[0x5], B[0x1], B[0xE], C[0x0], M2(0x8)); + PP256(A[0x9], A[0x8], B[0x9], B[0x6], B[0x2], B[0xF], C[0xF], M2(0x9)); + PP256(A[0xA], A[0x9], B[0xA], B[0x7], B[0x3], B[0x0], C[0xE], M2(0xA)); + PP256(A[0xB], A[0xA], B[0xB], B[0x8], B[0x4], B[0x1], C[0xD], M2(0xB)); + PP256(A[0x0], A[0xB], B[0xC], B[0x9], B[0x5], B[0x2], C[0xC], M2(0xC)); + PP256(A[0x1], A[0x0], B[0xD], B[0xA], B[0x6], B[0x3], C[0xB], M2(0xD)); + PP256(A[0x2], A[0x1], B[0xE], B[0xB], B[0x7], B[0x4], C[0xA], M2(0xE)); + PP256(A[0x3], A[0x2], B[0xF], B[0xC], B[0x8], B[0x5], C[0x9], M2(0xF)); + + PP256(A[0x4], A[0x3], B[0x0], B[0xD], B[0x9], B[0x6], C[0x8], M2(0x0)); + PP256(A[0x5], A[0x4], B[0x1], B[0xE], B[0xA], B[0x7], C[0x7], M2(0x1)); + PP256(A[0x6], A[0x5], B[0x2], B[0xF], B[0xB], B[0x8], C[0x6], M2(0x2)); + PP256(A[0x7], A[0x6], B[0x3], B[0x0], B[0xC], B[0x9], C[0x5], M2(0x3)); + PP256(A[0x8], A[0x7], B[0x4], B[0x1], B[0xD], B[0xA], C[0x4], M2(0x4)); + PP256(A[0x9], A[0x8], B[0x5], B[0x2], B[0xE], B[0xB], C[0x3], M2(0x5)); + PP256(A[0xA], A[0x9], B[0x6], B[0x3], B[0xF], B[0xC], C[0x2], M2(0x6)); + PP256(A[0xB], A[0xA], B[0x7], B[0x4], B[0x0], B[0xD], C[0x1], M2(0x7)); + PP256(A[0x0], A[0xB], B[0x8], B[0x5], B[0x1], B[0xE], C[0x0], M2(0x8)); + PP256(A[0x1], A[0x0], B[0x9], B[0x6], B[0x2], B[0xF], C[0xF], M2(0x9)); + PP256(A[0x2], A[0x1], B[0xA], B[0x7], B[0x3], B[0x0], C[0xE], M2(0xA)); + PP256(A[0x3], A[0x2], B[0xB], B[0x8], B[0x4], B[0x1], C[0xD], M2(0xB)); + PP256(A[0x4], A[0x3], B[0xC], B[0x9], B[0x5], B[0x2], C[0xC], M2(0xC)); + PP256(A[0x5], A[0x4], B[0xD], B[0xA], B[0x6], B[0x3], C[0xB], M2(0xD)); + PP256(A[0x6], A[0x5], B[0xE], B[0xB], B[0x7], B[0x4], C[0xA], M2(0xE)); + PP256(A[0x7], A[0x6], B[0xF], B[0xC], B[0x8], B[0x5], C[0x9], M2(0xF)); + + PP256(A[0x8], A[0x7], B[0x0], B[0xD], B[0x9], B[0x6], C[0x8], M2(0x0)); + PP256(A[0x9], A[0x8], B[0x1], B[0xE], B[0xA], B[0x7], C[0x7], M2(0x1)); + PP256(A[0xA], A[0x9], B[0x2], B[0xF], B[0xB], B[0x8], C[0x6], M2(0x2)); + PP256(A[0xB], A[0xA], B[0x3], B[0x0], B[0xC], B[0x9], C[0x5], M2(0x3)); + PP256(A[0x0], A[0xB], B[0x4], B[0x1], B[0xD], B[0xA], C[0x4], M2(0x4)); + PP256(A[0x1], A[0x0], B[0x5], B[0x2], B[0xE], B[0xB], C[0x3], M2(0x5)); + PP256(A[0x2], A[0x1], B[0x6], B[0x3], B[0xF], B[0xC], C[0x2], M2(0x6)); + 
PP256(A[0x3], A[0x2], B[0x7], B[0x4], B[0x0], B[0xD], C[0x1], M2(0x7)); + PP256(A[0x4], A[0x3], B[0x8], B[0x5], B[0x1], B[0xE], C[0x0], M2(0x8)); + PP256(A[0x5], A[0x4], B[0x9], B[0x6], B[0x2], B[0xF], C[0xF], M2(0x9)); + PP256(A[0x6], A[0x5], B[0xA], B[0x7], B[0x3], B[0x0], C[0xE], M2(0xA)); + PP256(A[0x7], A[0x6], B[0xB], B[0x8], B[0x4], B[0x1], C[0xD], M2(0xB)); + PP256(A[0x8], A[0x7], B[0xC], B[0x9], B[0x5], B[0x2], C[0xC], M2(0xC)); + PP256(A[0x9], A[0x8], B[0xD], B[0xA], B[0x6], B[0x3], C[0xB], M2(0xD)); + PP256(A[0xA], A[0x9], B[0xE], B[0xB], B[0x7], B[0x4], C[0xA], M2(0xE)); + PP256(A[0xB], A[0xA], B[0xF], B[0xC], B[0x8], B[0x5], C[0x9], M2(0xF)); + + A[0xB] = _mm256_add_epi32(A[0xB], C[0x6]); + A[0xA] = _mm256_add_epi32(A[0xA], C[0x5]); + A[0x9] = _mm256_add_epi32(A[0x9], C[0x4]); + A[0x8] = _mm256_add_epi32(A[0x8], C[0x3]); + A[0x7] = _mm256_add_epi32(A[0x7], C[0x2]); + A[0x6] = _mm256_add_epi32(A[0x6], C[0x1]); + A[0x5] = _mm256_add_epi32(A[0x5], C[0x0]); + A[0x4] = _mm256_add_epi32(A[0x4], C[0xF]); + A[0x3] = _mm256_add_epi32(A[0x3], C[0xE]); + A[0x2] = _mm256_add_epi32(A[0x2], C[0xD]); + A[0x1] = _mm256_add_epi32(A[0x1], C[0xC]); + A[0x0] = _mm256_add_epi32(A[0x0], C[0xB]); + A[0xB] = _mm256_add_epi32(A[0xB], C[0xA]); + A[0xA] = _mm256_add_epi32(A[0xA], C[0x9]); + A[0x9] = _mm256_add_epi32(A[0x9], C[0x8]); + A[0x8] = _mm256_add_epi32(A[0x8], C[0x7]); + A[0x7] = _mm256_add_epi32(A[0x7], C[0x6]); + A[0x6] = _mm256_add_epi32(A[0x6], C[0x5]); + A[0x5] = _mm256_add_epi32(A[0x5], C[0x4]); + A[0x4] = _mm256_add_epi32(A[0x4], C[0x3]); + A[0x3] = _mm256_add_epi32(A[0x3], C[0x2]); + A[0x2] = _mm256_add_epi32(A[0x2], C[0x1]); + A[0x1] = _mm256_add_epi32(A[0x1], C[0x0]); + A[0x0] = _mm256_add_epi32(A[0x0], C[0xF]); + A[0xB] = _mm256_add_epi32(A[0xB], C[0xE]); + A[0xA] = _mm256_add_epi32(A[0xA], C[0xD]); + A[0x9] = _mm256_add_epi32(A[0x9], C[0xC]); + A[0x8] = _mm256_add_epi32(A[0x8], C[0xB]); + A[0x7] = _mm256_add_epi32(A[0x7], C[0xA]); + A[0x6] = _mm256_add_epi32(A[0x6], C[0x9]); + A[0x5] = _mm256_add_epi32(A[0x5], C[0x8]); + A[0x4] = _mm256_add_epi32(A[0x4], C[0x7]); + A[0x3] = _mm256_add_epi32(A[0x3], C[0x6]); + A[0x2] = _mm256_add_epi32(A[0x2], C[0x5]); + A[0x1] = _mm256_add_epi32(A[0x1], C[0x4]); + A[0x0] = _mm256_add_epi32(A[0x0], C[0x3]); + + SWAP_AND_SUB256(B[0x0], C[0x0], M2(0x0)); + SWAP_AND_SUB256(B[0x1], C[0x1], M2(0x1)); + SWAP_AND_SUB256(B[0x2], C[0x2], M2(0x2)); + SWAP_AND_SUB256(B[0x3], C[0x3], M2(0x3)); + SWAP_AND_SUB256(B[0x4], C[0x4], M2(0x4)); + SWAP_AND_SUB256(B[0x5], C[0x5], M2(0x5)); + SWAP_AND_SUB256(B[0x6], C[0x6], M2(0x6)); + SWAP_AND_SUB256(B[0x7], C[0x7], M2(0x7)); + SWAP_AND_SUB256(B[0x8], C[0x8], M2(0x8)); + SWAP_AND_SUB256(B[0x9], C[0x9], M2(0x9)); + SWAP_AND_SUB256(B[0xA], C[0xA], M2(0xA)); + SWAP_AND_SUB256(B[0xB], C[0xB], M2(0xB)); + SWAP_AND_SUB256(B[0xC], C[0xC], M2(0xC)); + SWAP_AND_SUB256(B[0xD], C[0xD], M2(0xD)); + SWAP_AND_SUB256(B[0xE], C[0xE], M2(0xE)); + SWAP_AND_SUB256(B[0xF], C[0xF], M2(0xF)); + + if (++sc->Wlow == 0) sc->Whigh++; + + if (sc->Wlow-- == 0) sc->Whigh--; + } + + // download SIMD aligned hashes + for (j = 0; j < 8; j++) { + _mm256_storeu_si256((__m256i *)dst + j, C[j+8]); + } + + // reset Wlow & Whigh + sc->Wlow = 1; + sc->Whigh = 0; +} + +// Shabal routine optimized for mining +void mshabal_deadline_fast_avx2(mshabal256_context_fast *sc, void *message, void *termination, void *dst0, void *dst1, void *dst2, + void *dst3, void *dst4, void *dst5, void *dst6, void *dst7) { + union input { + u32 words[16 * MSHABAL256_VECTOR_SIZE]; + 
__m256i data[16]; }; size_t j; __m256i A[12], B[16], C[16]; __m256i one; + for (j = 0; j < 12; j++) A[j] = _mm256_loadu_si256((__m256i *)sc->state + j); + for (j = 0; j < 16; j++) { + B[j] = _mm256_loadu_si256((__m256i *)sc->state + j + 12); + C[j] = _mm256_loadu_si256((__m256i *)sc->state + j + 28); + } + one = _mm256_set1_epi32(C32(0xFFFFFFFF)); + + // round 1 #define M(i) _mm256_loadu_si256((__m256i *)message + i) for (j = 0; j < 16; j++) B[j] = _mm256_add_epi32(B[j], M(j)); @@ -766,8 +1059,8 @@ void mshabal_deadline_fast_avx2(mshabal256_context_fast* sc, void* message, void // download SIMD aligned deadlines u32 simd_dst[16]; - _mm256_storeu_si256((__m256i*)&simd_dst[0], C[8]); - _mm256_storeu_si256((__m256i*)&simd_dst[8], C[9]); + _mm256_storeu_si256((__m256i *)&simd_dst[0], C[8]); + _mm256_storeu_si256((__m256i *)&simd_dst[8], C[9]); // unpack SIMD data unsigned z; diff --git a/src/c/mshabal_256_avx2.h b/src/c/mshabal_256_avx2.h index 1306c63..4c0cb38 100644 --- a/src/c/mshabal_256_avx2.h +++ b/src/c/mshabal_256_avx2.h @@ -161,12 +161,17 @@ void mshabal_close_avx2(mshabal256_context *sc, unsigned ub0, unsigned ub1, unsi void *dst5, void *dst6, void *dst7); /* - * optimised Shabal Routine for PoC Mining + * optimised Shabal routine for PoC plotting and hashing + */ +void mshabal_hash_fast_avx2(mshabal256_context_fast *sc, void *message, void *termination, + void *dst, unsigned num); + +/* + * optimised Shabal routine for PoC mining */ void mshabal_deadline_fast_avx2(mshabal256_context_fast *sc, void *message, void *termination, void *dst0, void *dst1, void *dst2, void *dst3, void *dst4, void *dst5, void *dst6, void *dst7); - #ifdef __cplusplus } #endif diff --git a/src/c/mshabal_512_avx512f.c b/src/c/mshabal_512_avx512f.c index 3046da0..06c7c7b 100644 --- a/src/c/mshabal_512_avx512f.c +++ b/src/c/mshabal_512_avx512f.c @@ -242,6 +242,7 @@ static void mshabal_compress_avx512f(mshabal512_context *sc, const unsigned char _mm512_storeu_si512((__m512i *)sc->state + j + 12, B[j]); _mm512_storeu_si512((__m512i *)sc->state + j + 28, C[j]); } + #undef M } @@ -344,7 +345,7 @@ void mshabal_init_avx512f(mshabal512_context *sc, unsigned out_size) { sc->out_size = out_size; } -void simd512_mshabal_avx512f(mshabal512_context *sc, const void *data0, const void *data1, const void *data2, +void mshabal_avx512f(mshabal512_context *sc, const void *data0, const void *data1, const void *data2, const void *data3, const void *data4, const void *data5, const void *data6, const void *data7, const void *data8, const void *data9, const void *data10, const void *data11, const void *data12, const void *data13, const void *data14, const void *data15, size_t len) { @@ -472,36 +473,30 @@ void simd512_mshabal_avx512f(mshabal512_context *sc, const void *data0, const vo mshabal_compress_avx512f(sc, sc->buf0, sc->buf1, sc->buf2, sc->buf3, sc->buf4, sc->buf5, sc->buf6, sc->buf7, sc->buf8, sc->buf9, sc->buf10, sc->buf11, sc->buf12, sc->buf13, sc->buf14, sc->buf15, 1); - data0 = (unsigned char *)data0 + clen; - data1 = (unsigned char *)data1 + clen; - data2 = (unsigned char *)data2 + clen; - data3 = (unsigned char *)data3 + clen; - data4 = (unsigned char *)data4 + clen; - data5 = (unsigned char *)data5 + clen; - data6 = (unsigned char *)data6 + clen; - data7 = (unsigned char *)data7 + clen; - data8 = (unsigned char *)data8 + clen; - data9 = (unsigned char *)data9 + clen; - data10 = (unsigned char *)data10 + clen; - data11 = (unsigned char *)data11 + clen; - data12 = (unsigned char *)data12 + clen; - data13 = 
(unsigned char *)data13 + clen; - data14 = (unsigned char *)data14 + clen; - data15 = (unsigned char *)data15 + clen; + data0 = (const unsigned char *)data0 + clen; + data1 = (const unsigned char *)data1 + clen; + data2 = (const unsigned char *)data2 + clen; + data3 = (const unsigned char *)data3 + clen; + data4 = (const unsigned char *)data4 + clen; + data5 = (const unsigned char *)data5 + clen; + data6 = (const unsigned char *)data6 + clen; + data7 = (const unsigned char *)data7 + clen; + data8 = (const unsigned char *)data8 + clen; + data9 = (const unsigned char *)data9 + clen; + data10 = (const unsigned char *)data10 + clen; + data11 = (const unsigned char *)data11 + clen; + data12 = (const unsigned char *)data12 + clen; + data13 = (const unsigned char *)data13 + clen; + data14 = (const unsigned char *)data14 + clen; + data15 = (const unsigned char *)data15 + clen; len -= clen; } } num = len >> 6; if (num != 0) { - mshabal_compress_avx512f(sc, (const unsigned char *)data0, (const unsigned char *)data1, - (const unsigned char *)data2, (const unsigned char *)data3, - (const unsigned char *)data4, (const unsigned char *)data5, - (const unsigned char *)data6, (const unsigned char *)data7, - (const unsigned char *)data8, (const unsigned char *)data9, - (const unsigned char *)data10, (const unsigned char *)data11, - (const unsigned char *)data12, (const unsigned char *)data13, - (const unsigned char *)data14, (const unsigned char *)data15, num); + mshabal_compress_avx512f(sc, data0, data1, data2, data3, data4, data5, data6, data7, + data8, data9, data10, data11, data12, data13, data14, data15, num); data0 = (const unsigned char *)data0 + (num << 6); data1 = (const unsigned char *)data1 + (num << 6); data2 = (const unsigned char *)data2 + (num << 6); @@ -528,18 +523,18 @@ void simd512_mshabal_avx512f(mshabal512_context *sc, const void *data0, const vo memcpy(sc->buf5, data5, len); memcpy(sc->buf6, data6, len); memcpy(sc->buf7, data7, len); - memcpy(sc->buf0, data8, len); - memcpy(sc->buf1, data9, len); - memcpy(sc->buf2, data10, len); - memcpy(sc->buf3, data11, len); - memcpy(sc->buf4, data12, len); - memcpy(sc->buf5, data13, len); - memcpy(sc->buf6, data14, len); - memcpy(sc->buf7, data15, len); + memcpy(sc->buf8, data8, len); + memcpy(sc->buf9, data9, len); + memcpy(sc->buf10, data10, len); + memcpy(sc->buf11, data11, len); + memcpy(sc->buf12, data12, len); + memcpy(sc->buf13, data13, len); + memcpy(sc->buf14, data14, len); + memcpy(sc->buf15, data15, len); sc->ptr = len; } -void mshabal256_close_avx512f(mshabal512_context *sc, unsigned ub0, unsigned ub1, unsigned ub2, +void mshabal_close_avx512f(mshabal512_context *sc, unsigned ub0, unsigned ub1, unsigned ub2, unsigned ub3, unsigned ub4, unsigned ub5, unsigned ub6, unsigned ub7, unsigned ub8, unsigned ub9, unsigned ub10, unsigned ub11, unsigned ub12, unsigned ub13, unsigned ub14, unsigned ub15, @@ -651,61 +646,357 @@ void mshabal256_close_avx512f(mshabal512_context *sc, unsigned ub0, unsigned ub1 if (dst8 != NULL) { u32 *out; - out = (u32 *)dst0; + out = (u32 *)dst8; for (z = 0; z < out_size_w32; z++) out[z] = sc->state[off + z * MSHABAL512_VECTOR_SIZE + 8]; } if (dst9 != NULL) { u32 *out; - out = (u32 *)dst1; + out = (u32 *)dst9; for (z = 0; z < out_size_w32; z++) out[z] = sc->state[off + z * MSHABAL512_VECTOR_SIZE + 9]; } if (dst10 != NULL) { u32 *out; - out = (u32 *)dst2; + out = (u32 *)dst10; for (z = 0; z < out_size_w32; z++) out[z] = sc->state[off + z * MSHABAL512_VECTOR_SIZE + 10]; } if (dst11 != NULL) { u32 *out; - out = (u32 
*)dst3; + out = (u32 *)dst11; for (z = 0; z < out_size_w32; z++) out[z] = sc->state[off + z * MSHABAL512_VECTOR_SIZE + 11]; } if (dst12 != NULL) { u32 *out; - out = (u32 *)dst4; + out = (u32 *)dst12; for (z = 0; z < out_size_w32; z++) out[z] = sc->state[off + z * MSHABAL512_VECTOR_SIZE + 12]; } if (dst13 != NULL) { u32 *out; - out = (u32 *)dst5; + out = (u32 *)dst13; for (z = 0; z < out_size_w32; z++) out[z] = sc->state[off + z * MSHABAL512_VECTOR_SIZE + 13]; } if (dst14 != NULL) { u32 *out; - out = (u32 *)dst6; + out = (u32 *)dst14; for (z = 0; z < out_size_w32; z++) out[z] = sc->state[off + z * MSHABAL512_VECTOR_SIZE + 14]; } if (dst15 != NULL) { u32 *out; - out = (u32 *)dst7; + out = (u32 *)dst15; for (z = 0; z < out_size_w32; z++) out[z] = sc->state[off + z * MSHABAL512_VECTOR_SIZE + 15]; } } +// Shabal routine optimized for plotting and hashing +void mshabal_hash_fast_avx512f(mshabal512_context_fast *sc, void *message, void *termination, + void *dst, unsigned num) { + union input { + u32 words[16 * MSHABAL512_VECTOR_SIZE]; + __m512i data[16]; + }; + size_t j; + __m512i A[12], B[16], C[16]; + __m512i one; + + for (j = 0; j < 12; j++) A[j] = _mm512_loadu_si512((__m512i *)sc->state + j); + for (j = 0; j < 16; j++) { + B[j] = _mm512_loadu_si512((__m512i *)sc->state + j + 12); + C[j] = _mm512_loadu_si512((__m512i *)sc->state + j + 28); + } + one = _mm512_set1_epi32(C32(0xFFFFFFFF)); + + // round 1 +#define M(i) _mm512_load_si512((__m512i *)message + i) + + while (num-- > 0) { + for (j = 0; j < 16; j++) B[j] = _mm512_add_epi32(B[j], M(j)); + + A[0] = _mm512_xor_si512(A[0], _mm512_set1_epi32(sc->Wlow)); + A[1] = _mm512_xor_si512(A[1], _mm512_set1_epi32(sc->Whigh)); + + for (j = 0; j < 16; j++) + B[j] = _mm512_or_si512(_mm512_slli_epi32(B[j], 17), _mm512_srli_epi32(B[j], 15)); + +#define PP512(xa0, xa1, xb0, xb1, xb2, xb3, xc, xm) \ + do { \ + __m512i tt; \ + tt = _mm512_or_si512(_mm512_slli_epi32(xa1, 15), _mm512_srli_epi32(xa1, 17)); \ + tt = _mm512_add_epi32(_mm512_slli_epi32(tt, 2), tt); \ + tt = _mm512_xor_si512(_mm512_xor_si512(xa0, tt), xc); \ + tt = _mm512_add_epi32(_mm512_slli_epi32(tt, 1), tt); \ + tt = _mm512_xor_si512(_mm512_xor_si512(tt, xb1), \ + _mm512_xor_si512(_mm512_andnot_si512(xb3, xb2), xm)); \ + xa0 = tt; \ + tt = xb0; \ + tt = _mm512_or_si512(_mm512_slli_epi32(tt, 1), _mm512_srli_epi32(tt, 31)); \ + xb0 = _mm512_xor_si512(tt, _mm512_xor_si512(xa0, one)); \ + } while (0) + + PP512(A[0x0], A[0xB], B[0x0], B[0xD], B[0x9], B[0x6], C[0x8], M(0x0)); + PP512(A[0x1], A[0x0], B[0x1], B[0xE], B[0xA], B[0x7], C[0x7], M(0x1)); + PP512(A[0x2], A[0x1], B[0x2], B[0xF], B[0xB], B[0x8], C[0x6], M(0x2)); + PP512(A[0x3], A[0x2], B[0x3], B[0x0], B[0xC], B[0x9], C[0x5], M(0x3)); + PP512(A[0x4], A[0x3], B[0x4], B[0x1], B[0xD], B[0xA], C[0x4], M(0x4)); + PP512(A[0x5], A[0x4], B[0x5], B[0x2], B[0xE], B[0xB], C[0x3], M(0x5)); + PP512(A[0x6], A[0x5], B[0x6], B[0x3], B[0xF], B[0xC], C[0x2], M(0x6)); + PP512(A[0x7], A[0x6], B[0x7], B[0x4], B[0x0], B[0xD], C[0x1], M(0x7)); + PP512(A[0x8], A[0x7], B[0x8], B[0x5], B[0x1], B[0xE], C[0x0], M(0x8)); + PP512(A[0x9], A[0x8], B[0x9], B[0x6], B[0x2], B[0xF], C[0xF], M(0x9)); + PP512(A[0xA], A[0x9], B[0xA], B[0x7], B[0x3], B[0x0], C[0xE], M(0xA)); + PP512(A[0xB], A[0xA], B[0xB], B[0x8], B[0x4], B[0x1], C[0xD], M(0xB)); + PP512(A[0x0], A[0xB], B[0xC], B[0x9], B[0x5], B[0x2], C[0xC], M(0xC)); + PP512(A[0x1], A[0x0], B[0xD], B[0xA], B[0x6], B[0x3], C[0xB], M(0xD)); + PP512(A[0x2], A[0x1], B[0xE], B[0xB], B[0x7], B[0x4], C[0xA], M(0xE)); + PP512(A[0x3], A[0x2], 
B[0xF], B[0xC], B[0x8], B[0x5], C[0x9], M(0xF)); + + PP512(A[0x4], A[0x3], B[0x0], B[0xD], B[0x9], B[0x6], C[0x8], M(0x0)); + PP512(A[0x5], A[0x4], B[0x1], B[0xE], B[0xA], B[0x7], C[0x7], M(0x1)); + PP512(A[0x6], A[0x5], B[0x2], B[0xF], B[0xB], B[0x8], C[0x6], M(0x2)); + PP512(A[0x7], A[0x6], B[0x3], B[0x0], B[0xC], B[0x9], C[0x5], M(0x3)); + PP512(A[0x8], A[0x7], B[0x4], B[0x1], B[0xD], B[0xA], C[0x4], M(0x4)); + PP512(A[0x9], A[0x8], B[0x5], B[0x2], B[0xE], B[0xB], C[0x3], M(0x5)); + PP512(A[0xA], A[0x9], B[0x6], B[0x3], B[0xF], B[0xC], C[0x2], M(0x6)); + PP512(A[0xB], A[0xA], B[0x7], B[0x4], B[0x0], B[0xD], C[0x1], M(0x7)); + PP512(A[0x0], A[0xB], B[0x8], B[0x5], B[0x1], B[0xE], C[0x0], M(0x8)); + PP512(A[0x1], A[0x0], B[0x9], B[0x6], B[0x2], B[0xF], C[0xF], M(0x9)); + PP512(A[0x2], A[0x1], B[0xA], B[0x7], B[0x3], B[0x0], C[0xE], M(0xA)); + PP512(A[0x3], A[0x2], B[0xB], B[0x8], B[0x4], B[0x1], C[0xD], M(0xB)); + PP512(A[0x4], A[0x3], B[0xC], B[0x9], B[0x5], B[0x2], C[0xC], M(0xC)); + PP512(A[0x5], A[0x4], B[0xD], B[0xA], B[0x6], B[0x3], C[0xB], M(0xD)); + PP512(A[0x6], A[0x5], B[0xE], B[0xB], B[0x7], B[0x4], C[0xA], M(0xE)); + PP512(A[0x7], A[0x6], B[0xF], B[0xC], B[0x8], B[0x5], C[0x9], M(0xF)); + + PP512(A[0x8], A[0x7], B[0x0], B[0xD], B[0x9], B[0x6], C[0x8], M(0x0)); + PP512(A[0x9], A[0x8], B[0x1], B[0xE], B[0xA], B[0x7], C[0x7], M(0x1)); + PP512(A[0xA], A[0x9], B[0x2], B[0xF], B[0xB], B[0x8], C[0x6], M(0x2)); + PP512(A[0xB], A[0xA], B[0x3], B[0x0], B[0xC], B[0x9], C[0x5], M(0x3)); + PP512(A[0x0], A[0xB], B[0x4], B[0x1], B[0xD], B[0xA], C[0x4], M(0x4)); + PP512(A[0x1], A[0x0], B[0x5], B[0x2], B[0xE], B[0xB], C[0x3], M(0x5)); + PP512(A[0x2], A[0x1], B[0x6], B[0x3], B[0xF], B[0xC], C[0x2], M(0x6)); + PP512(A[0x3], A[0x2], B[0x7], B[0x4], B[0x0], B[0xD], C[0x1], M(0x7)); + PP512(A[0x4], A[0x3], B[0x8], B[0x5], B[0x1], B[0xE], C[0x0], M(0x8)); + PP512(A[0x5], A[0x4], B[0x9], B[0x6], B[0x2], B[0xF], C[0xF], M(0x9)); + PP512(A[0x6], A[0x5], B[0xA], B[0x7], B[0x3], B[0x0], C[0xE], M(0xA)); + PP512(A[0x7], A[0x6], B[0xB], B[0x8], B[0x4], B[0x1], C[0xD], M(0xB)); + PP512(A[0x8], A[0x7], B[0xC], B[0x9], B[0x5], B[0x2], C[0xC], M(0xC)); + PP512(A[0x9], A[0x8], B[0xD], B[0xA], B[0x6], B[0x3], C[0xB], M(0xD)); + PP512(A[0xA], A[0x9], B[0xE], B[0xB], B[0x7], B[0x4], C[0xA], M(0xE)); + PP512(A[0xB], A[0xA], B[0xF], B[0xC], B[0x8], B[0x5], C[0x9], M(0xF)); + + A[0xB] = _mm512_add_epi32(A[0xB], C[0x6]); + A[0xA] = _mm512_add_epi32(A[0xA], C[0x5]); + A[0x9] = _mm512_add_epi32(A[0x9], C[0x4]); + A[0x8] = _mm512_add_epi32(A[0x8], C[0x3]); + A[0x7] = _mm512_add_epi32(A[0x7], C[0x2]); + A[0x6] = _mm512_add_epi32(A[0x6], C[0x1]); + A[0x5] = _mm512_add_epi32(A[0x5], C[0x0]); + A[0x4] = _mm512_add_epi32(A[0x4], C[0xF]); + A[0x3] = _mm512_add_epi32(A[0x3], C[0xE]); + A[0x2] = _mm512_add_epi32(A[0x2], C[0xD]); + A[0x1] = _mm512_add_epi32(A[0x1], C[0xC]); + A[0x0] = _mm512_add_epi32(A[0x0], C[0xB]); + A[0xB] = _mm512_add_epi32(A[0xB], C[0xA]); + A[0xA] = _mm512_add_epi32(A[0xA], C[0x9]); + A[0x9] = _mm512_add_epi32(A[0x9], C[0x8]); + A[0x8] = _mm512_add_epi32(A[0x8], C[0x7]); + A[0x7] = _mm512_add_epi32(A[0x7], C[0x6]); + A[0x6] = _mm512_add_epi32(A[0x6], C[0x5]); + A[0x5] = _mm512_add_epi32(A[0x5], C[0x4]); + A[0x4] = _mm512_add_epi32(A[0x4], C[0x3]); + A[0x3] = _mm512_add_epi32(A[0x3], C[0x2]); + A[0x2] = _mm512_add_epi32(A[0x2], C[0x1]); + A[0x1] = _mm512_add_epi32(A[0x1], C[0x0]); + A[0x0] = _mm512_add_epi32(A[0x0], C[0xF]); + A[0xB] = _mm512_add_epi32(A[0xB], C[0xE]); + A[0xA] = _mm512_add_epi32(A[0xA], 
C[0xD]); + A[0x9] = _mm512_add_epi32(A[0x9], C[0xC]); + A[0x8] = _mm512_add_epi32(A[0x8], C[0xB]); + A[0x7] = _mm512_add_epi32(A[0x7], C[0xA]); + A[0x6] = _mm512_add_epi32(A[0x6], C[0x9]); + A[0x5] = _mm512_add_epi32(A[0x5], C[0x8]); + A[0x4] = _mm512_add_epi32(A[0x4], C[0x7]); + A[0x3] = _mm512_add_epi32(A[0x3], C[0x6]); + A[0x2] = _mm512_add_epi32(A[0x2], C[0x5]); + A[0x1] = _mm512_add_epi32(A[0x1], C[0x4]); + A[0x0] = _mm512_add_epi32(A[0x0], C[0x3]); + +#define SWAP_AND_SUB512(xb, xc, xm) \ + do { \ + __m512i tmp; \ + tmp = xb; \ + xb = _mm512_sub_epi32(xc, xm); \ + xc = tmp; \ + } while (0) + + SWAP_AND_SUB512(B[0x0], C[0x0], M(0x0)); + SWAP_AND_SUB512(B[0x1], C[0x1], M(0x1)); + SWAP_AND_SUB512(B[0x2], C[0x2], M(0x2)); + SWAP_AND_SUB512(B[0x3], C[0x3], M(0x3)); + SWAP_AND_SUB512(B[0x4], C[0x4], M(0x4)); + SWAP_AND_SUB512(B[0x5], C[0x5], M(0x5)); + SWAP_AND_SUB512(B[0x6], C[0x6], M(0x6)); + SWAP_AND_SUB512(B[0x7], C[0x7], M(0x7)); + SWAP_AND_SUB512(B[0x8], C[0x8], M(0x8)); + SWAP_AND_SUB512(B[0x9], C[0x9], M(0x9)); + SWAP_AND_SUB512(B[0xA], C[0xA], M(0xA)); + SWAP_AND_SUB512(B[0xB], C[0xB], M(0xB)); + SWAP_AND_SUB512(B[0xC], C[0xC], M(0xC)); + SWAP_AND_SUB512(B[0xD], C[0xD], M(0xD)); + SWAP_AND_SUB512(B[0xE], C[0xE], M(0xE)); + SWAP_AND_SUB512(B[0xF], C[0xF], M(0xF)); + + // move data pointer + message = (__m512i *)message + 16; + + if (++sc->Wlow == 0) sc->Whigh++; + } + + // round 2-5 +#define M2(i) _mm512_load_si512((__m512i *)termination + i) + + for (int k = 0; k < 4; k++) { + for (j = 0; j < 16; j++) B[j] = _mm512_add_epi32(B[j], M2(j)); + + A[0] = _mm512_xor_si512(A[0], _mm512_set1_epi32(sc->Wlow)); + A[1] = _mm512_xor_si512(A[1], _mm512_set1_epi32(sc->Whigh)); + + for (j = 0; j < 16; j++) + B[j] = _mm512_or_si512(_mm512_slli_epi32(B[j], 17), _mm512_srli_epi32(B[j], 15)); + + PP512(A[0x0], A[0xB], B[0x0], B[0xD], B[0x9], B[0x6], C[0x8], M2(0x0)); + PP512(A[0x1], A[0x0], B[0x1], B[0xE], B[0xA], B[0x7], C[0x7], M2(0x1)); + PP512(A[0x2], A[0x1], B[0x2], B[0xF], B[0xB], B[0x8], C[0x6], M2(0x2)); + PP512(A[0x3], A[0x2], B[0x3], B[0x0], B[0xC], B[0x9], C[0x5], M2(0x3)); + PP512(A[0x4], A[0x3], B[0x4], B[0x1], B[0xD], B[0xA], C[0x4], M2(0x4)); + PP512(A[0x5], A[0x4], B[0x5], B[0x2], B[0xE], B[0xB], C[0x3], M2(0x5)); + PP512(A[0x6], A[0x5], B[0x6], B[0x3], B[0xF], B[0xC], C[0x2], M2(0x6)); + PP512(A[0x7], A[0x6], B[0x7], B[0x4], B[0x0], B[0xD], C[0x1], M2(0x7)); + PP512(A[0x8], A[0x7], B[0x8], B[0x5], B[0x1], B[0xE], C[0x0], M2(0x8)); + PP512(A[0x9], A[0x8], B[0x9], B[0x6], B[0x2], B[0xF], C[0xF], M2(0x9)); + PP512(A[0xA], A[0x9], B[0xA], B[0x7], B[0x3], B[0x0], C[0xE], M2(0xA)); + PP512(A[0xB], A[0xA], B[0xB], B[0x8], B[0x4], B[0x1], C[0xD], M2(0xB)); + PP512(A[0x0], A[0xB], B[0xC], B[0x9], B[0x5], B[0x2], C[0xC], M2(0xC)); + PP512(A[0x1], A[0x0], B[0xD], B[0xA], B[0x6], B[0x3], C[0xB], M2(0xD)); + PP512(A[0x2], A[0x1], B[0xE], B[0xB], B[0x7], B[0x4], C[0xA], M2(0xE)); + PP512(A[0x3], A[0x2], B[0xF], B[0xC], B[0x8], B[0x5], C[0x9], M2(0xF)); + + PP512(A[0x4], A[0x3], B[0x0], B[0xD], B[0x9], B[0x6], C[0x8], M2(0x0)); + PP512(A[0x5], A[0x4], B[0x1], B[0xE], B[0xA], B[0x7], C[0x7], M2(0x1)); + PP512(A[0x6], A[0x5], B[0x2], B[0xF], B[0xB], B[0x8], C[0x6], M2(0x2)); + PP512(A[0x7], A[0x6], B[0x3], B[0x0], B[0xC], B[0x9], C[0x5], M2(0x3)); + PP512(A[0x8], A[0x7], B[0x4], B[0x1], B[0xD], B[0xA], C[0x4], M2(0x4)); + PP512(A[0x9], A[0x8], B[0x5], B[0x2], B[0xE], B[0xB], C[0x3], M2(0x5)); + PP512(A[0xA], A[0x9], B[0x6], B[0x3], B[0xF], B[0xC], C[0x2], M2(0x6)); + PP512(A[0xB], A[0xA], B[0x7], 
B[0x4], B[0x0], B[0xD], C[0x1], M2(0x7)); + PP512(A[0x0], A[0xB], B[0x8], B[0x5], B[0x1], B[0xE], C[0x0], M2(0x8)); + PP512(A[0x1], A[0x0], B[0x9], B[0x6], B[0x2], B[0xF], C[0xF], M2(0x9)); + PP512(A[0x2], A[0x1], B[0xA], B[0x7], B[0x3], B[0x0], C[0xE], M2(0xA)); + PP512(A[0x3], A[0x2], B[0xB], B[0x8], B[0x4], B[0x1], C[0xD], M2(0xB)); + PP512(A[0x4], A[0x3], B[0xC], B[0x9], B[0x5], B[0x2], C[0xC], M2(0xC)); + PP512(A[0x5], A[0x4], B[0xD], B[0xA], B[0x6], B[0x3], C[0xB], M2(0xD)); + PP512(A[0x6], A[0x5], B[0xE], B[0xB], B[0x7], B[0x4], C[0xA], M2(0xE)); + PP512(A[0x7], A[0x6], B[0xF], B[0xC], B[0x8], B[0x5], C[0x9], M2(0xF)); + + PP512(A[0x8], A[0x7], B[0x0], B[0xD], B[0x9], B[0x6], C[0x8], M2(0x0)); + PP512(A[0x9], A[0x8], B[0x1], B[0xE], B[0xA], B[0x7], C[0x7], M2(0x1)); + PP512(A[0xA], A[0x9], B[0x2], B[0xF], B[0xB], B[0x8], C[0x6], M2(0x2)); + PP512(A[0xB], A[0xA], B[0x3], B[0x0], B[0xC], B[0x9], C[0x5], M2(0x3)); + PP512(A[0x0], A[0xB], B[0x4], B[0x1], B[0xD], B[0xA], C[0x4], M2(0x4)); + PP512(A[0x1], A[0x0], B[0x5], B[0x2], B[0xE], B[0xB], C[0x3], M2(0x5)); + PP512(A[0x2], A[0x1], B[0x6], B[0x3], B[0xF], B[0xC], C[0x2], M2(0x6)); + PP512(A[0x3], A[0x2], B[0x7], B[0x4], B[0x0], B[0xD], C[0x1], M2(0x7)); + PP512(A[0x4], A[0x3], B[0x8], B[0x5], B[0x1], B[0xE], C[0x0], M2(0x8)); + PP512(A[0x5], A[0x4], B[0x9], B[0x6], B[0x2], B[0xF], C[0xF], M2(0x9)); + PP512(A[0x6], A[0x5], B[0xA], B[0x7], B[0x3], B[0x0], C[0xE], M2(0xA)); + PP512(A[0x7], A[0x6], B[0xB], B[0x8], B[0x4], B[0x1], C[0xD], M2(0xB)); + PP512(A[0x8], A[0x7], B[0xC], B[0x9], B[0x5], B[0x2], C[0xC], M2(0xC)); + PP512(A[0x9], A[0x8], B[0xD], B[0xA], B[0x6], B[0x3], C[0xB], M2(0xD)); + PP512(A[0xA], A[0x9], B[0xE], B[0xB], B[0x7], B[0x4], C[0xA], M2(0xE)); + PP512(A[0xB], A[0xA], B[0xF], B[0xC], B[0x8], B[0x5], C[0x9], M2(0xF)); + + A[0xB] = _mm512_add_epi32(A[0xB], C[0x6]); + A[0xA] = _mm512_add_epi32(A[0xA], C[0x5]); + A[0x9] = _mm512_add_epi32(A[0x9], C[0x4]); + A[0x8] = _mm512_add_epi32(A[0x8], C[0x3]); + A[0x7] = _mm512_add_epi32(A[0x7], C[0x2]); + A[0x6] = _mm512_add_epi32(A[0x6], C[0x1]); + A[0x5] = _mm512_add_epi32(A[0x5], C[0x0]); + A[0x4] = _mm512_add_epi32(A[0x4], C[0xF]); + A[0x3] = _mm512_add_epi32(A[0x3], C[0xE]); + A[0x2] = _mm512_add_epi32(A[0x2], C[0xD]); + A[0x1] = _mm512_add_epi32(A[0x1], C[0xC]); + A[0x0] = _mm512_add_epi32(A[0x0], C[0xB]); + A[0xB] = _mm512_add_epi32(A[0xB], C[0xA]); + A[0xA] = _mm512_add_epi32(A[0xA], C[0x9]); + A[0x9] = _mm512_add_epi32(A[0x9], C[0x8]); + A[0x8] = _mm512_add_epi32(A[0x8], C[0x7]); + A[0x7] = _mm512_add_epi32(A[0x7], C[0x6]); + A[0x6] = _mm512_add_epi32(A[0x6], C[0x5]); + A[0x5] = _mm512_add_epi32(A[0x5], C[0x4]); + A[0x4] = _mm512_add_epi32(A[0x4], C[0x3]); + A[0x3] = _mm512_add_epi32(A[0x3], C[0x2]); + A[0x2] = _mm512_add_epi32(A[0x2], C[0x1]); + A[0x1] = _mm512_add_epi32(A[0x1], C[0x0]); + A[0x0] = _mm512_add_epi32(A[0x0], C[0xF]); + A[0xB] = _mm512_add_epi32(A[0xB], C[0xE]); + A[0xA] = _mm512_add_epi32(A[0xA], C[0xD]); + A[0x9] = _mm512_add_epi32(A[0x9], C[0xC]); + A[0x8] = _mm512_add_epi32(A[0x8], C[0xB]); + A[0x7] = _mm512_add_epi32(A[0x7], C[0xA]); + A[0x6] = _mm512_add_epi32(A[0x6], C[0x9]); + A[0x5] = _mm512_add_epi32(A[0x5], C[0x8]); + A[0x4] = _mm512_add_epi32(A[0x4], C[0x7]); + A[0x3] = _mm512_add_epi32(A[0x3], C[0x6]); + A[0x2] = _mm512_add_epi32(A[0x2], C[0x5]); + A[0x1] = _mm512_add_epi32(A[0x1], C[0x4]); + A[0x0] = _mm512_add_epi32(A[0x0], C[0x3]); + + SWAP_AND_SUB512(B[0x0], C[0x0], M2(0x0)); + SWAP_AND_SUB512(B[0x1], C[0x1], M2(0x1)); + 
SWAP_AND_SUB512(B[0x2], C[0x2], M2(0x2)); + SWAP_AND_SUB512(B[0x3], C[0x3], M2(0x3)); + SWAP_AND_SUB512(B[0x4], C[0x4], M2(0x4)); + SWAP_AND_SUB512(B[0x5], C[0x5], M2(0x5)); + SWAP_AND_SUB512(B[0x6], C[0x6], M2(0x6)); + SWAP_AND_SUB512(B[0x7], C[0x7], M2(0x7)); + SWAP_AND_SUB512(B[0x8], C[0x8], M2(0x8)); + SWAP_AND_SUB512(B[0x9], C[0x9], M2(0x9)); + SWAP_AND_SUB512(B[0xA], C[0xA], M2(0xA)); + SWAP_AND_SUB512(B[0xB], C[0xB], M2(0xB)); + SWAP_AND_SUB512(B[0xC], C[0xC], M2(0xC)); + SWAP_AND_SUB512(B[0xD], C[0xD], M2(0xD)); + SWAP_AND_SUB512(B[0xE], C[0xE], M2(0xE)); + SWAP_AND_SUB512(B[0xF], C[0xF], M2(0xF)); + + if (++sc->Wlow == 0) sc->Whigh++; + + // undo the increment above: the block counter W is held constant for the final rounds + if (sc->Wlow-- == 0) sc->Whigh--; + } + + // download SIMD aligned hashes + for (j = 0; j < 8; j++) { + _mm512_storeu_si512((__m512i *)dst + j, C[j+8]); + } + + // reset Wlow & Whigh + sc->Wlow = 1; + sc->Whigh = 0; +} + // Shabal routine optimized for mining void mshabal_deadline_fast_avx512f(mshabal512_context_fast *sc, void *message, void *termination, void *dst0, void *dst1, void *dst2, void *dst3, void *dst4, void *dst5, @@ -727,7 +1018,7 @@ void mshabal_deadline_fast_avx512f(mshabal512_context_fast *sc, void *message, v } one = _mm512_set1_epi32(C32(0xFFFFFFFF)); - // round 1/5 + // round 1 #define M(i) _mm512_load_si512((__m512i *)message + i) for (j = 0; j < 16; j++) B[j] = _mm512_add_epi32(B[j], M(j)); diff --git a/src/c/mshabal_512_avx512f.h b/src/c/mshabal_512_avx512f.h index 8dd6b62..57455f5 100644 --- a/src/c/mshabal_512_avx512f.h +++ b/src/c/mshabal_512_avx512f.h @@ -174,7 +174,13 @@ void mshabal_close_avx512f(mshabal512_context *sc, unsigned ub0, unsigned ub1, u void *dst12, void *dst13, void *dst14, void *dst15); /* - * optimised Shabal Routine for PoC Mining + * optimised Shabal routine for PoC plotting and hashing + */ +void mshabal_hash_fast_avx512f(mshabal512_context_fast *sc, void *message, void *termination, + void *dst, unsigned num); + +/* + * optimised Shabal routine for PoC mining */ void mshabal_deadline_fast_avx512f(mshabal512_context_fast *sc, void *message, void *termination, void *dst0, void *dst1, void *dst2, void *dst3, void *dst4, void *dst5, diff --git a/src/c/shabal_avx.c b/src/c/shabal_avx.c index 78926c5..70e29c8 100644 --- a/src/c/shabal_avx.c +++ b/src/c/shabal_avx.c @@ -5,8 +5,8 @@ #include "mshabal_128_avx.h" #include "sph_shabal.h" -mshabal_context global_128; -mshabal_context_fast global_128_fast; +mshabal128_context global_128; +mshabal128_context_fast global_128_fast; void init_shabal_avx() { mshabal_init_avx(&global_128, 256); @@ -23,7 +23,7 @@ void find_best_deadline_avx(char *scoops, uint64_t nonce_count, char *gensig, write_term(term); // local copy of global fast context - mshabal_context_fast x; + mshabal128_context_fast x; memcpy(&x, &global_128_fast, sizeof(global_128_fast)); // prepare shabal inputs diff --git a/src/c/shabal_neon.c b/src/c/shabal_neon.c index c9a2c99..313642b 100644 --- a/src/c/shabal_neon.c +++ b/src/c/shabal_neon.c @@ -1,13 +1,12 @@ #include "shabal_neon.h" - #include "SSE2NEON.h" #include #include "common.h" #include "mshabal_128_neon.h" #include "sph_shabal.h" -mshabal_context global_128; -mshabal_context_fast global_128_fast; +mshabal128_context global_128; +mshabal128_context_fast global_128_fast; void init_shabal_neon() { mshabal_init_neon(&global_128, 256); @@ -24,7 +23,7 @@ void find_best_deadline_neon(char *scoops, uint64_t nonce_count, char *gensig, write_term(term); // local copy of global fast context - mshabal_context_fast x; + mshabal128_context_fast x; 
memcpy(&x, &global_128_fast, sizeof(global_128_fast)); // prepare shabal inputs diff --git a/src/c/shabal_sse2.c b/src/c/shabal_sse2.c index 23e3478..4f5e593 100644 --- a/src/c/shabal_sse2.c +++ b/src/c/shabal_sse2.c @@ -5,8 +5,8 @@ #include "mshabal_128_sse2.h" #include "sph_shabal.h" -mshabal_context global_128; -mshabal_context_fast global_128_fast; +mshabal128_context global_128; +mshabal128_context_fast global_128_fast; void init_shabal_sse2() { mshabal_init_sse2(&global_128, 256); @@ -23,7 +23,7 @@ void find_best_deadline_sse2(char *scoops, uint64_t nonce_count, char *gensig, write_term(term); // local copy of global fast context - mshabal_context_fast x; + mshabal128_context_fast x; memcpy(&x, &global_128_fast, sizeof(global_128_fast)); // prepare shabal inputs diff --git a/src/c/sph_shabal.c b/src/c/sph_shabal.c index 1a11616..c1507b7 100644 --- a/src/c/sph_shabal.c +++ b/src/c/sph_shabal.c @@ -540,7 +540,91 @@ void sph_shabal256_addbits_and_close(void* cc, unsigned ub, unsigned n, void* ds shabal_close(cc, ub, n, dst, 8); } -/* see sph_shabal.h */ +// Shabal routine optimized for plotting and hashing +void sph_shabal_hash_fast(void *message, void *termination, void *dst, unsigned num) { + sph_u32 + A00 = A_init_256[0], A01 = A_init_256[1], A02 = A_init_256[2], A03 = A_init_256[3], + A04 = A_init_256[4], A05 = A_init_256[5], A06 = A_init_256[6], A07 = A_init_256[7], + A08 = A_init_256[8], A09 = A_init_256[9], A0A = A_init_256[10], A0B = A_init_256[11]; + sph_u32 + B0 = B_init_256[0], B1 = B_init_256[1], B2 = B_init_256[2], B3 = B_init_256[3], + B4 = B_init_256[4], B5 = B_init_256[5], B6 = B_init_256[6], B7 = B_init_256[7], + B8 = B_init_256[8], B9 = B_init_256[9], BA = B_init_256[10], BB = B_init_256[11], + BC = B_init_256[12], BD = B_init_256[13], BE = B_init_256[14], BF = B_init_256[15]; + sph_u32 + C0 = C_init_256[0], C1 = C_init_256[1], C2 = C_init_256[2], C3 = C_init_256[3], + C4 = C_init_256[4], C5 = C_init_256[5], C6 = C_init_256[6], C7 = C_init_256[7], + C8 = C_init_256[8], C9 = C_init_256[9], CA = C_init_256[10], CB = C_init_256[11], + CC = C_init_256[12], CD = C_init_256[13], CE = C_init_256[14], CF = C_init_256[15]; + sph_u32 M0, M1, M2, M3, M4, M5, M6, M7, M8, M9, MA, MB, MC, MD, ME, MF; + sph_u32 Wlow = 1, Whigh = 0; + + while (num-- > 0) { + M0 = ((unsigned int *)message)[0]; + M1 = ((unsigned int *)message)[1]; + M2 = ((unsigned int *)message)[2]; + M3 = ((unsigned int *)message)[3]; + M4 = ((unsigned int *)message)[4]; + M5 = ((unsigned int *)message)[5]; + M6 = ((unsigned int *)message)[6]; + M7 = ((unsigned int *)message)[7]; + M8 = ((unsigned int *)message)[8]; + M9 = ((unsigned int *)message)[9]; + MA = ((unsigned int *)message)[10]; + MB = ((unsigned int *)message)[11]; + MC = ((unsigned int *)message)[12]; + MD = ((unsigned int *)message)[13]; + ME = ((unsigned int *)message)[14]; + MF = ((unsigned int *)message)[15]; + + INPUT_BLOCK_ADD; + XOR_W; + APPLY_P; + INPUT_BLOCK_SUB; + SWAP_BC; + INCR_W; + + message = (unsigned int *)message + 16; + } + + M0 = ((unsigned int *)termination)[0]; + M1 = ((unsigned int *)termination)[1]; + M2 = ((unsigned int *)termination)[2]; + M3 = ((unsigned int *)termination)[3]; + M4 = ((unsigned int *)termination)[4]; + M5 = ((unsigned int *)termination)[5]; + M6 = ((unsigned int *)termination)[6]; + M7 = ((unsigned int *)termination)[7]; + M8 = ((unsigned int *)termination)[8]; + M9 = ((unsigned int *)termination)[9]; + MA = ((unsigned int *)termination)[10]; + MB = ((unsigned int *)termination)[11]; + MC = ((unsigned int 
*)termination)[12]; + MD = ((unsigned int *)termination)[13]; + ME = ((unsigned int *)termination)[14]; + MF = ((unsigned int *)termination)[15]; + + INPUT_BLOCK_ADD; + XOR_W; + APPLY_P; + + for (int i = 0; i < 3; i++) { + SWAP_BC; + XOR_W; + APPLY_P; + } + + sph_enc32le_aligned((sph_u32 *)dst, B8); + sph_enc32le_aligned((sph_u32 *)dst + 1, B9); + sph_enc32le_aligned((sph_u32 *)dst + 2, BA); + sph_enc32le_aligned((sph_u32 *)dst + 3, BB); + sph_enc32le_aligned((sph_u32 *)dst + 4, BC); + sph_enc32le_aligned((sph_u32 *)dst + 5, BD); + sph_enc32le_aligned((sph_u32 *)dst + 6, BE); + sph_enc32le_aligned((sph_u32 *)dst + 7, BF); +} + +// Shabal routine optimized for mining void sph_shabal_deadline_fast(void *scoop_data, void *gen_sig, void *dst) { sph_u32 A00 = A_init_256[0], A01 = A_init_256[1], A02 = A_init_256[2], A03 = A_init_256[3], @@ -604,6 +688,6 @@ void sph_shabal_deadline_fast(void *scoop_data, void *gen_sig, void *dst) { APPLY_P; } - sph_enc32le_aligned((sph_u32*)dst, B8); - sph_enc32le_aligned((sph_u32*)dst + 1, B9); + sph_enc32le_aligned((sph_u32 *)dst, B8); + sph_enc32le_aligned((sph_u32 *)dst + 1, B9); } \ No newline at end of file diff --git a/src/c/sph_shabal.h b/src/c/sph_shabal.h index 79f44f8..ca6c772 100644 --- a/src/c/sph_shabal.h +++ b/src/c/sph_shabal.h @@ -118,7 +118,12 @@ void sph_shabal256_close(void* cc, void* dst); void sph_shabal256_addbits_and_close(void* cc, unsigned ub, unsigned n, void* dst); /* - * optimised Shabal Routine for PoC Mining + * optimised Shabal routine for PoC plotting and hashing + */ +void sph_shabal_hash_fast(void *message, void *termination, void *dst, unsigned num); + +/* + * optimised Shabal routine for PoC mining */ void sph_shabal_deadline_fast(void *scoop_data, void *gen_sig, void *dst);
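Paired with the write_term() helper from common.h, the new scalar entry point computes a full Shabal-256 over block-aligned data. A minimal sketch, not part of the patch; it assumes message holds num complete 64-byte blocks and termination is the final 64-byte padding block (0x80 then zeros), which is how sph_shabal_hash_fast() reads its arguments, and the helper name is invented for the example:

    #include <string.h>
    #include "common.h"
    #include "sph_shabal.h"

    /* Shabal-256 of a 128-byte (two-block) message via the fast scalar path. */
    static void shabal256_two_blocks(const unsigned char data[128],
                                     unsigned char digest[32]) {
        unsigned char term[64];
        write_term((char *)term);  /* 0x80 terminator followed by 31 zero bytes */
        memset(term + 32, 0, 32);  /* write_term fills only 32 bytes; zero the rest */
        sph_shabal_hash_fast((void *)data, term, digest, 2); /* two blocks, then term */
    }

For block-aligned input this should agree with the generic sph_shabal256() path, which makes a convenient cross-check for the SIMD variants as well.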