Commit 0ab0900
Merge pull request monero-project#9 from quazarcoin/BMR_master
Optimize cn_slow_hash
bitmonero-project committed May 26, 2014
2 parents 3f2ab0a + 1ac768a commit 0ab0900
Showing 2 changed files with 228 additions and 107 deletions.
10 changes: 7 additions & 3 deletions CMakeLists.txt
@@ -58,8 +58,8 @@ else()
else()
set(STATIC_ASSERT_FLAG "-Dstatic_assert=_Static_assert")
endif()
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c11 -D_GNU_SOURCE ${MINGW_FLAG} ${STATIC_ASSERT_FLAG} ${WARNINGS} ${C_WARNINGS} ${ARCH_FLAG}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -D_GNU_SOURCE ${MINGW_FLAG} ${WARNINGS} ${CXX_WARNINGS} ${ARCH_FLAG}")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c11 -D_GNU_SOURCE ${MINGW_FLAG} ${STATIC_ASSERT_FLAG} ${WARNINGS} ${C_WARNINGS} ${ARCH_FLAG} -maes")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -D_GNU_SOURCE ${MINGW_FLAG} ${WARNINGS} ${CXX_WARNINGS} ${ARCH_FLAG} -maes")
if(APPLE)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DGTEST_HAS_TR1_TUPLE=0")
endif()
@@ -68,7 +68,11 @@ else()
else()
set(DEBUG_FLAGS "-g3 -O0")
endif()
set(RELEASE_FLAGS "-Ofast -DNDEBUG -Wno-unused-variable -flto")
set(RELEASE_FLAGS "-Ofast -DNDEBUG -Wno-unused-variable")
if(NOT APPLE)
# There is a clang bug that prevents compiling code that uses AES-NI intrinsics when -flto is enabled
set(RELEASE_FLAGS "${RELEASE_FLAGS} -flto")
endif()
#if(CMAKE_C_COMPILER_ID STREQUAL "GNU" AND NOT MINGW)
# set(RELEASE_FLAGS "${RELEASE_FLAGS} -fno-fat-lto-objects")
#endif()
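Aside (not part of the diff): the new -maes flag is what lets GCC and Clang accept the AES-NI intrinsics used in slow-hash.c below. Both compilers define __AES__ when the flag is in effect (MSVC needs neither), so a translation unit can catch a misconfigured build early with a guard like this sketch:

#if !defined(__AES__) && !defined(_MSC_VER)
#error "AES-NI intrinsics need -maes; see CMAKE_C_FLAGS above"
#endif
#include <wmmintrin.h>  /* _mm_aesenc_si128 and friends */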
325 changes: 221 additions & 104 deletions src/crypto/slow-hash.c
@@ -12,131 +12,248 @@
#include "hash-ops.h"
#include "oaes_lib.h"

static void (*const extra_hashes[4])(const void *, size_t, char *) = {
hash_extra_blake, hash_extra_groestl, hash_extra_jh, hash_extra_skein
};
#include <emmintrin.h>

#if defined(_MSC_VER) || defined(__INTEL_COMPILER)
#include <intrin.h>
#define STATIC
#define INLINE __inline
#if !defined(RDATA_ALIGN16)
#define RDATA_ALIGN16 __declspec(align(16))
#endif
#else
#include <wmmintrin.h>
#define STATIC static
#define INLINE inline
#if !defined(RDATA_ALIGN16)
#define RDATA_ALIGN16 __attribute__ ((aligned(16)))
#endif
#endif

#define MEMORY (1 << 21) /* 2 MiB */
#define MEMORY (1 << 21) // 2MB scratchpad
#define ITER (1 << 20)
#define AES_BLOCK_SIZE 16
#define AES_KEY_SIZE 32 /*16*/
#define AES_KEY_SIZE 32
#define INIT_SIZE_BLK 8
#define INIT_SIZE_BYTE (INIT_SIZE_BLK * AES_BLOCK_SIZE)
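/* Derived sizes, for orientation: INIT_SIZE_BYTE = 8 * 16 = 128, so the
   expand/implode passes below make MEMORY / INIT_SIZE_BYTE = 16384 outer
   iterations over the 2 MiB scratchpad, and the main loop makes
   ITER / 2 = 524288 passes, each with two scratchpad accesses. */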

static size_t e2i(const uint8_t* a) { return (*((uint64_t*)a) / AES_BLOCK_SIZE) & (MEMORY / AES_BLOCK_SIZE - 1); }
#define U64(x) ((uint64_t *) (x))
#define R128(x) ((__m128i *) (x))

static void mul(const uint8_t* a, const uint8_t* b, uint8_t* res) {
uint64_t a0, b0;
uint64_t hi, lo;
#pragma pack(push, 1)
union cn_slow_hash_state
{
union hash_state hs;
struct
{
uint8_t k[64];
uint8_t init[INIT_SIZE_BYTE];
};
};
#pragma pack(pop)
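/* Layout note: hash_state is the 200-byte Keccak state, so this union just
   names its first 64 bytes `k` (the bytes the a/b working blocks and the two
   32-byte AES keys are taken from) and the next 128 bytes `init` (the text
   that is expanded into, and later folded back from, the scratchpad);
   pack(1) keeps the overlay byte-exact. */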

a0 = SWAP64LE(((uint64_t*)a)[0]);
b0 = SWAP64LE(((uint64_t*)b)[0]);
lo = mul128(a0, b0, &hi);
((uint64_t*)res)[0] = SWAP64LE(hi);
((uint64_t*)res)[1] = SWAP64LE(lo);
#if defined(_MSC_VER) || defined(__INTEL_COMPILER)
#define cpuid(info,x) __cpuidex(info,x,0)
#else
void cpuid(int CPUInfo[4], int InfoType)
{
__asm__ __volatile__
(
"cpuid":
"=a" (CPUInfo[0]),
"=b" (CPUInfo[1]),
"=c" (CPUInfo[2]),
"=d" (CPUInfo[3]) :
"a" (InfoType), "c" (0)
);
}
#endif

static void sum_half_blocks(uint8_t* a, const uint8_t* b) {
uint64_t a0, a1, b0, b1;

a0 = SWAP64LE(((uint64_t*)a)[0]);
a1 = SWAP64LE(((uint64_t*)a)[1]);
b0 = SWAP64LE(((uint64_t*)b)[0]);
b1 = SWAP64LE(((uint64_t*)b)[1]);
a0 += b0;
a1 += b1;
((uint64_t*)a)[0] = SWAP64LE(a0);
((uint64_t*)a)[1] = SWAP64LE(a1);
STATIC INLINE void mul(const uint8_t *a, const uint8_t *b, uint8_t *res)
{
uint64_t a0, b0;
uint64_t hi, lo;

a0 = U64(a)[0];
b0 = U64(b)[0];
lo = mul128(a0, b0, &hi);
U64(res)[0] = hi;
U64(res)[1] = lo;
}
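/* Illustration only (not part of this commit): mul128(), defined elsewhere
   in the tree, returns the low 64 bits of the product and stores the high
   64 bits through its last argument.  A portable sketch, assuming a
   compiler with unsigned __int128: */
STATIC INLINE uint64_t mul128_sketch(uint64_t x, uint64_t y, uint64_t *hi)
{
  unsigned __int128 p = (unsigned __int128) x * y;
  *hi = (uint64_t) (p >> 64);
  return (uint64_t) p;
}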

static void copy_block(uint8_t* dst, const uint8_t* src) {
((uint64_t*)dst)[0] = ((uint64_t*)src)[0];
((uint64_t*)dst)[1] = ((uint64_t*)src)[1];
STATIC INLINE void sum_half_blocks(uint8_t *a, const uint8_t *b)
{
uint64_t a0, a1, b0, b1;
a0 = U64(a)[0];
a1 = U64(a)[1];
b0 = U64(b)[0];
b1 = U64(b)[1];
a0 += b0;
a1 += b1;
U64(a)[0] = a0;
U64(a)[1] = a1;
}

static void xor_blocks(uint8_t* a, const uint8_t* b) {
((uint64_t*)a)[0] ^= ((uint64_t*)b)[0];
((uint64_t*)a)[1] ^= ((uint64_t*)b)[1];
STATIC INLINE void swap_blocks(uint8_t *a, uint8_t *b)
{
uint64_t t[2];
U64(t)[0] = U64(a)[0];
U64(t)[1] = U64(a)[1];
U64(a)[0] = U64(b)[0];
U64(a)[1] = U64(b)[1];
U64(b)[0] = U64(t)[0];
U64(b)[1] = U64(t)[1];
}

static void xor_blocks_dst(const uint8_t* a, const uint8_t* b, uint8_t* dst) {
((uint64_t*)dst)[0] = ((uint64_t*)a)[0] ^ ((uint64_t*)b)[0];
((uint64_t*)dst)[1] = ((uint64_t*)a)[1] ^ ((uint64_t*)b)[1];
STATIC INLINE void xor_blocks(uint8_t *a, const uint8_t *b)
{
U64(a)[0] ^= U64(b)[0];
U64(a)[1] ^= U64(b)[1];
}

static void mul_sum_xor_dst(const uint8_t* a, uint8_t* c, uint8_t* dst) {
uint8_t product[AES_BLOCK_SIZE];
mul(a, dst, product);
sum_half_blocks(product, c);
STATIC INLINE int check_aes_hw(void)
{
int cpuid_results[4];
static int supported = -1;

if(supported >= 0)
return supported;

xor_blocks_dst(dst, product, c);
copy_block(dst, product);
cpuid(cpuid_results,1);
return supported = cpuid_results[2] & (1 << 25);
}
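/* CPUID leaf 1 reports AES-NI support in bit 25 of ECX, so check_aes_hw()
   returns non-zero exactly on CPUs that implement the AES instructions; the
   result is cached in `supported` so CPUID runs only once.  cn_slow_hash()
   below uses it to choose between the _mm_aesenc_si128 path and the
   oaes/aesb_* software fallback. */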

#pragma pack(push, 1)
union cn_slow_hash_state {
union hash_state hs;
struct {
uint8_t k[64];
uint8_t init[INIT_SIZE_BYTE];
};
};
#pragma pack(pop)
STATIC INLINE void aesni_pseudo_round(const uint8_t *in, uint8_t *out,
const uint8_t *expandedKey)
{
__m128i *k = R128(expandedKey);
__m128i d;

d = _mm_loadu_si128(R128(in));
d = _mm_aesenc_si128(d, *R128(&k[0]));
d = _mm_aesenc_si128(d, *R128(&k[1]));
d = _mm_aesenc_si128(d, *R128(&k[2]));
d = _mm_aesenc_si128(d, *R128(&k[3]));
d = _mm_aesenc_si128(d, *R128(&k[4]));
d = _mm_aesenc_si128(d, *R128(&k[5]));
d = _mm_aesenc_si128(d, *R128(&k[6]));
d = _mm_aesenc_si128(d, *R128(&k[7]));
d = _mm_aesenc_si128(d, *R128(&k[8]));
d = _mm_aesenc_si128(d, *R128(&k[9]));
_mm_storeu_si128((R128(out)), d);
}
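/* Illustration only (not part of this commit): each _mm_aesenc_si128()
   performs one full AES round (ShiftRows, SubBytes, MixColumns, XOR with
   the round key), so the unrolled sequence above is ten keyed rounds with
   no initial whitening and no final round -- the same transform the
   software aesb_pseudo_round() computes.  Equivalent loop form: */
STATIC INLINE void aesni_pseudo_round_loop(const uint8_t *in, uint8_t *out,
  const uint8_t *expandedKey)
{
  const __m128i *k = R128(expandedKey);
  __m128i d = _mm_loadu_si128(R128(in));
  int r;

  for(r = 0; r < 10; r++)
    d = _mm_aesenc_si128(d, _mm_loadu_si128(&k[r]));
  _mm_storeu_si128(R128(out), d);
}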

void cn_slow_hash(const void *data, size_t length, char *hash)
{
uint8_t long_state[MEMORY];
uint8_t text[INIT_SIZE_BYTE];
uint8_t a[AES_BLOCK_SIZE];
uint8_t b[AES_BLOCK_SIZE];
uint8_t d[AES_BLOCK_SIZE];
uint8_t aes_key[AES_KEY_SIZE];
RDATA_ALIGN16 uint8_t expandedKey[256];

union cn_slow_hash_state state;

size_t i, j;
uint8_t *p = NULL;
oaes_ctx *aes_ctx;

int useAes = check_aes_hw();
static void (*const extra_hashes[4])(const void *, size_t, char *) =
{
hash_extra_blake, hash_extra_groestl, hash_extra_jh, hash_extra_skein
};

hash_process(&state.hs, data, length);
memcpy(text, state.init, INIT_SIZE_BYTE);

void cn_slow_hash(const void *data, size_t length, char *hash) {
uint8_t long_state[MEMORY];
union cn_slow_hash_state state;
uint8_t text[INIT_SIZE_BYTE];
uint8_t a[AES_BLOCK_SIZE];
uint8_t b[AES_BLOCK_SIZE];
uint8_t c[AES_BLOCK_SIZE];
size_t i, j;
uint8_t aes_key[AES_KEY_SIZE];
oaes_ctx* aes_ctx;

hash_process(&state.hs, data, length);
memcpy(text, state.init, INIT_SIZE_BYTE);
memcpy(aes_key, state.hs.b, AES_KEY_SIZE);
aes_ctx = (oaes_ctx*)oaes_alloc();

oaes_key_import_data(aes_ctx, aes_key, AES_KEY_SIZE);
for (i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) {
for (j = 0; j < INIT_SIZE_BLK; j++)
aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], aes_ctx->key->exp_data);

memcpy(&long_state[i * INIT_SIZE_BYTE], text, INIT_SIZE_BYTE);
}

for (i = 0; i < 16; i++) {
a[i] = state.k[ i] ^ state.k[32 + i];
b[i] = state.k[16 + i] ^ state.k[48 + i];
}

for (i = 0; i < ITER / 2; i++) {
/* Dependency chain: address -> read value ------+
* written value <-+ hard function (AES or MUL) <+
* next address <-+
*/
/* Iteration 1 */
j = e2i(a);
aesb_single_round(&long_state[j * AES_BLOCK_SIZE], c, a);
xor_blocks_dst(c, b, &long_state[j * AES_BLOCK_SIZE]);
assert(j == e2i(a));
/* Iteration 2 */
mul_sum_xor_dst(c, a, &long_state[e2i(c) * AES_BLOCK_SIZE]);
copy_block(b, c);
}

memcpy(text, state.init, INIT_SIZE_BYTE);
oaes_key_import_data(aes_ctx, &state.hs.b[32], AES_KEY_SIZE);
for (i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) {
for (j = 0; j < INIT_SIZE_BLK; j++) {
xor_blocks(&text[j * AES_BLOCK_SIZE], &long_state[i * INIT_SIZE_BYTE + j * AES_BLOCK_SIZE]);
aesb_pseudo_round(&text[j * AES_BLOCK_SIZE], &text[j * AES_BLOCK_SIZE], aes_ctx->key->exp_data);
aes_ctx = (oaes_ctx *) oaes_alloc();
oaes_key_import_data(aes_ctx, state.hs.b, AES_KEY_SIZE);

// use aligned data
memcpy(expandedKey, aes_ctx->key->exp_data, aes_ctx->key->exp_data_len);

if(useAes)
{
for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++)
{
for(j = 0; j < INIT_SIZE_BLK; j++)
aesni_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], expandedKey);
memcpy(&long_state[i * INIT_SIZE_BYTE], text, INIT_SIZE_BYTE);
}
}
else
{
for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++)
{
for(j = 0; j < INIT_SIZE_BLK; j++)
aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], expandedKey);

memcpy(&long_state[i * INIT_SIZE_BYTE], text, INIT_SIZE_BYTE);
}
}

U64(a)[0] = U64(&state.k[0])[0] ^ U64(&state.k[32])[0];
U64(a)[1] = U64(&state.k[0])[1] ^ U64(&state.k[32])[1];
U64(b)[0] = U64(&state.k[16])[0] ^ U64(&state.k[48])[0];
U64(b)[1] = U64(&state.k[16])[1] ^ U64(&state.k[48])[1];

for(i = 0; i < ITER / 2; i++)
{
#define TOTALBLOCKS (MEMORY / AES_BLOCK_SIZE)
#define state_index(x) (((*((uint64_t *)x) >> 4) & (TOTALBLOCKS - 1)) << 4)
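/* state_index(x) reads the low 64-bit word of a block, drops the bottom 4
   bits, keeps the next 17 bits (TOTALBLOCKS = 2^21 / 16 = 2^17) and scales
   back up by 16, giving a 16-byte-aligned offset into the 2 MiB scratchpad.
   Worked example: if U64(a)[0] == 0x0123456789ABCDEF then
   state_index(a) == 0xBCDE0. */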

// Iteration 1
p = &long_state[state_index(a)];

if(useAes)
_mm_storeu_si128(R128(p), _mm_aesenc_si128(_mm_loadu_si128(R128(p)), _mm_loadu_si128(R128(a))));
else
aesb_single_round(p, p, a);

xor_blocks(b, p);
swap_blocks(b, p);
swap_blocks(a, b);

// Iteration 2
p = &long_state[state_index(a)];

mul(a, p, d);
sum_half_blocks(b, d);
swap_blocks(b, p);
xor_blocks(b, p);
swap_blocks(a, b);
}
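/* Each of the ITER / 2 = 524288 passes above performs two dependent
   read-modify-write accesses to the scratchpad: iteration 1 applies one
   AES round to the block selected by `a`, keyed with `a` itself (hardware
   aesenc or aesb_single_round); iteration 2 mixes the next block with a
   64x64->128-bit multiply and a 128-bit add.  Every address depends on the
   value produced by the previous step -- the latency chain the removed
   "dependency chain" comment described, and what makes the function
   memory-hard. */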
}
memcpy(state.init, text, INIT_SIZE_BYTE);
hash_permutation(&state.hs);
extra_hashes[state.hs.b[0] & 3](&state, 200, hash);
oaes_free((OAES_CTX **)&aes_ctx);

memcpy(text, state.init, INIT_SIZE_BYTE);
oaes_key_import_data(aes_ctx, &state.hs.b[32], AES_KEY_SIZE);
memcpy(expandedKey, aes_ctx->key->exp_data, aes_ctx->key->exp_data_len);
if(useAes)
{
for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++)
{
for(j = 0; j < INIT_SIZE_BLK; j++)
{
xor_blocks(&text[j * AES_BLOCK_SIZE], &long_state[i * INIT_SIZE_BYTE + j * AES_BLOCK_SIZE]);
aesni_pseudo_round(&text[j * AES_BLOCK_SIZE], &text[j * AES_BLOCK_SIZE], expandedKey);
}
}
}
else
{
for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++)
{
for(j = 0; j < INIT_SIZE_BLK; j++)
{
xor_blocks(&text[j * AES_BLOCK_SIZE], &long_state[i * INIT_SIZE_BYTE + j * AES_BLOCK_SIZE]);
aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], expandedKey);
}
}
}

oaes_free((OAES_CTX **) &aes_ctx);
memcpy(state.init, text, INIT_SIZE_BYTE);
hash_permutation(&state.hs);
extra_hashes[state.hs.b[0] & 3](&state, 200, hash);
}
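A minimal caller sketch (assuming, as elsewhere in the tree, that hash-ops.h declares cn_slow_hash and that the output is HASH_SIZE = 32 bytes):

#include <stdio.h>
#include <string.h>
#include "hash-ops.h"

int main(void)
{
  const char msg[] = "test input";
  char hash[32];                       /* HASH_SIZE bytes of output */
  size_t i;

  cn_slow_hash(msg, strlen(msg), hash);
  for(i = 0; i < sizeof(hash); i++)
    printf("%02x", (unsigned char) hash[i]);
  printf("\n");
  return 0;
}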
