diff --git a/src/cryptonote_basic/cryptonote_format_utils.cpp b/src/cryptonote_basic/cryptonote_format_utils.cpp index 745dfb72e7..0ad29aed24 100644 --- a/src/cryptonote_basic/cryptonote_format_utils.cpp +++ b/src/cryptonote_basic/cryptonote_format_utils.cpp @@ -489,13 +489,6 @@ namespace cryptonote return pk == out_key.key; } //--------------------------------------------------------------- - bool is_out_to_acc_precomp(const crypto::public_key& spend_public_key, const txout_to_key& out_key, const crypto::key_derivation& derivation, size_t output_index) - { - crypto::public_key pk; - derive_public_key(derivation, output_index, spend_public_key, pk); - return pk == out_key.key; - } - //--------------------------------------------------------------- bool lookup_acc_outs(const account_keys& acc, const transaction& tx, std::vector& outs, uint64_t& money_transfered) { crypto::public_key tx_pub_key = get_tx_pub_key_from_extra(tx); diff --git a/src/cryptonote_basic/cryptonote_format_utils.h b/src/cryptonote_basic/cryptonote_format_utils.h index d8ccf8eecd..3c9f5e5ff0 100644 --- a/src/cryptonote_basic/cryptonote_format_utils.h +++ b/src/cryptonote_basic/cryptonote_format_utils.h @@ -70,7 +70,6 @@ namespace cryptonote bool get_payment_id_from_tx_extra_nonce(const blobdata& extra_nonce, crypto::hash& payment_id); bool get_encrypted_payment_id_from_tx_extra_nonce(const blobdata& extra_nonce, crypto::hash8& payment_id); bool is_out_to_acc(const account_keys& acc, const txout_to_key& out_key, const crypto::public_key& tx_pub_key, size_t output_index); - bool is_out_to_acc_precomp(const crypto::public_key& spend_public_key, const txout_to_key& out_key, const crypto::key_derivation& derivation, size_t output_index); bool lookup_acc_outs(const account_keys& acc, const transaction& tx, const crypto::public_key& tx_pub_key, std::vector& outs, uint64_t& money_transfered); bool lookup_acc_outs(const account_keys& acc, const transaction& tx, std::vector& outs, uint64_t& 
money_transfered); bool get_tx_fee(const transaction& tx, uint64_t & fee); diff --git a/src/wallet/CMakeLists.txt b/src/wallet/CMakeLists.txt index 6390800515..7a9e65ece4 100644 --- a/src/wallet/CMakeLists.txt +++ b/src/wallet/CMakeLists.txt @@ -28,6 +28,8 @@ # include (${PROJECT_SOURCE_DIR}/cmake/libutils.cmake) +add_subdirectory(crypto) + set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) set(wallet_sources @@ -83,6 +85,7 @@ target_link_libraries(wallet ${Boost_THREAD_LIBRARY} ${Boost_REGEX_LIBRARY} PRIVATE + wallet-crypto ${EXTRA_LIBRARIES}) add_dependencies(wallet version) @@ -143,3 +146,4 @@ if (BUILD_GUI_DEPS) install(FILES ${wallet_api_headers} DESTINATION include/wallet) endif() + diff --git a/src/wallet/api/wallet_manager.cpp b/src/wallet/api/wallet_manager.cpp index a23533530d..ca68374bc6 100644 --- a/src/wallet/api/wallet_manager.cpp +++ b/src/wallet/api/wallet_manager.cpp @@ -37,6 +37,7 @@ #include "common/updates.h" #include "version.h" #include "net/http_client.h" +#include "wallet/crypto/import.h" #include #include @@ -256,7 +257,7 @@ bool WalletManagerImpl::checkPayment(const std::string &address_text, const std: } crypto::key_derivation derivation; - if (!crypto::generate_key_derivation(address.m_view_public_key, tx_key, derivation)) + if (!tools::wallet_only::generate_key_derivation(address.m_view_public_key, tx_key, derivation)) { error = tr("failed to generate key derivation from supplied parameters"); return false; @@ -270,7 +271,7 @@ bool WalletManagerImpl::checkPayment(const std::string &address_text, const std: continue; const cryptonote::txout_to_key tx_out_to_key = boost::get(tx.vout[n].target); crypto::public_key pubkey; - derive_public_key(derivation, n, address.m_spend_public_key, pubkey); + tools::wallet_only::derive_public_key(derivation, n, address.m_spend_public_key, pubkey); if (pubkey == tx_out_to_key.key) { uint64_t amount; @@ -285,7 +286,7 @@ bool WalletManagerImpl::checkPayment(const std::string &address_text, const 
std: rct::key Ctmp; //rct::key amount_key = rct::hash_to_scalar(rct::scalarmultKey(rct::pk2rct(address.m_view_public_key), rct::sk2rct(tx_key))); crypto::key_derivation derivation; - bool r = crypto::generate_key_derivation(address.m_view_public_key, tx_key, derivation); + bool r = tools::wallet_only::generate_key_derivation(address.m_view_public_key, tx_key, derivation); if (!r) { LOG_ERROR("Failed to generate key derivation to decode rct output " << n); diff --git a/src/wallet/crypto/CMakeLists.txt b/src/wallet/crypto/CMakeLists.txt new file mode 100644 index 0000000000..49dac5b6d6 --- /dev/null +++ b/src/wallet/crypto/CMakeLists.txt @@ -0,0 +1,30 @@ + +set(WALLET_CRYPTO_CHOICES "none" "auto" "amd64-51-30k" "amd64-64-24k") +set(WALLET_CRYPTO "auto" CACHE STRING "Select a wallet import performance mode") + +if (${WALLET_CRYPTO} STREQUAL "auto") + if (UNIX AND CMAKE_SYSTEM_PROCESSOR MATCHES "amd64.*|AMD64.*|x86_64.*") + message("Wallet import performance - using \"amd64-51-30k\"") + set(WALLET_CRYPTO "amd64-51-30k") + else () + message("Wallet import performance disabled - no improvements for platform") + set(WALLET_CRYPTO "none") + endif () +endif () + +list(FIND WALLET_CRYPTO_CHOICES ${WALLET_CRYPTO} CHOICE_FOUND) +if (CHOICE_FOUND LESS 0) + message(FATAL_ERROR "Invalid WALLET_CRYPTO option ${WALLET_CRYPTO}") +endif () + +string(REPLACE "-" "_" WALLET_CRYPTO_NAMESPACE "${WALLET_CRYPTO}") +if (NOT ${WALLET_CRYPTO} STREQUAL "none") + include("${CMAKE_CURRENT_SOURCE_DIR}/${WALLET_CRYPTO}.cmake") +endif () + +configure_file("${CMAKE_CURRENT_SOURCE_DIR}/import.h.in" "${CMAKE_CURRENT_SOURCE_DIR}/import.h") +add_library(wallet-crypto INTERFACE) +if (WALLET_CRYPTO_LIBS) + target_link_libraries(wallet-crypto INTERFACE ${WALLET_CRYPTO_LIBS}) +endif () + diff --git a/src/wallet/crypto/README.md b/src/wallet/crypto/README.md new file mode 100644 index 0000000000..2e20d96540 --- /dev/null +++ b/src/wallet/crypto/README.md @@ -0,0 +1,52 @@ +# Monero Wallet Performance Crypto 
+## Usage + +Monero wallet import performance is toggled with `-DWALLET_CRYPTO=`. The +following options are valid strings for the option: + - *`none`* uses the standard default monero (ref10) implementation + - *`auto`* uses `amd64-51-30k` when targeting amd64, and `none` otherwise. + This is the default. + - *`amd64-51-30k`* uses amd64-51-30k code from supercop + - *`amd64-64-24k`* uses amd64-64-24k code from supercop + +## How it Works + +CMake generates `src/wallet/crypto/import.h` based on user configuration. The +header file always _aliases_ `generate_key_derivation` and `derive_public_key` +in the `tools::wallet_only` namespace. So when the performance code is disabled, +the default functions are called (via aliasing) for identical performance and +behavior. If performance is enabled, it aliases the enabled function. This +_could_ allow multiple performance implementations to be simultaneously +compiled into the program (in fact it was designed this way). The original +crypto functions are *never* modified. + +## Implementations +### `amd64-51-30k` + +The code from supercop (`src/wallet/crypto/ed25519/amd64-51-30k`) is untouched +with the exception that it is made position independent. Additionally, the +following additions were "necessary": + - *`unpack_vartime`* - the supercop version automatically negates the x-pos. + - *`monero_scalarmult`* - the supercop version does not do strict ECDH - only + base multiplication and double (i.e. a*B + c*G). + - *`crypto_sign_ed25519_amd64_51_30k_batch_choose_tp`* - the default version + did not use `z` for space savings. This meant multiple inversions in the + pre-computation stage OR a non-constant adaptation of the double scalar code. + The first option was implemented - a modified ASM that "selects" the `z` + from the pre-computation stage so that only a single inversion is done at + the end and constant time behavior is maintained. 
+ +### `amd64-64-24k` + +The code from supercop (`src/wallet/crypto/ed25519/amd64-64-24k`) is untouched +with the exception that it is made position independent. See the +[amd64-51-30k section](#amd64-51-30k) for other changes made. + +## Future Directions +### Scalar Multiplication + +It _might_ be faster to convert to the montgomery curve to use supercop +scalarmult code for the ECDH step, but the performance could be tight since it +has to convert back to edwards curve after completion. Lots of investigation is +needed, because the conversion back is going to need y-point recovery (all of +the code uses `X/Z` only for computing the ECDH). diff --git a/src/wallet/crypto/amd64-51-30k-choose_tp.s b/src/wallet/crypto/amd64-51-30k-choose_tp.s new file mode 100644 index 0000000000..08ac3c726a --- /dev/null +++ b/src/wallet/crypto/amd64-51-30k-choose_tp.s @@ -0,0 +1,2189 @@ + +# qhasm: int64 tp + +# qhasm: int64 pos + +# qhasm: int64 b + +# qhasm: int64 basep + +# qhasm: input tp + +# qhasm: input pos + +# qhasm: input b + +# qhasm: input basep + +# qhasm: int64 mask + +# qhasm: int64 u + +# qhasm: int64 tysubx0 + +# qhasm: int64 tysubx1 + +# qhasm: int64 tysubx2 + +# qhasm: int64 tysubx3 + +# qhasm: int64 tysubx4 + +# qhasm: int64 txaddy0 + +# qhasm: int64 txaddy1 + +# qhasm: int64 txaddy2 + +# qhasm: int64 txaddy3 + +# qhasm: int64 txaddy4 + +# qhasm: int64 tt2d0 + +# qhasm: int64 tt2d1 + +# qhasm: int64 tt2d2 + +# qhasm: int64 tt2d3 + +# qhasm: int64 tt2d4 + +# qhasm: int64 tt0 + +# qhasm: int64 tt1 + +# qhasm: int64 tt2 + +# qhasm: int64 tt3 + +# qhasm: int64 tt4 + +# qhasm: int64 t + +# qhasm: stack64 tp_stack + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller 
caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_choose_t +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_51_30k_batch_choose_tp +.globl crypto_sign_ed25519_amd64_51_30k_batch_choose_tp +_crypto_sign_ed25519_amd64_51_30k_batch_choose_tp: +crypto_sign_ed25519_amd64_51_30k_batch_choose_tp: +mov %rsp,%r11 +and $31,%r11 +add $64,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq caller1_stack=stack64#1 +# asm 2: movq caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq caller2_stack=stack64#2 +# asm 2: movq caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq caller3_stack=stack64#3 +# asm 2: movq caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq caller4_stack=stack64#4 +# asm 2: movq caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#5 +# asm 2: movq caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#6 +# asm 2: movq caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq caller7_stack=stack64#7 +# asm 2: movq caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: tp_stack = tp +# asm 1: movq tp_stack=stack64#8 +# asm 2: movq tp_stack=56(%rsp) +movq %rdi,56(%rsp) + +# qhasm: pos *= 960 +# asm 1: imulq $960,pos=int64#1 +# asm 2: imulq $960,pos=%rdi +imulq $960,%rsi,%rdi + +# qhasm: mask = b +# asm 1: mov mask=int64#2 +# asm 2: mov mask=%rsi +mov %rdx,%rsi + +# qhasm: (int64) mask >>= 7 +# asm 1: sar $7,u=int64#5 +# asm 2: mov u=%r8 +mov %rdx,%r8 + +# qhasm: u += mask +# asm 1: add tysubx0=int64#2 +# asm 2: mov 
$1,>tysubx0=%rsi +mov $1,%rsi + +# qhasm: tysubx1 = 0 +# asm 1: mov $0,>tysubx1=int64#6 +# asm 2: mov $0,>tysubx1=%r9 +mov $0,%r9 + +# qhasm: tysubx2 = 0 +# asm 1: mov $0,>tysubx2=int64#7 +# asm 2: mov $0,>tysubx2=%rax +mov $0,%rax + +# qhasm: tysubx3 = 0 +# asm 1: mov $0,>tysubx3=int64#8 +# asm 2: mov $0,>tysubx3=%r10 +mov $0,%r10 + +# qhasm: tysubx4 = 0 +# asm 1: mov $0,>tysubx4=int64#9 +# asm 2: mov $0,>tysubx4=%r11 +mov $0,%r11 + +# qhasm: txaddy0 = 1 +# asm 1: mov $1,>txaddy0=int64#10 +# asm 2: mov $1,>txaddy0=%r12 +mov $1,%r12 + +# qhasm: txaddy1 = 0 +# asm 1: mov $0,>txaddy1=int64#11 +# asm 2: mov $0,>txaddy1=%r13 +mov $0,%r13 + +# qhasm: txaddy2 = 0 +# asm 1: mov $0,>txaddy2=int64#12 +# asm 2: mov $0,>txaddy2=%r14 +mov $0,%r14 + +# qhasm: txaddy3 = 0 +# asm 1: mov $0,>txaddy3=int64#13 +# asm 2: mov $0,>txaddy3=%r15 +mov $0,%r15 + +# qhasm: txaddy4 = 0 +# asm 1: mov $0,>txaddy4=int64#14 +# asm 2: mov $0,>txaddy4=%rbx +mov $0,%rbx + +# qhasm: =? u - 1 +# asm 1: cmp $1,t=int64#15 +# asm 2: movq 0(t=%rbp +movq 0(%rcx,%rdi),%rbp + +# qhasm: tysubx0 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 8(t=%rbp +movq 8(%rcx,%rdi),%rbp + +# qhasm: tysubx1 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 16(t=%rbp +movq 16(%rcx,%rdi),%rbp + +# qhasm: tysubx2 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 24(t=%rbp +movq 24(%rcx,%rdi),%rbp + +# qhasm: tysubx3 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 32(t=%rbp +movq 32(%rcx,%rdi),%rbp + +# qhasm: tysubx4 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 40(t=%rbp +movq 40(%rcx,%rdi),%rbp + +# qhasm: txaddy0 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 48(t=%rbp +movq 48(%rcx,%rdi),%rbp + +# qhasm: txaddy1 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 56(t=%rbp +movq 56(%rcx,%rdi),%rbp + +# qhasm: txaddy2 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 64(t=%rbp +movq 64(%rcx,%rdi),%rbp + +# qhasm: txaddy3 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 72(t=%rbp +movq 72(%rcx,%rdi),%rbp 
+ +# qhasm: txaddy4 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 160(t=%rbp +movq 160(%rcx,%rdi),%rbp + +# qhasm: tysubx0 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 168(t=%rbp +movq 168(%rcx,%rdi),%rbp + +# qhasm: tysubx1 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 176(t=%rbp +movq 176(%rcx,%rdi),%rbp + +# qhasm: tysubx2 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 184(t=%rbp +movq 184(%rcx,%rdi),%rbp + +# qhasm: tysubx3 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 192(t=%rbp +movq 192(%rcx,%rdi),%rbp + +# qhasm: tysubx4 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 200(t=%rbp +movq 200(%rcx,%rdi),%rbp + +# qhasm: txaddy0 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 208(t=%rbp +movq 208(%rcx,%rdi),%rbp + +# qhasm: txaddy1 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 216(t=%rbp +movq 216(%rcx,%rdi),%rbp + +# qhasm: txaddy2 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 184(t=%rbp +movq 224(%rcx,%rdi),%rbp + +# qhasm: txaddy3 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 232(t=%rbp +movq 232(%rcx,%rdi),%rbp + +# qhasm: txaddy4 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 320(t=%rbp +movq 320(%rcx,%rdi),%rbp + +# qhasm: tysubx0 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 328(t=%rbp +movq 328(%rcx,%rdi),%rbp + +# qhasm: tysubx1 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 336(t=%rbp +movq 336(%rcx,%rdi),%rbp + +# qhasm: tysubx2 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 344(t=%rbp +movq 344(%rcx,%rdi),%rbp + +# qhasm: tysubx3 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 272(t=%rbp +movq 352(%rcx,%rdi),%rbp + +# qhasm: tysubx4 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 360(t=%rbp +movq 360(%rcx,%rdi),%rbp + +# qhasm: txaddy0 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 368(t=%rbp +movq 368(%rcx,%rdi),%rbp + +# qhasm: txaddy1 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 376(t=%rbp +movq 376(%rcx,%rdi),%rbp + +# qhasm: txaddy2 = t if = +# asm 1: cmove t=int64#15 
+# asm 2: movq 384(t=%rbp +movq 384(%rcx,%rdi),%rbp + +# qhasm: txaddy3 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 392(t=%rbp +movq 392(%rcx,%rdi),%rbp + +# qhasm: txaddy4 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 480(t=%rbp +movq 480(%rcx,%rdi),%rbp + +# qhasm: tysubx0 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 488(t=%rbp +movq 488(%rcx,%rdi),%rbp + +# qhasm: tysubx1 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 496(t=%rbp +movq 496(%rcx,%rdi),%rbp + +# qhasm: tysubx2 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 384(t=%rbp +movq 504(%rcx,%rdi),%rbp + +# qhasm: tysubx3 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 512(t=%rbp +movq 512(%rcx,%rdi),%rbp + +# qhasm: tysubx4 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 400(t=%rbp +movq 520(%rcx,%rdi),%rbp + +# qhasm: txaddy0 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 528(t=%rbp +movq 528(%rcx,%rdi),%rbp + +# qhasm: txaddy1 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 536(t=%rbp +movq 536(%rcx,%rdi),%rbp + +# qhasm: txaddy2 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 424(t=%rbp +movq 544(%rcx,%rdi),%rbp + +# qhasm: txaddy3 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 552(t=%rbp +movq 552(%rcx,%rdi),%rbp + +# qhasm: txaddy4 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 640(t=%rbp +movq 640(%rcx,%rdi),%rbp + +# qhasm: tysubx0 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 648(t=%rbp +movq 648(%rcx,%rdi),%rbp + +# qhasm: tysubx1 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 656(t=%rbp +movq 656(%rcx,%rdi),%rbp + +# qhasm: tysubx2 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 664(t=%rbp +movq 664(%rcx,%rdi),%rbp + +# qhasm: tysubx3 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 512(t=%rbp +movq 672(%rcx,%rdi),%rbp + +# qhasm: tysubx4 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 680(t=%rbp +movq 680(%rcx,%rdi),%rbp + +# qhasm: txaddy0 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 688(t=%rbp +movq 688(%rcx,%rdi),%rbp + 
+# qhasm: txaddy1 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 696(t=%rbp +movq 696(%rcx,%rdi),%rbp + +# qhasm: txaddy2 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 704(t=%rbp +movq 704(%rcx,%rdi),%rbp + +# qhasm: txaddy3 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 712(t=%rbp +movq 712(%rcx,%rdi),%rbp + +# qhasm: txaddy4 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 800(t=%rbp +movq 800(%rcx,%rdi),%rbp + +# qhasm: tysubx0 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 808(t=%rbp +movq 808(%rcx,%rdi),%rbp + +# qhasm: tysubx1 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 816(t=%rbp +movq 816(%rcx,%rdi),%rbp + +# qhasm: tysubx2 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 824(t=%rbp +movq 824(%rcx,%rdi),%rbp + +# qhasm: tysubx3 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 832(t=%rbp +movq 832(%rcx,%rdi),%rbp + +# qhasm: tysubx4 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 640(t=%rbp +movq 840(%rcx,%rdi),%rbp + +# qhasm: txaddy0 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 848(t=%rbp +movq 848(%rcx,%rdi),%rbp + +# qhasm: txaddy1 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 856(t=%rbp +movq 856(%rcx,%rdi),%rbp + +# qhasm: txaddy2 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 864(t=%rbp +movq 864(%rcx,%rdi),%rbp + +# qhasm: txaddy3 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 872(t=%rbp +movq 872(%rcx,%rdi),%rbp + +# qhasm: txaddy4 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 960(t=%rbp +movq 960(%rcx,%rdi),%rbp + +# qhasm: tysubx0 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 968(t=%rbp +movq 968(%rcx,%rdi),%rbp + +# qhasm: tysubx1 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 976(t=%rbp +movq 976(%rcx,%rdi),%rbp + +# qhasm: tysubx2 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 984(t=%rbp +movq 984(%rcx,%rdi),%rbp + +# qhasm: tysubx3 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 992(t=%rbp +movq 992(%rcx,%rdi),%rbp + +# qhasm: tysubx4 = t if = +# asm 1: cmove t=int64#15 
+# asm 2: movq 1000(t=%rbp +movq 1000(%rcx,%rdi),%rbp + +# qhasm: txaddy0 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 1008(t=%rbp +movq 1008(%rcx,%rdi),%rbp + +# qhasm: txaddy1 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 1016(t=%rbp +movq 1016(%rcx,%rdi),%rbp + +# qhasm: txaddy2 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 1024(t=%rbp +movq 1024(%rcx,%rdi),%rbp + +# qhasm: txaddy3 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 1032(t=%rbp +movq 1032(%rcx,%rdi),%rbp + +# qhasm: txaddy4 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 1120(t=%rbp +movq 1120(%rcx,%rdi),%rbp + +# qhasm: tysubx0 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 1128(t=%rbp +movq 1128(%rcx,%rdi),%rbp + +# qhasm: tysubx1 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 1136(t=%rbp +movq 1136(%rcx,%rdi),%rbp + +# qhasm: tysubx2 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 1144(t=%rbp +movq 1144(%rcx,%rdi),%rbp + +# qhasm: tysubx3 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 1152(t=%rbp +movq 1152(%rcx,%rdi),%rbp + +# qhasm: tysubx4 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 1160(t=%rbp +movq 1160(%rcx,%rdi),%rbp + +# qhasm: txaddy0 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 1168(t=%rbp +movq 1168(%rcx,%rdi),%rbp + +# qhasm: txaddy1 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 1176(t=%rbp +movq 1176(%rcx,%rdi),%rbp + +# qhasm: txaddy2 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 1184(t=%rbp +movq 1184(%rcx,%rdi),%rbp + +# qhasm: txaddy3 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 1192(t=%rbp +movq 1192(%rcx,%rdi),%rbp + +# qhasm: txaddy4 = t if = +# asm 1: cmove t=int64#15 +# asm 2: mov t=%rbp +mov %rsi,%rbp + +# qhasm: tysubx0 = txaddy0 if signed< +# asm 1: cmovl t=int64#15 +# asm 2: mov t=%rbp +mov %r9,%rbp + +# qhasm: tysubx1 = txaddy1 if signed< +# asm 1: cmovl t=int64#15 +# asm 2: mov t=%rbp +mov %rax,%rbp + +# qhasm: tysubx2 = txaddy2 if signed< +# asm 1: cmovl t=int64#15 +# asm 2: mov t=%rbp +mov %r10,%rbp + 
+# qhasm: tysubx3 = txaddy3 if signed< +# asm 1: cmovl t=int64#15 +# asm 2: mov t=%rbp +mov %r11,%rbp + +# qhasm: tysubx4 = txaddy4 if signed< +# asm 1: cmovl tp=int64#15 +# asm 2: movq tp=%rbp +movq 56(%rsp),%rbp + +# qhasm: *(uint64 *)(tp + 0) = tysubx0 +# asm 1: movq tt2d0=int64#2 +# asm 2: mov $0,>tt2d0=%rsi +mov $0,%rsi + +# qhasm: tt2d1 = 0 +# asm 1: mov $0,>tt2d1=int64#6 +# asm 2: mov $0,>tt2d1=%r9 +mov $0,%r9 + +# qhasm: tt2d2 = 0 +# asm 1: mov $0,>tt2d2=int64#7 +# asm 2: mov $0,>tt2d2=%rax +mov $0,%rax + +# qhasm: tt2d3 = 0 +# asm 1: mov $0,>tt2d3=int64#8 +# asm 2: mov $0,>tt2d3=%r10 +mov $0,%r10 + +# qhasm: tt2d4 = 0 +# asm 1: mov $0,>tt2d4=int64#9 +# asm 2: mov $0,>tt2d4=%r11 +mov $0,%r11 + +# qhasm: =? u - 1 +# asm 1: cmp $1,t=int64#10 +# asm 2: movq 80(t=%r12 +movq 80(%rcx,%rdi),%rbp + +# qhasm: tz0 = t if = +cmove %rbp,%r12 + +# qhasm: t = *(uint64 *)(basep + 88 + pos) +# asm 1: movq 80(t=int64#10 +# asm 2: movq 80(t=%r12 +movq 88(%rcx,%rdi),%rbp + +# qhasm: tz1 = t if = +cmove %rbp,%r13 + +# qhasm: t = *(uint64 *)(basep + 96 + pos) +# asm 1: movq 96(t=int64#10 +# asm 2: movq 96(t=%r12 +movq 96(%rcx,%rdi),%rbp + +# qhasm: tz2 = t if = +cmove %rbp,%r14 + +# qhasm: t = *(uint64 *)(basep + 104 + pos) +# asm 1: movq 104(t=int64#10 +# asm 2: movq 104(t=%r12 +movq 104(%rcx,%rdi),%rbp + +# qhasm: tz3 = t if = +cmove %rbp,%r15 + +# qhasm: t = *(uint64 *)(basep + 112 + pos) +# asm 1: movq 112(t=int64#10 +# asm 2: movq 112(t=%r12 +movq 112(%rcx,%rdi),%rbp + +# qhasm: tz4 = t if = +cmove %rbp,%rbx + +# qhasm: t = *(uint64 *)(basep + 120 + pos) +# asm 1: movq 120(t=int64#10 +# asm 2: movq 120(t=%r12 +movq 120(%rcx,%rdi),%rbp + +# qhasm: tt2d0 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 128(t=%r12 +movq 128(%rcx,%rdi),%rbp + +# qhasm: tt2d1 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 136(t=%r12 +movq 136(%rcx,%rdi),%rbp + +# qhasm: tt2d2 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 144(t=%r12 +movq 144(%rcx,%rdi),%rbp + +# qhasm: tt2d3 = t if = 
+# asm 1: cmove t=int64#10 +# asm 2: movq 152(t=%r12 +movq 152(%rcx,%rdi),%rbp + +# qhasm: tt2d4 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 240(t=%r12 +movq 240(%rcx,%rdi),%rbp + +# qhasm: tz0 = t if = +cmove %rbp,%r12 + +# qhasm: t = *(uint64 *)(basep + 248 + pos) +# asm 1: movq 248(t=int64#10 +# asm 2: movq 248(t=%r12 +movq 248(%rcx,%rdi),%rbp + +# qhasm: tz1 = t if = +cmove %rbp,%r13 + +# qhasm: t = *(uint64 *)(basep + 256 + pos) +# asm 1: movq 256(t=int64#10 +# asm 2: movq 256(t=%r12 +movq 256(%rcx,%rdi),%rbp + +# qhasm: tz2 = t if = +cmove %rbp,%r14 + +# qhasm: t = *(uint64 *)(basep + 264 + pos) +# asm 1: movq 264(t=int64#10 +# asm 2: movq 264(t=%r12 +movq 264(%rcx,%rdi),%rbp + +# qhasm: tz3 = t if = +cmove %rbp,%r15 + +# qhasm: t = *(uint64 *)(basep + 272 + pos) +# asm 1: movq 272(t=int64#10 +# asm 2: movq 272(t=%r12 +movq 272(%rcx,%rdi),%rbp + +# qhasm: tz4 = t if = +cmove %rbp,%rbx + +# qhasm: t = *(uint64 *)(basep + 280 + pos) +# asm 1: movq 280(t=int64#10 +# asm 2: movq 280(t=%r12 +movq 280(%rcx,%rdi),%rbp + +# qhasm: tt2d0 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 288(t=%r12 +movq 288(%rcx,%rdi),%rbp + +# qhasm: tt2d1 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 296(t=%r12 +movq 296(%rcx,%rdi),%rbp + +# qhasm: tt2d2 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 304(t=%r12 +movq 304(%rcx,%rdi),%rbp + +# qhasm: tt2d3 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 312(t=%r12 +movq 312(%rcx,%rdi),%rbp + +# qhasm: tt2d4 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 400(t=%r12 +movq 400(%rcx,%rdi),%rbp + +# qhasm: tz0 = t if = +cmove %rbp,%r12 + +# qhasm: t = *(uint64 *)(basep + 408 + pos) +# asm 1: movq 408(t=int64#10 +# asm 2: movq 408(t=%r12 +movq 408(%rcx,%rdi),%rbp + +# qhasm: tz1 = t if = +cmove %rbp,%r13 + +# qhasm: t = *(uint64 *)(basep + 416 + pos) +# asm 1: movq 416(t=int64#10 +# asm 2: movq 416(t=%r12 +movq 416(%rcx,%rdi),%rbp + +# qhasm: tz2 = t if = +cmove %rbp,%r14 + +# qhasm: t = *(uint64 *)(basep + 424 + 
pos) +# asm 1: movq 424(t=int64#10 +# asm 2: movq 424(t=%r12 +movq 424(%rcx,%rdi),%rbp + +# qhasm: tz3 = t if = +cmove %rbp,%r15 + +# qhasm: t = *(uint64 *)(basep + 432 + pos) +# asm 1: movq 432(t=int64#10 +# asm 2: movq 432(t=%r12 +movq 432(%rcx,%rdi),%rbp + +# qhasm: tz4 = t if = +cmove %rbp,%rbx + +# qhasm: t = *(uint64 *)(basep + 440 + pos) +# asm 1: movq 440(t=int64#10 +# asm 2: movq 440(t=%r12 +movq 440(%rcx,%rdi),%rbp + +# qhasm: tt2d0 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 448(t=%r12 +movq 448(%rcx,%rdi),%rbp + +# qhasm: tt2d1 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 336(t=%r12 +movq 456(%rcx,%rdi),%rbp + +# qhasm: tt2d2 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 344(t=%r12 +movq 464(%rcx,%rdi),%rbp + +# qhasm: tt2d3 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 472(t=%r12 +movq 472(%rcx,%rdi),%rbp + +# qhasm: tt2d4 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 560(t=%r12 +movq 560(%rcx,%rdi),%rbp + +# qhasm: tz0 = t if = +cmove %rbp,%r12 + +# qhasm: t = *(uint64 *)(basep + 568 + pos) +# asm 1: movq 568(t=int64#10 +# asm 2: movq 568(t=%r12 +movq 568(%rcx,%rdi),%rbp + +# qhasm: tz1 = t if = +cmove %rbp,%r13 + +# qhasm: t = *(uint64 *)(basep + 576 + pos) +# asm 1: movq 576(t=int64#10 +# asm 2: movq 576(t=%r12 +movq 576(%rcx,%rdi),%rbp + +# qhasm: tz2 = t if = +cmove %rbp,%r14 + +# qhasm: t = *(uint64 *)(basep + 584 + pos) +# asm 1: movq 584(t=int64#10 +# asm 2: movq 584(t=%r12 +movq 584(%rcx,%rdi),%rbp + +# qhasm: tz3 = t if = +cmove %rbp,%r15 + +# qhasm: t = *(uint64 *)(basep + 592 + pos) +# asm 1: movq 592(t=int64#10 +# asm 2: movq 592(t=%r12 +movq 592(%rcx,%rdi),%rbp + +# qhasm: tz4 = t if = +cmove %rbp,%rbx + +# qhasm: t = *(uint64 *)(basep + 600 + pos) +# asm 1: movq 600(t=int64#10 +# asm 2: movq 600(t=%r12 +movq 600(%rcx,%rdi),%rbp + +# qhasm: tt2d0 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 608(t=%r12 +movq 608(%rcx,%rdi),%rbp + +# qhasm: tt2d1 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 616(t=%r12 
+movq 616(%rcx,%rdi),%rbp + +# qhasm: tt2d2 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 624(t=%r12 +movq 624(%rcx,%rdi),%rbp + +# qhasm: tt2d3 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 632(t=%r12 +movq 632(%rcx,%rdi),%rbp + +# qhasm: tt2d4 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 720(t=%r12 +movq 720(%rcx,%rdi),%rbp + +# qhasm: tz0 = t if = +cmove %rbp,%r12 + +# qhasm: t = *(uint64 *)(basep + 728 + pos) +# asm 1: movq 728(t=int64#10 +# asm 2: movq 728(t=%r12 +movq 728(%rcx,%rdi),%rbp + +# qhasm: tz1 = t if = +cmove %rbp,%r13 + +# qhasm: t = *(uint64 *)(basep + 736 + pos) +# asm 1: movq 736(t=int64#10 +# asm 2: movq 736(t=%r12 +movq 736(%rcx,%rdi),%rbp + +# qhasm: tz2 = t if = +cmove %rbp,%r14 + +# qhasm: t = *(uint64 *)(basep + 744 + pos) +# asm 1: movq 744(t=int64#10 +# asm 2: movq 744(t=%r12 +movq 744(%rcx,%rdi),%rbp + +# qhasm: tz3 = t if = +cmove %rbp,%r15 + +# qhasm: t = *(uint64 *)(basep + 752 + pos) +# asm 1: movq 752(t=int64#10 +# asm 2: movq 752(t=%r12 +movq 752(%rcx,%rdi),%rbp + +# qhasm: tz4 = t if = +cmove %rbp,%rbx + +# qhasm: t = *(uint64 *)(basep + 760 + pos) +# asm 1: movq 760(t=int64#10 +# asm 2: movq 760(t=%r12 +movq 760(%rcx,%rdi),%rbp + +# qhasm: tt2d0 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 768(t=%r12 +movq 768(%rcx,%rdi),%rbp + +# qhasm: tt2d1 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 776(t=%r12 +movq 776(%rcx,%rdi),%rbp + +# qhasm: tt2d2 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 784(t=%r12 +movq 784(%rcx,%rdi),%rbp + +# qhasm: tt2d3 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 792(t=%r12 +movq 792(%rcx,%rdi),%rbp + +# qhasm: tt2d4 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 880(t=%r12 +movq 880(%rcx,%rdi),%rbp + +# qhasm: tz0 = t if = +cmove %rbp,%r12 + +# qhasm: t = *(uint64 *)(basep + 888 + pos) +# asm 1: movq 888(t=int64#10 +# asm 2: movq 888(t=%r12 +movq 888(%rcx,%rdi),%rbp + +# qhasm: tz1 = t if = +cmove %rbp,%r13 + +# qhasm: t = *(uint64 *)(basep + 896 + pos) +# asm 1: 
movq 896(t=int64#10 +# asm 2: movq 896(t=%r12 +movq 896(%rcx,%rdi),%rbp + +# qhasm: tz1 = t if = +cmove %rbp,%r14 + +# qhasm: t = *(uint64 *)(basep + 904 + pos) +# asm 1: movq 904(t=int64#10 +# asm 2: movq 904(t=%r12 +movq 904(%rcx,%rdi),%rbp + +# qhasm: tz2 = t if = +cmove %rbp,%r15 + +# qhasm: t = *(uint64 *)(basep + 912 + pos) +# asm 1: movq 912(t=int64#10 +# asm 2: movq 912(t=%r12 +movq 912(%rcx,%rdi),%rbp + +# qhasm: tz3 = t if = +cmove %rbp,%rbx + +# qhasm: t = *(uint64 *)(basep + 920 + pos) +# asm 1: movq 920(t=int64#10 +# asm 2: movq 920(t=%r12 +movq 920(%rcx,%rdi),%rbp + +# qhasm: tt2d0 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 928(t=%r12 +movq 928(%rcx,%rdi),%rbp + +# qhasm: tt2d1 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 936(t=%r12 +movq 936(%rcx,%rdi),%rbp + +# qhasm: tt2d2 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 944(t=%r12 +movq 944(%rcx,%rdi),%rbp + +# qhasm: tt2d3 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 952(t=%r12 +movq 952(%rcx,%rdi),%rbp + +# qhasm: tt2d4 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 1040(t=%r12 +movq 1040(%rcx,%rdi),%rbp + +# qhasm: tz0 = t if = +cmove %rbp,%r12 + +# qhasm: t = *(uint64 *)(basep + 1048 + pos) +# asm 1: movq 1048(t=int64#10 +# asm 2: movq 1048(t=%r12 +movq 1048(%rcx,%rdi),%rbp + +# qhasm: tz1 = t if = +cmove %rbp,%r13 + +# qhasm: t = *(uint64 *)(basep + 1056 + pos) +# asm 1: movq 1056(t=int64#10 +# asm 2: movq 1056(t=%r12 +movq 1056(%rcx,%rdi),%rbp + +# qhasm: tz2 = t if = +cmove %rbp,%r14 + +# qhasm: t = *(uint64 *)(basep + 1064 + pos) +# asm 1: movq 1064(t=int64#10 +# asm 2: movq 1064(t=%r12 +movq 1064(%rcx,%rdi),%rbp + +# qhasm: tz3 = t if = +cmove %rbp,%r15 + +# qhasm: t = *(uint64 *)(basep + 1072 + pos) +# asm 1: movq 1072(t=int64#10 +# asm 2: movq 1072(t=%r12 +movq 1072(%rcx,%rdi),%rbp + +# qhasm: tz4 = t if = +cmove %rbp,%rbx + +# qhasm: t = *(uint64 *)(basep + 1080 + pos) +# asm 1: movq 1080(t=int64#10 +# asm 2: movq 1080(t=%r12 +movq 1080(%rcx,%rdi),%rbp + +# 
qhasm: tt2d0 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 1088(t=%r12 +movq 1088(%rcx,%rdi),%rbp + +# qhasm: tt2d1 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 1096(t=%r12 +movq 1096(%rcx,%rdi),%rbp + +# qhasm: tt2d2 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 1104(t=%r12 +movq 1104(%rcx,%rdi),%rbp + +# qhasm: tt2d3 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 1112(t=%r12 +movq 1112(%rcx,%rdi),%rbp + +# qhasm: tt2d4 = t if = +# asm 1: cmove t=int64#5 +# asm 2: movq 1200(t=%r8 +movq 1200(%rcx,%rdi),%r8 + +# qhasm: tz0 = t if = +cmove %r8,%r12 + +# qhasm: t = *(uint64 *)(basep + 1208 + pos) +# asm 1: movq 1208(t=int64#5 +# asm 2: movq 1208(t=%r8 +movq 1208(%rcx,%rdi),%r8 + +# qhasm: tz1 = t if = +cmove %r8,%r13 + +# qhasm: t = *(uint64 *)(basep + 1216 + pos) +# asm 1: movq 1216(t=int64#5 +# asm 2: movq 1216(t=%r8 +movq 1216(%rcx,%rdi),%r8 + +# qhasm: tz2 = t if = +cmove %r8,%r14 + +# qhasm: t = *(uint64 *)(basep + 1224 + pos) +# asm 1: movq 1224(t=int64#5 +# asm 2: movq 1224(t=%r8 +movq 1224(%rcx,%rdi),%r8 + +# qhasm: tz3 = t if = +cmove %r8,%r15 + +# qhasm: t = *(uint64 *)(basep + 1232 + pos) +# asm 1: movq 1232(t=int64#5 +# asm 2: movq 1232(t=%r8 +movq 1232(%rcx,%rdi),%r8 + +# qhasm: tz4 = t if = +cmove %r8,%rbx + +# qhasm: t = *(uint64 *)(basep + 1240 + pos) +# asm 1: movq 1240(t=int64#5 +# asm 2: movq 1240(t=%r8 +movq 1240(%rcx,%rdi),%r8 + +# qhasm: tt2d0 = t if = +# asm 1: cmove t=int64#5 +# asm 2: movq 1248(t=%r8 +movq 1248(%rcx,%rdi),%r8 + +# qhasm: tt2d1 = t if = +# asm 1: cmove t=int64#5 +# asm 2: movq 1256(t=%r8 +movq 1256(%rcx,%rdi),%r8 + +# qhasm: tt2d2 = t if = +# asm 1: cmove t=int64#5 +# asm 2: movq 1264(t=%r8 +movq 1264(%rcx,%rdi),%r8 + +# qhasm: tt2d3 = t if = +# asm 1: cmove t=int64#1 +# asm 2: movq 1272(t=%rdi +movq 1272(%rcx,%rdi),%rdi + +# qhasm: tt2d4 = t if = +# asm 1: cmove tp=int64#15 +# asm 2: movq tp=%rbp +movq 56(%rsp),%rbp + +# qhasm: *(uint64 *)(tp + 80) = tz0 +# asm 1: movq tt0=int64#1 +# asm 2: movq 
crypto_sign_ed25519_amd64_51_30k_batch_2P0,>tt0=%rdi +movq crypto_sign_ed25519_amd64_51_30k_batch_2P0(%rip),%rdi + +# qhasm: tt1 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234 +# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt1=int64#4 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt1=%rcx +movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%rcx + +# qhasm: tt2 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234 +# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt2=int64#5 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt2=%r8 +movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r8 + +# qhasm: tt3 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234 +# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt3=int64#10 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt3=%r12 +movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r12 + +# qhasm: tt4 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234 +# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt4=int64#11 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt4=%r13 +movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234(%rip),%r13 + +# qhasm: tt0 -= tt2d0 +# asm 1: sub caller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 
+# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/wallet/crypto/amd64-51-30k.c b/src/wallet/crypto/amd64-51-30k.c new file mode 100644 index 0000000000..e0f6b4ec54 --- /dev/null +++ b/src/wallet/crypto/amd64-51-30k.c @@ -0,0 +1,58 @@ +// Copyright (c) 2017, The Monero Project +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Parts of this file from bench.cr.yp.to/supercop.html (2017-07-25): +// Daniel J. 
Bernstein +// Niels Duif +// Tanja Lange +// lead: Peter Schwabe +// Bo-Yin Yang + +#include +#include "fe25519.h" + +/* constants below can be found in various fles in ed25519/amd64-51-30k */ + +/* d */ +static const fe25519 ecd = {{929955233495203, 466365720129213, 1662059464998953, 2033849074728123, 1442794654840575}}; +/* 2*d */ +static const fe25519 ec2d = {{1859910466990425, 932731440258426, 1072319116312658, 1815898335770999, 633789495995903}}; +/* sqrt(-1) */ +static const fe25519 sqrtm1 = {{1718705420411056, 234908883556509, 2233514472574048, 2117202627021982, 765476049583133}}; + +#define choose_tp crypto_sign_ed25519_amd64_51_30k_batch_choose_tp +#define crypto_scalarmult crypto_scalarmult_curve25519_amd64_51_30k +#include "amd64.c.inc" + +int monero_wallet_amd64_51_30k_get_tx_key(char* out, char const* pub, char const* sec) { + return get_tx_key(out, pub, sec); +} +int monero_wallet_amd64_51_30k_get_tx_output_public(char* out, char const* pub, char const* sec) { + return get_tx_output_public(out, pub, sec); +} + diff --git a/src/wallet/crypto/amd64-51-30k.cmake b/src/wallet/crypto/amd64-51-30k.cmake new file mode 100644 index 0000000000..0a8365c7ff --- /dev/null +++ b/src/wallet/crypto/amd64-51-30k.cmake @@ -0,0 +1,13 @@ +include("${CMAKE_CURRENT_SOURCE_DIR}/amd64.cmake") +enable_language(ASM-ATT) +add_wallet_amd64_lib( + "wallet-crypto-amd64-51-30k" + "amd64-51-30k" + "ed25519/amd64-51-30k" + SOURCES + "ed25519/amd64-51-30k/fe25519_add.c" + "ed25519/amd64-51-30k/fe25519_nsquare.s" + "ed25519/amd64-51-30k/fe25519_sub.c" + "ed25519/amd64-51-30k/ge25519_p1p1_to_pniels.s" +) +list(APPEND WALLET_CRYPTO_LIBS "wallet-crypto-amd64-51-30k") diff --git a/src/wallet/crypto/amd64-51-30k.h b/src/wallet/crypto/amd64-51-30k.h new file mode 100644 index 0000000000..aff0d2329f --- /dev/null +++ b/src/wallet/crypto/amd64-51-30k.h @@ -0,0 +1,57 @@ +// Copyright (c) 2017, The Monero Project +// +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#pragma once + +#include +#include "crypto/crypto.h" + +extern "C" { + int monero_wallet_amd64_51_30k_get_tx_key(char*, char const*, char const*); + int monero_wallet_amd64_51_30k_get_tx_output_public(char*, char const*, char const*); +} + +namespace tools { + namespace wallet_only { + namespace amd64_51_30k { + inline bool generate_key_derivation( + crypto::public_key const& pub, crypto::secret_key const& sec, crypto::key_derivation& out + ) { + return monero_wallet_amd64_51_30k_get_tx_key(out.data, pub.data, sec.data) == 0; + } + + inline bool derive_public_key( + crypto::key_derivation const& d, std::size_t index, crypto::public_key const& pub, crypto::public_key& out + ) { + ec_scalar scalar; + crypto::derivation_to_scalar(d, index, scalar); + return monero_wallet_amd64_51_30k_get_tx_output_public(out.data, pub.data, scalar.data) == 0; + } + } + } +} diff --git a/src/wallet/crypto/amd64-64-24k-choose_tp.s b/src/wallet/crypto/amd64-64-24k-choose_tp.s new file mode 100644 index 0000000000..523ad32f8f --- /dev/null +++ b/src/wallet/crypto/amd64-64-24k-choose_tp.s @@ -0,0 +1,1925 @@ + +# qhasm: int64 tp + +# qhasm: int64 pos + +# qhasm: int64 b + +# qhasm: int64 basep + +# qhasm: input tp + +# qhasm: input pos + +# qhasm: input b + +# qhasm: input basep + +# qhasm: int64 mask + +# qhasm: int64 u + +# qhasm: int64 tysubx0 + +# qhasm: int64 tysubx1 + +# qhasm: int64 tysubx2 + +# qhasm: int64 tysubx3 + +# qhasm: int64 txaddy0 + +# qhasm: int64 txaddy1 + +# qhasm: int64 txaddy2 + +# qhasm: int64 txaddy3 + +# qhasm: int64 tt2d0 + +# qhasm: int64 tt2d1 + +# qhasm: int64 tt2d2 + +# qhasm: int64 tt2d3 + +# qhasm: int64 tt0 + +# qhasm: int64 tt1 + +# qhasm: int64 tt2 + +# qhasm: int64 tt3 + +# qhasm: int64 subt0 + +# qhasm: int64 subt1 + +# qhasm: int64 t + +# qhasm: stack64 tp_stack + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# 
qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: enter crypto_sign_ed25519_amd64_64_choose_t +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_choose_tp +.globl crypto_sign_ed25519_amd64_64_choose_tp +_crypto_sign_ed25519_amd64_64_choose_tp: +crypto_sign_ed25519_amd64_64_choose_tp: +mov %rsp,%r11 +and $31,%r11 +add $64,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq caller1_stack=stack64#1 +# asm 2: movq caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq caller2_stack=stack64#2 +# asm 2: movq caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq caller3_stack=stack64#3 +# asm 2: movq caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq caller4_stack=stack64#4 +# asm 2: movq caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#5 +# asm 2: movq caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#6 +# asm 2: movq caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq caller7_stack=stack64#7 +# asm 2: movq caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: tp_stack = tp +# asm 1: movq tp_stack=stack64#8 +# asm 2: movq tp_stack=56(%rsp) +movq %rdi,56(%rsp) + +# qhasm: pos *= 768 +# asm 1: imulq $768,pos=int64#1 +# asm 2: imulq $768,pos=%rdi +imulq $768,%rsi,%rdi + +# qhasm: mask = b +# asm 1: mov mask=int64#2 +# asm 2: mov mask=%rsi +mov %rdx,%rsi + +# qhasm: (int64) mask >>= 7 
+# asm 1: sar $7,u=int64#5 +# asm 2: mov u=%r8 +mov %rdx,%r8 + +# qhasm: u += mask +# asm 1: add tysubx0=int64#2 +# asm 2: mov $1,>tysubx0=%rsi +mov $1,%rsi + +# qhasm: tysubx1 = 0 +# asm 1: mov $0,>tysubx1=int64#6 +# asm 2: mov $0,>tysubx1=%r9 +mov $0,%r9 + +# qhasm: tysubx2 = 0 +# asm 1: mov $0,>tysubx2=int64#7 +# asm 2: mov $0,>tysubx2=%rax +mov $0,%rax + +# qhasm: tysubx3 = 0 +# asm 1: mov $0,>tysubx3=int64#8 +# asm 2: mov $0,>tysubx3=%r10 +mov $0,%r10 + +# qhasm: txaddy0 = 1 +# asm 1: mov $1,>txaddy0=int64#9 +# asm 2: mov $1,>txaddy0=%r11 +mov $1,%r11 + +# qhasm: txaddy1 = 0 +# asm 1: mov $0,>txaddy1=int64#10 +# asm 2: mov $0,>txaddy1=%r12 +mov $0,%r12 + +# qhasm: txaddy2 = 0 +# asm 1: mov $0,>txaddy2=int64#11 +# asm 2: mov $0,>txaddy2=%r13 +mov $0,%r13 + +# qhasm: txaddy3 = 0 +# asm 1: mov $0,>txaddy3=int64#12 +# asm 2: mov $0,>txaddy3=%r14 +mov $0,%r14 + +# qhasm: =? u - 1 +# asm 1: cmp $1,t=int64#13 +# asm 2: movq 0(t=%r15 +movq 0(%rcx,%rdi),%r15 + +# qhasm: tysubx0 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 8(t=%r15 +movq 8(%rcx,%rdi),%r15 + +# qhasm: tysubx1 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 16(t=%r15 +movq 16(%rcx,%rdi),%r15 + +# qhasm: tysubx2 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 24(t=%r15 +movq 24(%rcx,%rdi),%r15 + +# qhasm: tysubx3 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 32(t=%r15 +movq 32(%rcx,%rdi),%r15 + +# qhasm: txaddy0 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 40(t=%r15 +movq 40(%rcx,%rdi),%r15 + +# qhasm: txaddy1 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 48(t=%r15 +movq 48(%rcx,%rdi),%r15 + +# qhasm: txaddy2 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 56(t=%r15 +movq 56(%rcx,%rdi),%r15 + +# qhasm: txaddy3 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 128(t=%r15 +movq 128(%rcx,%rdi),%r15 + +# qhasm: tysubx0 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 136(t=%r15 +movq 136(%rcx,%rdi),%r15 + +# qhasm: tysubx1 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 
144(t=%r15 +movq 144(%rcx,%rdi),%r15 + +# qhasm: tysubx2 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 152(t=%r15 +movq 152(%rcx,%rdi),%r15 + +# qhasm: tysubx3 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 160(t=%r15 +movq 160(%rcx,%rdi),%r15 + +# qhasm: txaddy0 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 168(t=%r15 +movq 168(%rcx,%rdi),%r15 + +# qhasm: txaddy1 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 176(t=%r15 +movq 176(%rcx,%rdi),%r15 + +# qhasm: txaddy2 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 184(t=%r15 +movq 184(%rcx,%rdi),%r15 + +# qhasm: txaddy3 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 256(t=%r15 +movq 256(%rcx,%rdi),%r15 + +# qhasm: tysubx0 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 264(t=%r15 +movq 264(%rcx,%rdi),%r15 + +# qhasm: tysubx1 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 272(t=%r15 +movq 272(%rcx,%rdi),%r15 + +# qhasm: tysubx2 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 280(t=%r15 +movq 280(%rcx,%rdi),%r15 + +# qhasm: tysubx3 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 288(t=%r15 +movq 288(%rcx,%rdi),%r15 + +# qhasm: txaddy0 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 296(t=%r15 +movq 296(%rcx,%rdi),%r15 + +# qhasm: txaddy1 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 304(t=%r15 +movq 304(%rcx,%rdi),%r15 + +# qhasm: txaddy2 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 312(t=%r15 +movq 312(%rcx,%rdi),%r15 + +# qhasm: txaddy3 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 384(t=%r15 +movq 384(%rcx,%rdi),%r15 + +# qhasm: tysubx0 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 392(t=%r15 +movq 392(%rcx,%rdi),%r15 + +# qhasm: tysubx1 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 400(t=%r15 +movq 400(%rcx,%rdi),%r15 + +# qhasm: tysubx2 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 408(t=%r15 +movq 408(%rcx,%rdi),%r15 + +# qhasm: tysubx3 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 416(t=%r15 +movq 416(%rcx,%rdi),%r15 + +# qhasm: 
txaddy0 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 424(t=%r15 +movq 424(%rcx,%rdi),%r15 + +# qhasm: txaddy1 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 432(t=%r15 +movq 432(%rcx,%rdi),%r15 + +# qhasm: txaddy2 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 440(t=%r15 +movq 440(%rcx,%rdi),%r15 + +# qhasm: txaddy3 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 512(t=%r15 +movq 512(%rcx,%rdi),%r15 + +# qhasm: tysubx0 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 520(t=%r15 +movq 520(%rcx,%rdi),%r15 + +# qhasm: tysubx1 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 528(t=%r15 +movq 528(%rcx,%rdi),%r15 + +# qhasm: tysubx2 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 536(t=%r15 +movq 536(%rcx,%rdi),%r15 + +# qhasm: tysubx3 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 544(t=%r15 +movq 544(%rcx,%rdi),%r15 + +# qhasm: txaddy0 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 552(t=%r15 +movq 552(%rcx,%rdi),%r15 + +# qhasm: txaddy1 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 560(t=%r15 +movq 560(%rcx,%rdi),%r15 + +# qhasm: txaddy2 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 568(t=%r15 +movq 568(%rcx,%rdi),%r15 + +# qhasm: txaddy3 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 640(t=%r15 +movq 640(%rcx,%rdi),%r15 + +# qhasm: tysubx0 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 648(t=%r15 +movq 648(%rcx,%rdi),%r15 + +# qhasm: tysubx1 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 656(t=%r15 +movq 656(%rcx,%rdi),%r15 + +# qhasm: tysubx2 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 664(t=%r15 +movq 664(%rcx,%rdi),%r15 + +# qhasm: tysubx3 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 672(t=%r15 +movq 672(%rcx,%rdi),%r15 + +# qhasm: txaddy0 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 680(t=%r15 +movq 680(%rcx,%rdi),%r15 + +# qhasm: txaddy1 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 688(t=%r15 +movq 688(%rcx,%rdi),%r15 + +# qhasm: txaddy2 = t if = +# asm 1: cmove t=int64#13 +# asm 2: 
movq 696(t=%r15 +movq 696(%rcx,%rdi),%r15 + +# qhasm: txaddy3 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 768(t=%r15 +movq 768(%rcx,%rdi),%r15 + +# qhasm: tysubx0 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 776(t=%r15 +movq 776(%rcx,%rdi),%r15 + +# qhasm: tysubx1 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 784(t=%r15 +movq 784(%rcx,%rdi),%r15 + +# qhasm: tysubx2 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 792(t=%r15 +movq 792(%rcx,%rdi),%r15 + +# qhasm: tysubx3 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 800(t=%r15 +movq 800(%rcx,%rdi),%r15 + +# qhasm: txaddy0 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 808(t=%r15 +movq 808(%rcx,%rdi),%r15 + +# qhasm: txaddy1 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 816(t=%r15 +movq 816(%rcx,%rdi),%r15 + +# qhasm: txaddy2 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 824(t=%r15 +movq 824(%rcx,%rdi),%r15 + +# qhasm: txaddy3 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 896(t=%r15 +movq 896(%rcx,%rdi),%r15 + +# qhasm: tysubx0 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 896(t=%r15 +movq 904(%rcx,%rdi),%r15 + +# qhasm: tysubx1 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 912(t=%r15 +movq 912(%rcx,%rdi),%r15 + +# qhasm: tysubx2 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 920(t=%r15 +movq 920(%rcx,%rdi),%r15 + +# qhasm: tysubx3 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 928(t=%r15 +movq 928(%rcx,%rdi),%r15 + +# qhasm: txaddy0 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 936(t=%r15 +movq 936(%rcx,%rdi),%r15 + +# qhasm: txaddy1 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 944(t=%r15 +movq 944(%rcx,%rdi),%r15 + +# qhasm: txaddy2 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 952(t=%r15 +movq 952(%rcx,%rdi),%r15 + +# qhasm: txaddy3 = t if = +# asm 1: cmove t=int64#13 +# asm 2: mov t=%r15 +mov %rsi,%r15 + +# qhasm: tysubx0 = txaddy0 if signed< +# asm 1: cmovl t=int64#13 +# asm 2: mov t=%r15 +mov %r9,%r15 + +# qhasm: tysubx1 = txaddy1 if 
signed< +# asm 1: cmovl t=int64#13 +# asm 2: mov t=%r15 +mov %rax,%r15 + +# qhasm: tysubx2 = txaddy2 if signed< +# asm 1: cmovl t=int64#13 +# asm 2: mov t=%r15 +mov %r10,%r15 + +# qhasm: tysubx3 = txaddy3 if signed< +# asm 1: cmovl tp=int64#13 +# asm 2: movq tp=%r15 +movq 56(%rsp),%r15 + +# qhasm: *(uint64 *)(tp + 0) = tysubx0 +# asm 1: movq tt2d0=int64#2 +# asm 2: mov $0,>tt2d0=%rsi +mov $0,%rsi + +# qhasm: tt2d1 = 0 +# asm 1: mov $0,>tt2d1=int64#6 +# asm 2: mov $0,>tt2d1=%r9 +mov $0,%r9 + +# qhasm: tt2d2 = 0 +# asm 1: mov $0,>tt2d2=int64#7 +# asm 2: mov $0,>tt2d2=%rax +mov $0,%rax + +# qhasm: tt2d3 = 0 +# asm 1: mov $0,>tt2d3=int64#8 +# asm 2: mov $0,>tt2d3=%r10 +mov $0,%r10 + +# qhasm: tz0 = 1 +# asm 1: mov $0,>tz0=int64#1 +# asm 2: mov $0,>tz0=%rbp +mov $1,%rbp + +# qhasm: tz1 = 0 +# asm 1: mov $0,>tz1=int64#4 +# asm 2: mov $0,>tz1=%r12 +mov $0,%r12 + +# qhasm: tz2 = 0 +# asm 1: mov $0,>tz2=int64#5 +# asm 2: mov $0,>tz2=%r13 +mov $0,%r13 + +# qhasm: tz3 = 0 +# asm 1: mov $0,>tz3=int64#9 +# asm 2: mov $0,>tz3=%r14 +mov $0,%r14 + +# qhasm: =? 
u - 1 +# asm 1: cmp $1,t=int64#9 +# asm 2: movq 64(t=%r11 +movq 64(%rcx,%rdi),%r11 + +# qhasm: z0 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 72(t=%r11 +movq 72(%rcx,%rdi),%r11 + +# qhasm: tz1 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 80(t=%r11 +movq 80(%rcx,%rdi),%r11 + +# qhasm: tz2 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 88(t=%r11 +movq 88(%rcx,%rdi),%r11 + +# qhasm: tz3 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 96(t=%r11 +movq 96(%rcx,%rdi),%r11 + +# qhasm: tt2d0 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 104(t=%r11 +movq 104(%rcx,%rdi),%r11 + +# qhasm: tt2d1 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 112(t=%r11 +movq 112(%rcx,%rdi),%r11 + +# qhasm: tt2d2 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 120(t=%r11 +movq 120(%rcx,%rdi),%r11 + +# qhasm: tt2d3 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 192(t=%r11 +movq 192(%rcx,%rdi),%r11 + +# qhasm: tz0 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 200(t=%r11 +movq 200(%rcx,%rdi),%r11 + +# qhasm: tz1 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 208(t=%r11 +movq 208(%rcx,%rdi),%r11 + +# qhasm: tz2 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 216(t=%r11 +movq 216(%rcx,%rdi),%r11 + +# qhasm: tz3 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 224(t=%r11 +movq 224(%rcx,%rdi),%r11 + +# qhasm: tt2d0 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 232(t=%r11 +movq 232(%rcx,%rdi),%r11 + +# qhasm: tt2d1 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 240(t=%r11 +movq 240(%rcx,%rdi),%r11 + +# qhasm: tt2d2 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 248(t=%r11 +movq 248(%rcx,%rdi),%r11 + +# qhasm: tt2d3 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 320(t=%r11 +movq 320(%rcx,%rdi),%r11 + +# qhasm: tz0 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 328(t=%r11 +movq 328(%rcx,%rdi),%r11 + +# qhasm: tz1 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 336(t=%r11 +movq 336(%rcx,%rdi),%r11 + +# qhasm: tz2 = t if = +# asm 1: cmove t=int64#9 +# 
asm 2: movq 344(t=%r11 +movq 344(%rcx,%rdi),%r11 + +# qhasm: tz3 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 352(t=%r11 +movq 352(%rcx,%rdi),%r11 + +# qhasm: tt2d0 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 360(t=%r11 +movq 360(%rcx,%rdi),%r11 + +# qhasm: tt2d1 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 368(t=%r11 +movq 368(%rcx,%rdi),%r11 + +# qhasm: tt2d2 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 376(t=%r11 +movq 376(%rcx,%rdi),%r11 + +# qhasm: tt2d3 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 448(t=%r11 +movq 448(%rcx,%rdi),%r11 + +# qhasm: tz0 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 456(t=%r11 +movq 456(%rcx,%rdi),%r11 + +# qhasm: tz1 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 464(t=%r11 +movq 464(%rcx,%rdi),%r11 + +# qhasm: tz2 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 472(t=%r11 +movq 472(%rcx,%rdi),%r11 + +# qhasm: tz3 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 480(t=%r11 +movq 480(%rcx,%rdi),%r11 + +# qhasm: tt2d0 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 488(t=%r11 +movq 488(%rcx,%rdi),%r11 + +# qhasm: tt2d1 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 496(t=%r11 +movq 496(%rcx,%rdi),%r11 + +# qhasm: tt2d2 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 504(t=%r11 +movq 504(%rcx,%rdi),%r11 + +# qhasm: tt2d3 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 576(t=%r11 +movq 576(%rcx,%rdi),%r11 + +# qhasm: tz0 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 584(t=%r11 +movq 584(%rcx,%rdi),%r11 + +# qhasm: tz1 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 592(t=%r11 +movq 592(%rcx,%rdi),%r11 + +# qhasm: tz2 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 600(t=%r11 +movq 600(%rcx,%rdi),%r11 + +# qhasm: tz3 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 608(t=%r11 +movq 608(%rcx,%rdi),%r11 + +# qhasm: tt2d0 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 616(t=%r11 +movq 616(%rcx,%rdi),%r11 + +# qhasm: tt2d1 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 624(t=%r11 
+movq 624(%rcx,%rdi),%r11 + +# qhasm: tt2d2 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 632(t=%r11 +movq 632(%rcx,%rdi),%r11 + +# qhasm: tt2d3 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 704(t=%r11 +movq 704(%rcx,%rdi),%r11 + +# qhasm: tz0 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 712(t=%r11 +movq 712(%rcx,%rdi),%r11 + +# qhasm: tz1 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 720(t=%r11 +movq 720(%rcx,%rdi),%r11 + +# qhasm: tz2 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 728(t=%r11 +movq 728(%rcx,%rdi),%r11 + +# qhasm: tz3 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 736(t=%r11 +movq 736(%rcx,%rdi),%r11 + +# qhasm: tt2d0 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 744(t=%r11 +movq 744(%rcx,%rdi),%r11 + +# qhasm: tt2d1 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 752(t=%r11 +movq 752(%rcx,%rdi),%r11 + +# qhasm: tt2d2 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 760(t=%r11 +movq 760(%rcx,%rdi),%r11 + +# qhasm: tt2d3 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 832(t=%r11 +movq 832(%rcx,%rdi),%r11 + +# qhasm: tz0 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 840(t=%r11 +movq 840(%rcx,%rdi),%r11 + +# qhasm: tz1 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 848(t=%r11 +movq 848(%rcx,%rdi),%r11 + +# qhasm: tz2 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 856(t=%r11 +movq 856(%rcx,%rdi),%r11 + +# qhasm: tz3 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 864(t=%r11 +movq 864(%rcx,%rdi),%r11 + +# qhasm: tt2d0 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 872(t=%r11 +movq 872(%rcx,%rdi),%r11 + +# qhasm: tt2d1 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 880(t=%r11 +movq 880(%rcx,%rdi),%r11 + +# qhasm: tt2d2 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 88(t=%r11 +movq 888(%rcx,%rdi),%r11 + +# qhasm: tt2d3 = t if = +# asm 1: cmove t=int64#5 +# asm 2: movq 960(t=%r8 +movq 960(%rcx,%rdi),%r8 + +# qhasm: tz0 = t if = +# asm 1: cmove t=int64#5 +# asm 2: movq 968(t=%r8 +movq 968(%rcx,%rdi),%r8 + 
+# qhasm: tz1 = t if = +# asm 1: cmove t=int64#5 +# asm 2: movq 976(t=%r8 +movq 976(%rcx,%rdi),%r8 + +# qhasm: tz2 = t if = +# asm 1: cmove t=int64#1 +# asm 2: movq 984(t=%r14 +movq 984(%rcx,%rdi),%r8 + +# qhasm: tz3 = t if = +# asm 1: cmove t=int64#5 +# asm 2: movq 992(t=%r8 +movq 992(%rcx,%rdi),%r8 + +# qhasm: tt2d0 = t if = +# asm 1: cmove t=int64#5 +# asm 2: movq 1000(t=%r8 +movq 1000(%rcx,%rdi),%r8 + +# qhasm: tt2d1 = t if = +# asm 1: cmove t=int64#5 +# asm 2: movq 1008(t=%r8 +movq 1008(%rcx,%rdi),%r8 + +# qhasm: tt2d2 = t if = +# asm 1: cmove t=int64#1 +# asm 2: movq 1016(t=%rdi +movq 1016(%rcx,%rdi),%rdi + +# qhasm: *(uint64 *)(tp + 64) = tz0 +# asm 1: movq tt0=int64#1 +# asm 2: mov $0,>tt0=%rdi +mov $0,%rdi + +# qhasm: tt1 = 0 +# asm 1: mov $0,>tt1=int64#4 +# asm 2: mov $0,>tt1=%rcx +mov $0,%rcx + +# qhasm: tt2 = 0 +# asm 1: mov $0,>tt2=int64#5 +# asm 2: mov $0,>tt2=%r8 +mov $0,%r8 + +# qhasm: tt3 = 0 +# asm 1: mov $0,>tt3=int64#9 +# asm 2: mov $0,>tt3=%r11 +mov $0,%r11 + +# qhasm: carry? 
tt0 -= tt2d0 +# asm 1: sub subt0=int64#10 +# asm 2: mov $0,>subt0=%r12 +mov $0,%r12 + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#11 +# asm 2: mov $38,>subt1=%r13 +mov $38,%r13 + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae caller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 +# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/wallet/crypto/amd64-64-24k.c b/src/wallet/crypto/amd64-64-24k.c new file mode 100644 index 0000000000..e4e0831dc1 --- /dev/null +++ b/src/wallet/crypto/amd64-64-24k.c @@ -0,0 +1,70 @@ +// Copyright (c) 2017, The Monero Project +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. 
Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Parts of this file from bench.cr.yp.to/supercop.html (2017-07-25): +// Daniel J. 
Bernstein +// Niels Duif +// Tanja Lange +// lead: Peter Schwabe +// Bo-Yin Yang + +#include +#include +#include "fe25519.h" +#include "ge25519.h" + +/* constants below can be found in various fles in ed25519/amd64-64-24k */ + +/* d */ +static const fe25519 ecd = {{0x75EB4DCA135978A3, 0x00700A4D4141D8AB, 0x8CC740797779E898, 0x52036CEE2B6FFE73}}; +/* 2*d */ +static const fe25519 ec2d = {{0xEBD69B9426B2F146, 0x00E0149A8283B156, 0x198E80F2EEF3D130, 0xA406D9DC56DFFCE7}}; +/* sqrt(-1) */ +static const fe25519 sqrtm1 = {{0xC4EE1B274A0EA0B0, 0x2F431806AD2FE478, 0x2B4D00993DFBD7A7, 0x2B8324804FC1DF0B}}; + +/* taken from loop in ed25519/amd64-64-24k/ge25519_double_scalarmult.c */ +static void ge25519_p1p1_to_pniels(ge25519_pniels* out, ge25519_p1p1 const* in) { + assert(out && in); + ge25519_p1p1_to_p3((ge25519_p3*)out, in); + const fe25519 d = out->ysubx; + fe25519_sub(&(out->ysubx), &(out->xaddy), &(out->ysubx)); + fe25519_add(&(out->xaddy), &(out->xaddy), &d); + fe25519_mul(&(out->t2d), &(out->t2d), &ec2d); +} + +#define choose_tp crypto_sign_ed25519_amd64_64_choose_tp +#define crypto_scalarmult crypto_scalarmult_curve25519_amd64_64_24k +#include "amd64.c.inc" + +int monero_wallet_amd64_64_24k_get_tx_key(char* out, char const* pub, char const* sec) { + return get_tx_key(out, pub, sec); +} +int monero_wallet_amd64_64_24k_get_tx_output_public(char* out, char const* pub, char const* sec) { + return get_tx_output_public(out, pub, sec); +} + diff --git a/src/wallet/crypto/amd64-64-24k.cmake b/src/wallet/crypto/amd64-64-24k.cmake new file mode 100644 index 0000000000..f70999a2fc --- /dev/null +++ b/src/wallet/crypto/amd64-64-24k.cmake @@ -0,0 +1,11 @@ +include("${CMAKE_CURRENT_SOURCE_DIR}/amd64.cmake") +enable_language(ASM-ATT) +add_wallet_amd64_lib( + "wallet-crypto-amd64-64-24k" + "amd64-64-24k" + "ed25519/amd64-64-24k" + SOURCES + "ed25519/amd64-64-24k/fe25519_add.s" + "ed25519/amd64-64-24k/fe25519_sub.s" +) +list(APPEND WALLET_CRYPTO_LIBS "wallet-crypto-amd64-64-24k") diff 
--git a/src/wallet/crypto/amd64-64-24k.h b/src/wallet/crypto/amd64-64-24k.h new file mode 100644 index 0000000000..ceb4247bf6 --- /dev/null +++ b/src/wallet/crypto/amd64-64-24k.h @@ -0,0 +1,57 @@ +// Copyright (c) 2017, The Monero Project +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#pragma once + +#include +#include "crypto/crypto.h" + +extern "C" { + int monero_wallet_amd64_64_24k_get_tx_key(char*, char const*, char const*); + int monero_wallet_amd64_64_24k_get_tx_output_public(char*, char const*, char const*); +} + +namespace tools { + namespace wallet_only { + namespace amd64_64_24k { + inline bool generate_key_derivation( + crypto::public_key const& pub, crypto::secret_key const& sec, crypto::key_derivation& out + ) { + return monero_wallet_amd64_64_24k_get_tx_key(out.data, pub.data, sec.data) == 0; + } + + inline bool derive_public_key( + crypto::key_derivation const& d, std::size_t index, crypto::public_key const& pub, crypto::public_key& out + ) { + ec_scalar scalar; + crypto::derivation_to_scalar(d, index, scalar); + return monero_wallet_amd64_64_24k_get_tx_output_public(out.data, pub.data, scalar.data) == 0; + } + } + } +} diff --git a/src/wallet/crypto/amd64.c.inc b/src/wallet/crypto/amd64.c.inc new file mode 100644 index 0000000000..eeed7e9ac0 --- /dev/null +++ b/src/wallet/crypto/amd64.c.inc @@ -0,0 +1,178 @@ +// Copyright (c) 2017, The Monero Project +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers +// +// Parts of this file from biench.cr.yp.to/supercop.html (2017-02-25): +// Daniel J. Bernstein +// Niels Duif +// Tanja Lange +// lead: Peter Schwabe +// Bo-Yin Yang + +#include +#include "ge25519.h" + +extern void choose_tp(ge25519_pniels *t, unsigned long long pos, signed long long b, const ge25519_pniels *base_multiples); + +/* return 0 on success, -1 otherwise. Taken from +ed25519/amd64-51-30k/ge25519_unpackneg.c - the negation is removed. 
*/ +static int unpack_vartime(ge25519_p3 *r, const unsigned char p[32]) { + fe25519 t, chk, num, den, den2, den4, den6; + unsigned char par = p[31] >> 7; + + fe25519_setint(&r->z,1); + fe25519_unpack(&r->y, p); + fe25519_square(&num, &r->y); /* x = y^2 */ + fe25519_mul(&den, &num, &ecd); /* den = dy^2 */ + fe25519_sub(&num, &num, &r->z); /* x = y^2-1 */ + fe25519_add(&den, &r->z, &den); /* den = dy^2+1 */ + + /* Computation of sqrt(num/den) + 1.: computation of num^((p-5)/8)*den^((7p-35)/8) = (num*den^7)^((p-5)/8) + */ + fe25519_square(&den2, &den); + fe25519_square(&den4, &den2); + fe25519_mul(&den6, &den4, &den2); + fe25519_mul(&t, &den6, &num); + fe25519_mul(&t, &t, &den); + + fe25519_pow2523(&t, &t); + /* 2. computation of r->x = t * num * den^3 + */ + fe25519_mul(&t, &t, &num); + fe25519_mul(&t, &t, &den); + fe25519_mul(&t, &t, &den); + fe25519_mul(&r->x, &t, &den); + + /* 3. Check whether sqrt computation gave correct result, multiply by sqrt(-1) if not: + */ + fe25519_square(&chk, &r->x); + fe25519_mul(&chk, &chk, &den); + if (!fe25519_iseq_vartime(&chk, &num)) + fe25519_mul(&r->x, &r->x, &sqrtm1); + + /* 4. Now we have one of the two square roots, except if input was not a square + */ + fe25519_square(&chk, &r->x); + fe25519_mul(&chk, &chk, &den); + if (!fe25519_iseq_vartime(&chk, &num)) + return -1; + + /* 5. 
Choose the desired square root according to parity: + */ + if(fe25519_getparity(&r->x) == (1-par)) + fe25519_neg(&r->x, &r->x); + + fe25519_mul(&r->t, &r->x, &r->y); + return 0; +} + +static void p3_to_pniels(ge25519_pniels* out, ge25519_p3 const* src) { + fe25519_sub(&out->ysubx, &src->y, &src->x); + fe25519_add(&out->xaddy, &src->x, &src->y); + fe25519_mul(&out->t2d, &src->t, &ec2d); + out->z = src->z; +} + +/* _similar_ to ge_scalarmult in src/crypto/crypto-ops.c */ +static void monero_scalarmult(ge25519_p3* r, char const* sec) { + signed char b[64]; + ge25519_pniels base_multiples[8]; + + ge25519_pniels t; + ge25519_p1p1 tp1p1; + + sc25519 s; + memcpy(s.v, sec, sizeof(s)); + sc25519_window4(b, &s); + + p3_to_pniels(&base_multiples[0], r); + for (int i = 0; i < 7; ++i) { + ge25519_pnielsadd_p1p1(&tp1p1, r, &base_multiples[i]); + ge25519_p1p1_to_pniels(&base_multiples[i + 1], &tp1p1); + } + + // set neutral + fe25519_setint(&r->x, 0); + fe25519_setint(&r->y, 1); + fe25519_setint(&r->t, 0); + fe25519_setint(&r->z, 1); + // end set neutral + + for (int i = 63; /* break below*/ ; --i) { + choose_tp(&t, (unsigned long long) 0, (signed long long) b[i], base_multiples); + ge25519_pnielsadd_p1p1(&tp1p1, r, &t); + ge25519_p1p1_to_p2((ge25519_p2*)r, &tp1p1); + + if (i == 0) break; + + ge25519_dbl_p1p1(&tp1p1,(ge25519_p2 *)r); + ge25519_p1p1_to_p2((ge25519_p2 *)r, &tp1p1); + ge25519_dbl_p1p1(&tp1p1,(ge25519_p2 *)r); + ge25519_p1p1_to_p2((ge25519_p2 *)r, &tp1p1); + ge25519_dbl_p1p1(&tp1p1,(ge25519_p2 *)r); + ge25519_p1p1_to_p2((ge25519_p2 *)r, &tp1p1); + ge25519_dbl_p1p1(&tp1p1,(ge25519_p2 *)r); + ge25519_p1p1_to_p3(r, &tp1p1); + } + + // non-standard, monero specific - guarantees point is in ed25519 group + ge25519_dbl_p1p1(&tp1p1,(ge25519_p2 *)r); + ge25519_p1p1_to_p2((ge25519_p2 *)r, &tp1p1); + ge25519_dbl_p1p1(&tp1p1,(ge25519_p2 *)r); + ge25519_p1p1_to_p2((ge25519_p2 *)r, &tp1p1); + ge25519_dbl_p1p1(&tp1p1,(ge25519_p2 *)r); + ge25519_p1p1_to_p3(r, &tp1p1); +} + +static 
int get_tx_key(char* out, char const* pub, char const* sec) { + ge25519 unpacked; + if (unpack_vartime(&unpacked, (unsigned char const*)pub) != 0) + return -1; + + monero_scalarmult(&unpacked, sec); + ge25519_pack((unsigned char*)out, &unpacked); + return 0; +} + +static int get_tx_output_public(char* out, char const* base, char const* sec) { + ge25519 unpacked1; + ge25519 unpacked2; + sc25519 unpacked3; + + if (unpack_vartime(&unpacked1, (unsigned char const*)base) != 0) + return -1; + + memcpy(unpacked3.v, sec, sizeof(unpacked3.v)); + ge25519_scalarmult_base(&unpacked2, &unpacked3); + ge25519_add(&unpacked1, &unpacked1, &unpacked2); + ge25519_pack((unsigned char*)out, &unpacked1); + return 0; +} + diff --git a/src/wallet/crypto/amd64.cmake b/src/wallet/crypto/amd64.cmake new file mode 100644 index 0000000000..93bb3eb28c --- /dev/null +++ b/src/wallet/crypto/amd64.cmake @@ -0,0 +1,40 @@ + + +function(add_wallet_amd64_lib LIBNAME FILENAME LIBFOLDER) + set(MULTIVARS SOURCES DIRECTORIES) + cmake_parse_arguments(AMD64_PERF "" "" "${MULTIVARS}" ${ARGN}) + + enable_language(ASM-ATT) + add_library(${LIBNAME} + "${FILENAME}.h" "${FILENAME}.c" "${FILENAME}-choose_tp.s" + "${LIBFOLDER}/choose_t.s" + "${LIBFOLDER}/consts.s" + "${LIBFOLDER}/fe25519_getparity.c" + "${LIBFOLDER}/fe25519_freeze.s" + "${LIBFOLDER}/fe25519_invert.c" + "${LIBFOLDER}/fe25519_iseq.c" + "${LIBFOLDER}/fe25519_mul.s" + "${LIBFOLDER}/fe25519_neg.c" + "${LIBFOLDER}/fe25519_pack.c" + "${LIBFOLDER}/fe25519_pow2523.c" + "${LIBFOLDER}/fe25519_setint.c" + "${LIBFOLDER}/fe25519_square.s" + "${LIBFOLDER}/fe25519_unpack.c" + "${LIBFOLDER}/ge25519_add.c" + "${LIBFOLDER}/ge25519_add_p1p1.s" + "${LIBFOLDER}/ge25519_dbl_p1p1.s" + "${LIBFOLDER}/ge25519_double.c" + "${LIBFOLDER}/ge25519_nielsadd_p1p1.s" + "${LIBFOLDER}/ge25519_nielsadd2.s" + "${LIBFOLDER}/ge25519_pack.c" + "${LIBFOLDER}/ge25519_p1p1_to_p2.s" + "${LIBFOLDER}/ge25519_p1p1_to_p3.s" + "${LIBFOLDER}/ge25519_pnielsadd_p1p1.s" + 
"${LIBFOLDER}/ge25519_scalarmult_base.c" + "${LIBFOLDER}/ge25519_unpackneg.c" + "${LIBFOLDER}/sc25519_from32bytes.c" + "${LIBFOLDER}/sc25519_window4.c" + ${AMD64_PERF_SOURCES} + ) + target_include_directories(${LIBNAME} PRIVATE ${LIBFOLDER} ${AMD64_PERF_DIRECTORIES}) +endfunction (add_wallet_amd64_lib) diff --git a/src/wallet/crypto/import.h.in b/src/wallet/crypto/import.h.in new file mode 100644 index 0000000000..bea32bb642 --- /dev/null +++ b/src/wallet/crypto/import.h.in @@ -0,0 +1,38 @@ +// Copyright (c) 2017, The Monero Project +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#pragma once + +#include "@WALLET_CRYPTO@.h" + +namespace tools { + namespace wallet_only { + using @WALLET_CRYPTO_NAMESPACE@::generate_key_derivation; + using @WALLET_CRYPTO_NAMESPACE@::derive_public_key; + } +} diff --git a/src/wallet/crypto/none.h b/src/wallet/crypto/none.h new file mode 100644 index 0000000000..e2a20d296b --- /dev/null +++ b/src/wallet/crypto/none.h @@ -0,0 +1,12 @@ +#pragma once + +#include "crypto/crypto.h" + +namespace tools { + namespace wallet_only { + namespace none { + using crypto::generate_key_derivation; + using crypto::derive_public_key; + } + } +} diff --git a/src/wallet/wallet2.cpp b/src/wallet/wallet2.cpp index b63e07b2d2..aff934cd43 100644 --- a/src/wallet/wallet2.cpp +++ b/src/wallet/wallet2.cpp @@ -59,6 +59,7 @@ using namespace epee; #include "common/base58.h" #include "common/scoped_message_writer.h" #include "ringct/rctSigs.h" +#include "wallet/crypto/import.h" extern "C" { @@ -138,6 +139,13 @@ uint64_t calculate_fee(uint64_t fee_per_kb, const cryptonote::blobdata &blob, ui return calculate_fee(fee_per_kb, blob.size(), fee_multiplier); } +bool is_out_to_acc_precomp(const crypto::public_key& spend_public_key, const txout_to_key& out_key, const crypto::key_derivation& derivation, size_t output_index) +{ + crypto::public_key pk; + tools::wallet_only::derive_public_key(derivation, output_index, spend_public_key, pk); + return pk == out_key.key; +} + std::unique_ptr make_basic(const 
boost::program_options::variables_map& vm, const options& opts) { const bool testnet = command_line::get_arg(vm, opts.testnet); @@ -607,7 +615,7 @@ void wallet2::check_acc_out_precomp(const crypto::public_key &spend_public_key, static uint64_t decodeRct(const rct::rctSig & rv, const crypto::public_key &pub, const crypto::secret_key &sec, unsigned int i, rct::key & mask) { crypto::key_derivation derivation; - bool r = crypto::generate_key_derivation(pub, sec, derivation); + bool r = tools::wallet_only::generate_key_derivation(pub, sec, derivation); if (!r) { LOG_ERROR("Failed to generate key derivation to decode rct output " << i); @@ -687,7 +695,7 @@ void wallet2::process_new_transaction(const crypto::hash &txid, const cryptonote int threads = tools::get_max_concurrency(); const cryptonote::account_keys& keys = m_account.get_keys(); crypto::key_derivation derivation; - generate_key_derivation(tx_pub_key, keys.m_view_secret_key, derivation); + tools::wallet_only::generate_key_derivation(tx_pub_key, keys.m_view_secret_key, derivation); if (miner_tx && m_refresh_type == RefreshNoCoinbase) { // assume coinbase isn't for us @@ -5171,7 +5179,7 @@ crypto::public_key wallet2::get_tx_pub_key_from_received_outs(const tools::walle while (find_tx_extra_field_by_type(tx_extra_fields, pub_key_field, pk_index++)) { const crypto::public_key tx_pub_key = pub_key_field.pub_key; crypto::key_derivation derivation; - generate_key_derivation(tx_pub_key, keys.m_view_secret_key, derivation); + tools::wallet_only::generate_key_derivation(tx_pub_key, keys.m_view_secret_key, derivation); for (size_t i = 0; i < td.m_tx.vout.size(); ++i) { diff --git a/tests/unit_tests/CMakeLists.txt b/tests/unit_tests/CMakeLists.txt index 2f62dc2aae..f028c6a183 100644 --- a/tests/unit_tests/CMakeLists.txt +++ b/tests/unit_tests/CMakeLists.txt @@ -62,7 +62,8 @@ set(unit_tests_sources varint.cpp ringct.cpp output_selection.cpp - vercmp.cpp) + vercmp.cpp + wallet_crypto.cpp) set(unit_tests_headers 
unit_tests_utils.h) @@ -72,6 +73,7 @@ add_executable(unit_tests ${unit_tests_headers}) target_link_libraries(unit_tests PRIVATE + crypto ringct cryptonote_protocol cryptonote_core diff --git a/tests/unit_tests/wallet_crypto.cpp b/tests/unit_tests/wallet_crypto.cpp new file mode 100644 index 0000000000..2c5b67b9f2 --- /dev/null +++ b/tests/unit_tests/wallet_crypto.cpp @@ -0,0 +1,135 @@ +// Copyright (c) 2017, The Monero Project +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, are +// permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of +// conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list +// of conditions and the following disclaimer in the documentation and/or other +// materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be +// used to endorse or promote products derived from this software without specific +// prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#include "gtest/gtest.h" + +#include +#include "crypto/crypto.h" +#include "cryptonote_basic/cryptonote_basic.h" +#include "wallet/crypto/import.h" + +TEST(WalletCrypto, KeyDerivation) { + for (unsigned i = 0; i < 50; ++i) { + const auto one = cryptonote::keypair::generate(); + const auto two = cryptonote::keypair::generate(); + crypto::key_derivation reference{}; + ASSERT_TRUE(crypto::generate_key_derivation(one.pub, two.sec, reference)); + + crypto::key_derivation first{}; + ASSERT_TRUE(tools::wallet_only::generate_key_derivation(one.pub, two.sec, first)); + ASSERT_TRUE(boost::equal(first.data, reference.data)); + + crypto::key_derivation second{}; + ASSERT_TRUE(tools::wallet_only::generate_key_derivation(two.pub, one.sec, second)); + ASSERT_TRUE(boost::equal(first.data, second.data)); + } +} + +TEST(WalletCrypto, DerivePublic) { + for (unsigned i = 0; i < 50; ++i) { + const auto one = cryptonote::keypair::generate(); + const auto two = cryptonote::keypair::generate(); + + crypto::key_derivation derived{}; + static_assert(sizeof(derived) == sizeof(one.pub), "bad memcpy"); + std::memcpy(std::addressof(derived), std::addressof(one.pub), sizeof(derived)); + + for (unsigned j = 0; j < 50; ++j) { + crypto::public_key reference{}; + ASSERT_TRUE(crypto::derive_public_key(derived, j, two.pub, reference)); + + crypto::public_key first{}; + ASSERT_TRUE(tools::wallet_only::derive_public_key(derived, j, two.pub, first)); + ASSERT_TRUE(boost::equal(first.data, reference.data)); + } + } +} +/* +TEST(WalletCrypto, Perf) { + const auto one = cryptonote::keypair::generate(); + const auto two = cryptonote::keypair::generate(); + for (unsigned i = 0; i < 10; ++i) { + crypto::key_derivation reference{}; + ASSERT_TRUE(crypto::generate_key_derivation(one.pub, two.sec, reference)); + } + + unsigned count = 0; + auto start = std::chrono::steady_clock::now(); + for (unsigned i = 0; i < 10000; ++i) { + crypto::key_derivation reference{}; + count += 
crypto::generate_key_derivation(one.pub, two.sec, reference); + } + std::cout << "Standard Stage 1 (ns):\t\t" << (std::chrono::steady_clock::now() - start).count() << " - Iterations: " << count << std::endl; + + for (unsigned i = 0; i < 10; ++i) { + crypto::key_derivation reference{}; + ASSERT_TRUE(tools::wallet_only::generate_key_derivation(one.pub, two.sec, reference)); + } + + count = 0; + start = std::chrono::steady_clock::now(); + for (unsigned i = 0; i < 10000; ++i) { + crypto::key_derivation reference{}; + count += tools::wallet_only::generate_key_derivation(one.pub, two.sec, reference); + } + std::cout << "Perf Stage 1 (ns):\t\t" << (std::chrono::steady_clock::now() - start).count() << " - Iterations: " << count << std::endl; +} + +TEST(WalletCrypto, Perf2) { + const auto one = cryptonote::keypair::generate(); + const auto two = cryptonote::keypair::generate(); + crypto::key_derivation derived{}; + static_assert(sizeof(derived) == sizeof(one.pub), "bad memcpy"); + std::memcpy(std::addressof(derived), std::addressof(one.pub), sizeof(derived)); + + for (unsigned i = 0; i < 10; ++i) { + crypto::public_key reference{}; + ASSERT_TRUE(crypto::derive_public_key(derived, i, two.pub, reference)); + } + unsigned count = 0; + auto start = std::chrono::steady_clock::now(); + for (unsigned i = 0; i < 10000; ++i) { + crypto::public_key reference{}; + count += crypto::derive_public_key(derived, i, two.pub, reference); + } + std::cout << "Standard Stage 2 (ns):\t\t" << (std::chrono::steady_clock::now() - start).count() << " - Iterations: " << count << std::endl; + + for (unsigned i = 0; i < 10; ++i) { + crypto::public_key reference{}; + ASSERT_TRUE(tools::wallet_only::derive_public_key(derived, i, two.pub, reference)); + } + + count = 0; + start = std::chrono::steady_clock::now(); + for (unsigned i = 0; i < 10000; ++i) { + crypto::public_key reference{}; + count += tools::wallet_only::derive_public_key(derived, i, two.pub, reference); + } + std::cout << "Perf Stage 2 
(ns):\t\t" << (std::chrono::steady_clock::now() - start).count() << " - Iterations: " << count << std::endl; +} +*/